repo_name
stringlengths
5
92
path
stringlengths
4
232
copies
stringclasses
19 values
size
stringlengths
4
7
content
stringlengths
721
1.04M
license
stringclasses
15 values
hash
int64
-9,223,277,421,539,062,000
9,223,102,107B
line_mean
float64
6.51
99.9
line_max
int64
15
997
alpha_frac
float64
0.25
0.97
autogenerated
bool
1 class
qPCR4vir/orange3
Orange/tests/test_ada_boost.py
1
3357
# Test methods with long descriptive names can omit docstrings # pylint: disable=missing-docstring import unittest import numpy as np from Orange.data import Table from Orange.classification import TreeLearner from Orange.regression import TreeRegressionLearner from Orange.ensembles import SklAdaBoostLearner, SklAdaBoostRegressionLearner from Orange.evaluation import CrossValidation, CA, RMSE class TestSklAdaBoostLearner(unittest.TestCase): @classmethod def setUpClass(cls): cls.iris = Table("iris") cls.housing = Table("housing") def test_adaboost(self): learn = SklAdaBoostLearner() results = CrossValidation(self.iris, [learn], k=3) ca = CA(results) self.assertGreater(ca, 0.9) self.assertLess(ca, 0.99) def test_adaboost_base_estimator(self): np.random.seed(0) stump_estimator = TreeLearner(max_depth=1) tree_estimator = TreeLearner() stump = SklAdaBoostLearner(base_estimator=stump_estimator) tree = SklAdaBoostLearner(base_estimator=tree_estimator) results = CrossValidation(self.iris, [stump, tree], k=3) ca = CA(results) self.assertLess(ca[0], ca[1]) def test_predict_single_instance(self): learn = SklAdaBoostLearner() m = learn(self.iris) ins = self.iris[0] m(ins) _, _ = m(ins, m.ValueProbs) def test_predict_table(self): learn = SklAdaBoostLearner() m = learn(self.iris) m(self.iris) _, _ = m(self.iris, m.ValueProbs) def test_predict_numpy(self): learn = SklAdaBoostLearner() m = learn(self.iris) _, _ = m(self.iris.X, m.ValueProbs) def test_adaboost_adequacy(self): learner = SklAdaBoostLearner() self.assertRaises(ValueError, learner, self.housing) def test_adaboost_reg(self): learn = SklAdaBoostRegressionLearner() results = CrossValidation(self.housing, [learn], k=3) _ = RMSE(results) def test_adaboost_reg_base_estimator(self): np.random.seed(0) stump_estimator = TreeRegressionLearner(max_depth=1) tree_estimator = TreeRegressionLearner() stump = SklAdaBoostRegressionLearner(base_estimator=stump_estimator) tree = SklAdaBoostRegressionLearner(base_estimator=tree_estimator) 
results = CrossValidation(self.housing, [stump, tree], k=3) rmse = RMSE(results) self.assertGreaterEqual(rmse[0], rmse[1]) def test_predict_single_instance_reg(self): learn = SklAdaBoostRegressionLearner() m = learn(self.housing) ins = self.housing[0] pred = m(ins) self.assertGreaterEqual(pred, 0) def test_predict_table_reg(self): learn = SklAdaBoostRegressionLearner() m = learn(self.housing) pred = m(self.housing) self.assertEqual(len(self.housing), len(pred)) self.assertGreater(all(pred), 0) def test_predict_numpy_reg(self): learn = SklAdaBoostRegressionLearner() m = learn(self.housing) pred = m(self.housing.X) self.assertEqual(len(self.housing), len(pred)) self.assertGreater(all(pred), 0) def test_adaboost_adequacy_reg(self): learner = SklAdaBoostRegressionLearner() self.assertRaises(ValueError, learner, self.iris)
bsd-2-clause
61,752,926,166,089,920
33.96875
77
0.657432
false
kernsuite-debian/obit
python/OTWindow.py
1
7357
""" OTWindow allows running wxPython widgets Widgets using wxPython must all be created and run in the same thread. This class creates a wxPython App in a separate thread and allows starting new widgets in this same thread. New widgets can be created using the functions newMsgWin(tw) create message windiow and execute TaskWindow tw """ # $Id: OTWindow.py 2 2008-06-10 15:32:27Z bill.cotton $ #----------------------------------------------------------------------- # Copyright (C) 2006 # Associated Universities, Inc. Washington DC, USA. # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of # the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public # License along with this program; if not, write to the Free # Software Foundation, Inc., 675 Massachusetts Ave, Cambridge, # MA 02139, USA. # # Correspondence concerning this software should be addressed as follows: # Internet email: bcotton@nrao.edu. # Postal address: William Cotton # National Radio Astronomy Observatory # 520 Edgemont Road # Charlottesville, VA 22903-2475 USA #----------------------------------------------------------------------- class OTWindow: def start(self): """ start the GUI thread """ import thread thread.start_new_thread(self.run, ()) def run(self): """ Note that OTWindow2 is first imported ***here***. This is the second thread. OTWindow2 imports wxPython, if we imported it at the module level instead of in this function, the import would occur in the main thread and wxPython would not run correctly in the second thread. The wxPython GUI MainLoop is run here (i.e. 
no return) """ ################################################################ try: import OTWindow2 self.app = OTWindow2.OTGUIApp() self.started = True self.app.MainLoop() except TypeError: self.app = None except Exception, e: self.app = None #print "DEBUG: oh bugger untrapped exception in OTWindow.run" #print e def add_MsgWin(self, tw): """ New Task message widget Send an event to the catcher window in the other thread and tell it to create a MsgWin window. tw = TaskWindow of task to be run """ ################################################################ import OTWindow2, MsgWin if self.app: evt = OTWindow2.MsgWinEvt() evt.evt_type = OTWindow2.EVT_NEW # Set event type evt.evt_tw = tw # add task window self.app.catcher.AddPendingEvent(evt); else: OTWindow2.add_MsgWin(tw) # end add_MsgWin def SetLabel(self, Id, label): """ Set Widget label Send an event to the catcher window in the other thread and tell it to Setlabel on window Id to label Id = widget Id label = New text to label """ ################################################################ import OTWindow2 evt = OTWindow2.MsgWinEvt() evt.evt_type = OTWindow2.EVT_LABEL # Set event type evt.evt_Id = Id # Set widget Id evt.evt_label = label # add new label text self.app.catcher.AddPendingEvent(evt); # end SetLabel def Bind(self, Id, handler): """ Set Button event handler Send an event to the catcher window in the other thread and tell it to rebind the event handler on button Id Id = widget Id handler = new event handler """ ################################################################ import OTWindow2 evt = OTWindow2.MsgWinEvt() evt.evt_type = OTWindow2.EVT_BIND # Set event type evt.evt_Id = Id # Set widget Id evt.evt_handler = handler # add task window self.app.catcher.AddPendingEvent(evt); # end Bind def Update(self, Id): """ Update Widget Id Send an event to the catcher window in the other thread and tell it to refresh the display of widget Id Id = widget Id """ 
################################################################ import OTWindow2 evt = OTWindow2.MsgWinEvt() evt.evt_type = OTWindow2.EVT_UPDATE # Set event type evt.evt_Id = Id # Set widget Id self.app.catcher.AddPendingEvent(evt); # end Update def Message(self, Id, message): """ Write messages in TextCtrl Send an event to the catcher window in the other thread and tell it to append message(s) in widget Id Id = widget Id (a TextCtrl) message = either a single string or an array of strings """ ################################################################ import OTWindow2 evt = OTWindow2.MsgWinEvt() evt.evt_type = OTWindow2.EVT_MESS # Set event type evt.evt_Id = Id # Set widget Id evt.evt_mess = message # add task message(s) self.app.catcher.AddPendingEvent(evt); # end Message # end class OTWindow # Startup wxPython windowing gui = OTWindow() gui.started = False gui.start() # Externally callable routine to create a MsgWin (task message window) def newMsgWin(tw): """ New task message window Create a new task message window, run the task displaying messages and handling communications tw = TaskWindow for task to be executed. """ ################################################################ # Be sure gui thread started import time while (not gui.started): time.sleep(0.2) gui.add_MsgWin(tw) # end newMsgWin def CallSetLabel (Id, label): """ Set label on widget Id Id = widget Id label = New text to label """ gui.SetLabel (Id, label) # end CallSetLabel def CallBind (Id, handler): """ Set Button event handler Id = widget Id handler = new event handler """ gui.Bind (Id, handler) # end CallBind def CallUpdate(Id): """ Update Widget Id Id = widget Id """ gui.Update (Id) # end CallUpdate def CallMessage (Id, message): """ Set label on widget Id Id = widget Id message = either a single string or an array of strings """ gui.Message (Id, message) # end CallMessage
gpl-2.0
7,236,081,742,020,023,000
31.991031
74
0.537447
false
guh/guh-cli
nymea/logs.py
1
20827
# -*- coding: UTF-8 -*- # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # Copyright (C) 2015 - 2018 Simon Stuerz <simon.stuerz@guh.io> # # # # This file is part of nymea-cli. # # # # nymea-cli is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, version 2 of the License. # # # # nymea-cli is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with nymea-cli. If not, see <http://www.gnu.org/licenses/>. # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # import datetime import curses import sys import socket import json import select import telnetlib import string import time import nymea import states import devices import actions import events import rules global stateTypeIdCache global actionTypeIdCache global eventTypeIdCache global deviceIdCache global ruleIdCache global logFilter def log_window(nymeaHost, nymeaPort, params = None): global screen global screenHeight global allLines global topLineNum global highlightLineNum global up global down global commandId global stateTypeIdCache global actionTypeIdCache global eventTypeIdCache global deviceIdCache global ruleIdCache global logFilter stateTypeIdCache = {} actionTypeIdCache = {} eventTypeIdCache = {} deviceIdCache = {} ruleIdCache = {} logFilter = params commandId = 0 # Create notification handler print "Connecting notification handler..." try: tn = telnetlib.Telnet(nymeaHost, nymeaPort) except : print "ERROR: notification socket could not connect the to nymea-server. 
\n" return None print "...OK \n" #enable_notification(notificationSocket) enable_notification(tn.get_socket()) create_log_window() try: x = None while (x !=ord('\n') and x != 27): socket_list = [sys.stdin, tn.get_socket()] read_sockets, write_sockets, error_sockets = select.select(socket_list , [], []) for sock in read_sockets: # notification messages: if sock == tn.get_socket(): packet = tn.read_until("}\n") packet = json.loads(packet) if 'notification' in packet: if packet['notification'] == "Logging.LogEntryAdded": entry = packet['params']['logEntry'] line = get_log_entry_line(entry, True) # scroll to bottom if curser was at the bottom if topLineNum + highlightLineNum == len(allLines) - 1: if line != None: allLines.append(line) scroll_to_bottom() else: if line != None: allLines.append(line) # flash to tell that there is a new entry curses.flash() draw_screen() else: x = screen.getch() # timeout of 50 ms (screen.timout(50)) if x == curses.KEY_UP: moveUpDown(up) draw_screen() elif x == curses.KEY_DOWN: moveUpDown(down) draw_screen() elif x == ord(' '): scroll_to_bottom() draw_screen() finally: curses.endwin() print "Log window closed." tn.close() print "Notification socket closed." 
def create_log_window(): global screen global screenHeight global allLines global topLineNum global highlightLineNum global up global down # init up = -1 down = 1 screen = curses.initscr() curses.start_color() curses.init_pair(1,curses.COLOR_BLACK, curses.COLOR_GREEN) curses.noecho() curses.cbreak() screen.keypad(1) screen.timeout(50) screen.clear() screenHeight = curses.LINES - 2 #screen.addstr(1, 2, "Loading...", curses.COLOR_GREEN) #draw_screen() allLines = get_log_entry_lines() scroll_to_bottom() def scroll_to_bottom(): global screenHeight global allLines global topLineNum global highlightLineNum # scroll to bottom if len(allLines) <= screenHeight: topLineNum = 0 highlightLineNum = len(allLines) - 1 else: topLineNum = len(allLines) - screenHeight highlightLineNum = screenHeight - 1 def enable_notification(notifySocket): global commandId params = {} commandObj = {} commandObj['id'] = commandId commandObj['method'] = "JSONRPC.SetNotificationStatus" params['enabled'] = "true" commandObj['params'] = params command = json.dumps(commandObj) + '\n' commandId = commandId + 1 notifySocket.send(command) def draw_screen(): global screen global topLineNum global screenHeight global allLines global highlightLineNum hilightColors = curses.color_pair(1) normalColors = curses.A_NORMAL screen.erase() screen.border(0) curses.curs_set(1) curses.curs_set(0) top = topLineNum bottom = topLineNum + screenHeight for (index,line,) in enumerate(allLines[top:bottom]): linenum = topLineNum + index # highlight current line if index != highlightLineNum: screen.addstr(index + 1, 2, line, normalColors) else: screen.addstr(index + 1, 2, line, hilightColors) screen.refresh() def moveUpDown(direction): global screenHeight global allLines global topLineNum global highlightLineNum global up global down nextLineNum = highlightLineNum + direction # paging if direction == up and highlightLineNum == 0 and topLineNum != 0: topLineNum += up return elif direction == down and nextLineNum == screenHeight 
and (topLineNum + screenHeight) != len(allLines): topLineNum += down return # scroll highlight line if direction == up and (topLineNum != 0 or highlightLineNum != 0): highlightLineNum = nextLineNum elif direction == down and (topLineNum + highlightLineNum + 1) != len(allLines) and highlightLineNum != screenHeight: highlightLineNum = nextLineNum def list_logEntries(): params = {} lines = [] response = nymea.send_command("Logging.GetLogEntries", params) for i in range(len(response['params']['logEntries'])): line = get_log_entry_line(response['params']['logEntries'][i]) print line def get_log_entry_lines(): global logFilter lines = [] response = nymea.send_command("Logging.GetLogEntries", logFilter) for i in range(len(response['params']['logEntries'])): line = get_log_entry_line(response['params']['logEntries'][i]) lines.append(line) return lines def get_log_entry_line(entry, checkFilter = False): global stateTypeIdCache global actionTypeIdCache global eventTypeIdCache global deviceIdCache global ruleIdCache global logFilter if checkFilter: if not verify_filter(entry): return None if entry['loggingLevel'] == "LoggingLevelInfo": levelString = "(I)" error = "-" else: levelString = "(A)" error = entry['errorCode'] if entry['source'] == "LoggingSourceSystem": deviceName = "nymea server" sourceType = "System" symbolString = "->" sourceName = "Active changed" if entry['active'] == True: value = "active" else: value = "inactive" if entry['source'] == "LoggingSourceStates": typeId = entry['typeId'] sourceType = "State Changed" symbolString = "->" if typeId in stateTypeIdCache: sourceName = stateTypeIdCache[typeId] else: stateType = states.get_stateType(typeId) if stateType is not None: sourceName = stateType["displayName"] stateTypeIdCache[typeId] = sourceName else: sourceName = typeId value = entry['value'] deviceName = get_device_name(entry) if entry['source'] == "LoggingSourceActions": typeId = entry['typeId'] sourceType = "Action executed" symbolString = "()" if typeId in 
actionTypeIdCache: sourceName = actionTypeIdCache[typeId] else: actionType = actions.get_actionType(typeId) if actionType is not None: sourceName = actionType['displayName'] else: sourceName = typeId actionTypeIdCache[typeId] = sourceName value = entry['value'] deviceName = get_device_name(entry) if entry['source'] == "LoggingSourceEvents": typeId = entry['typeId'] sourceType = "Event triggered" symbolString = "()" if typeId in eventTypeIdCache: sourceName = eventTypeIdCache[typeId] else: eventType = events.get_eventType(typeId) sourceName = eventType['displayName'] eventTypeIdCache[typeId] = sourceName value = entry['value'] deviceName = get_device_name(entry) if entry['source'] == "LoggingSourceRules": typeId = entry['typeId'] if entry['eventType'] == "LoggingEventTypeTrigger": sourceType = "Rule triggered" sourceName = "triggered" symbolString = "()" value = "" elif entry['eventType'] == "LoggingEventTypeActionsExecuted": sourceType = "Rule executed" sourceName = "actions" symbolString = "()" value = "" elif entry['eventType'] == "LoggingEventTypeExitActionsExecuted": sourceType = "Rule executed" sourceName = "exit actions" symbolString = "()" value = "" elif entry['eventType'] == "LoggingEventTypeEnabledChange": sourceType = "Rule changed" sourceName = "enabled" symbolString = "->" if entry['active']: value = "true" else: value = "false" else: sourceType = "Rule changed" symbolString = "()" sourceName = "active" if entry['active']: value = "active" else: value = "inactive" if typeId in ruleIdCache: deviceName = ruleIdCache[typeId] else: rule = rules.get_rule_description(typeId) if rule is not None and 'name' in rule: deviceName = rule['name'] else: deviceName = typeId ruleIdCache[typeId] = deviceName timestamp = datetime.datetime.fromtimestamp(entry['timestamp']/1000) line = "%s %s | %19s | %38s | %20s %3s %20s | %10s" %(levelString.encode('utf-8'), timestamp, sourceType.encode('utf-8'), deviceName.encode('utf-8'), sourceName.encode('utf-8'), 
symbolString.encode('utf-8'), value.encode('utf-8'), error.encode('utf-8')) return line def create_device_logfilter(): params = {} deviceIds = [] deviceId = devices.select_configured_device() if not deviceId: return None deviceIds.append(deviceId) params['deviceIds'] = deviceIds return params def create_device_state_logfilter(): params = {} deviceIds = [] typeIds = [] loggingSources = [] loggingSources.append("LoggingSourceStates") params['loggingSources'] = loggingSources deviceId = devices.select_configured_device() if not deviceId: return None deviceIds.append(deviceId) params['deviceIds'] = deviceIds device = devices.get_device(deviceId) stateType = states.select_stateType(device['deviceClassId']) if not stateType: return None typeIds.append(stateType['id']) params['typeIds'] = typeIds return params def create_rule_logfilter(): params = {} sources = [] ruleIds = [] rule = rules.select_rule() if not rule: return None ruleIds.append(rule['id']) sources.append("LoggingSourceRules") params['loggingSources'] = sources params['typeIds'] = ruleIds return params def create_last_time_logfilter(minutes): offsetSeconds = 60 * minutes; params = {} timeFilters = [] timeFilter = {} timeFilter['startDate'] = int(time.time()) - offsetSeconds timeFilters.append(timeFilter) params['timeFilters'] = timeFilters return params def create_logfilter(): params = {} boolTypes = ["yes","no"] # Devices selection = nymea.get_selection("Do you want to filter for \"Devices\"? ", boolTypes) if boolTypes[selection] == "yes": deviceIds = [] deviceId = devices.select_configured_device() deviceIds.append(deviceId) finished = False while not finished: selection = nymea.get_selection("Do you want to add an other \"Device\"? 
", boolTypes) if boolTypes[selection] == "no": finished = True break deviceId = devices.select_configured_device() if not deviceId: params['deviceIds'] = deviceIds return params deviceIds.append(deviceId) params['deviceIds'] = deviceIds # LoggingSources selection = nymea.get_selection("Do you want to filter for \"LoggingSource\"? ", boolTypes) if boolTypes[selection] == "yes": sources = [] finished = False loggingSources = ["LoggingSourceSystem", "LoggingSourceEvents", "LoggingSourceActions", "LoggingSourceStates", "LoggingSourceRules"] selection = nymea.get_selection("Please select a \"LoggingSource\": ", loggingSources) if selection: sources.append(loggingSources[selection]) else: finished = True while not finished: selection = nymea.get_selection("Do you want to add an other \"LoggingSource\"? ", boolTypes) if boolTypes[selection] == "no": finished = True break selection = get_selection("Please select a \"LoggingSource\": ", loggingSources) if selection: sources.append(loggingSources[selection]) else: finished = True break params['loggingSources'] = sources # LoggingLevel selection = nymea.get_selection("Do you want to filter for \"LoggingLevel\"? ", boolTypes) if boolTypes[selection] == "yes": levels = [] loggingLevels = ["LoggingLevelInfo", "LoggingLevelAlert"] selection = nymea.get_selection("Please select a \"LoggingLevel\": ", loggingLevels) if selection: levels.append(loggingLevels[selection]) params['loggingLevels'] = levels # LoggingEventType selection = nymea.get_selection("Do you want to filter for \"LoggingEventType\"? 
", boolTypes) if boolTypes[selection] == "yes": types = [] loggingEventTypes = ["LoggingEventTypeTrigger", "LoggingEventTypeActiveChange", "LoggingEventTypeEnabledChange", "LoggingEventTypeActionsExecuted", "LoggingEventTypeExitActionsExecuted"] selection = nymea.get_selection("Please select a \"LoggingEventType\": ", loggingEventTypes) if selection: types.append(loggingEventTypes[selection]) params['eventTypes'] = types # Value selection = nymea.get_selection("Do you want to filter for certain log \"Values\"? ", boolTypes) if boolTypes[selection] == "yes": values = [] finished = False value = raw_input("Please enter value which should be filtered out: ") values.append(value) while not finished: selection = nymea.get_selection("Do you want to add an other \"Value\"? ", boolTypes) if boolTypes[selection] == "no": finished = True break value = raw_input("Please enter value which should be filtered out: ") values.append(value) params['values'] = values # Times selection = nymea.get_selection("Do you want to add a \"TimeFilter\"? ", boolTypes) if boolTypes[selection] == "yes": timeFilters = [] finished = False timeFilters.append(create_time_filter()) while not finished: selection = nymea.get_selection("Do you want to add an other \"TimeFilter\"? 
", boolTypes) if boolTypes[selection] == "no": finished = True break timeFilters.append(create_time_filter()) params['timeFilters'] = timeFilters nymea.print_json_format(params) nymea.debug_stop() return params def create_time_filter(): timeFilter = {} boolTypes = ["yes","no"] selection = nymea.get_selection("Do you want to define a \"Start date\"?", boolTypes) if boolTypes[selection] == "yes": timeFilter['startDate'] = raw_input("Please enter the \"Start date\": ") selection = nymea.get_selection("Do you want to define a \"End date\"?", boolTypes) if boolTypes[selection] == "yes": timeFilter['endDate'] = raw_input("Please enter the \"End date\": ") return timeFilter def get_device_name(entry): global deviceIdCache deviceName = None name = None if entry['deviceId'] in deviceIdCache: deviceName = deviceIdCache[entry['deviceId']] else: device = devices.get_device(entry['deviceId']) deviceName = device['name'] deviceIdCache[entry['deviceId']] = deviceName return deviceName def verify_filter(entry): global logFilter if not logFilter: return True # check if we should filter for deviceIds if 'deviceIds' in logFilter: found = False for deviceId in logFilter['deviceIds']: if deviceId == entry['deviceId']: found = True break if not found: return False # check if we should filter for ruleId if 'typeIds' in logFilter: found = False for ruleId in logFilter['typeIds']: if ruleId == entry['typeId']: found = True break if not found: return False # check if we should filter for loggingSource if 'loggingSources' in logFilter: found = False for loggingSource in logFilter['loggingSources']: if loggingSource == entry['source']: found = True break if not found: return False # check if we should filter for values if 'values' in logFilter: found = False for value in logFilter['values']: if value == entry['value']: found = True break if not found: return False # check if we should filter for loggingLevels if 'loggingLevels' in logFilter: found = False for loggingLevel in 
logFilter['loggingLevels']: if loggingLevel == entry['loggingLevel']: found = True break if not found: return False return True
gpl-2.0
4,584,651,468,606,077,000
31.798425
257
0.561867
false
pgjones/nusoft
nusoft/credentials.py
1
1609
#!/usr/bin/env python # # Credentials # # Collates and stores in memory user credentials needed for downloads # # Author P G Jones - 2014-03-23 <p.g.jones@qmul.ac.uk> : New file. #################################################################################################### import getpass import logging logger = logging.getLogger(__name__) class Credentials(object): """ Receives and stores credentials. :param _username: Download username :param _password: Download password to go with a username :param _token: Instead of username and password use token. """ def __init__(self, token=None): """ Initialise the credentials. :param token: token to use """ self._token = token self._username = None self._password = None if token is not None: logger.debug("Using a token") def authenticate(self): """ Returns either a token or the username and password. :return: token or username password tuple. """ if self._token is not None: return self._token elif self._username is not None: return (self._username, self._password) else: self._username = raw_input("Username:").replace('\n', '') self._password = getpass.getpass("Password:").replace('\n', '') return (self._username, self._password) def reset(self): """ Reset the known username and password.""" self._username = None self._password = None logger.warning("Username/password has been reset")
mit
-1,743,196,074,267,851,000
33.234043
100
0.567433
false
irvined1982/lavaflow
setup.py
1
1269
import os from setuptools import setup README = open(os.path.join(os.path.dirname(__file__), 'README')).read() setup( zip_safe=False, name='django-lavaflow', version='1.1', packages=['lavaFlow'], include_package_data=True, license="GPL 3", description="LavaFlow creates useful reports on the usage of high performance compute clusters. LavaFlow takes data from the batch scheduling system, monitoring, and other tooling, and creates reports that help administrators, managers, and end users better understand their cluster environment.", long_description=README, url="http://ay60dxg.com/projects/lavaflow/", author="David Irvine", author_email="irvined@gmail.com", scripts=['import/lava-import-gridengine.py','import/lava-import-openlava'], classifiers=[ 'Environment :: Web Environment', 'Framework :: Django', 'Programming Language :: Python', 'Operating System :: OS Independent', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', 'Intended Audience :: Science/Research', 'Intended Audience :: System Administrators', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Information Analysis', ], )
gpl-3.0
8,221,223,443,358,147,000
38.65625
299
0.713948
false
pytorch/fairseq
tests/test_dataset.py
1
2916
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import unittest from typing import Sequence from fairseq.data import LanguagePairDataset, ListDataset, RoundRobinZipDatasets from tests.test_train import mock_dict def lang_pair_dataset(lengths: Sequence[int]) -> LanguagePairDataset: tokens = [[i] * l for i, l in enumerate(lengths)] return LanguagePairDataset(ListDataset(tokens), lengths, mock_dict()) def sample(id: int, length: int): return {"id": id, "source": [id] * length, "target": None} class TestDataset(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_round_robin_zip_datasets(self): long_dataset = lang_pair_dataset([10, 9, 8, 11]) short_dataset = lang_pair_dataset([11, 9]) dataset = RoundRobinZipDatasets({"a": long_dataset, "b": short_dataset}) # Dataset is now sorted by sentence length dataset.ordered_indices() assert dataset.longest_dataset is long_dataset self.assertEqual(dict(dataset[0]), {"a": sample(2, 8), "b": sample(1, 9)}) # The item 2 of dataset 'a' is with item (2 % 2 = 0) of dataset 'b' self.assertEqual(dict(dataset[2]), {"a": sample(0, 10), "b": sample(1, 9)}) def test_round_robin_zip_datasets_filtered(self): long_dataset = lang_pair_dataset([10, 20, 8, 11, 1000, 7, 12]) short_dataset = lang_pair_dataset([11, 20, 9, 1000]) dataset = RoundRobinZipDatasets({"a": long_dataset, "b": short_dataset}) # Dataset is now sorted by sentence length idx = dataset.ordered_indices() idx, _ = dataset.filter_indices_by_size(idx, {"a": 19, "b": 900}) self.assertEqual(list(idx), [0, 1, 2, 3, 4]) self.assertEqual(dict(dataset[0]), {"a": sample(5, 7), "b": sample(2, 9)}) self.assertEqual(dict(dataset[2]), {"a": sample(0, 10), "b": sample(1, 20)}) self.assertEqual(dict(dataset[4]), {"a": sample(6, 12), "b": sample(0, 11)}) def 
test_round_robin_zip_datasets_filtered_with_tuple(self): long_dataset = lang_pair_dataset([10, 20, 8, 11, 1000, 7, 12]) short_dataset = lang_pair_dataset([11, 20, 9, 1000]) dataset = RoundRobinZipDatasets({"a": long_dataset, "b": short_dataset}) # Dataset is now sorted by sentence length idx = dataset.ordered_indices() idx, _ = dataset.filter_indices_by_size(idx, 19) self.assertEqual(list(idx), [0, 1, 2, 3, 4]) self.assertEqual(dict(dataset[0]), {"a": sample(5, 7), "b": sample(2, 9)}) self.assertEqual(dict(dataset[2]), {"a": sample(0, 10), "b": sample(2, 9)}) self.assertEqual(dict(dataset[4]), {"a": sample(6, 12), "b": sample(2, 9)})
mit
1,653,650,448,822,139,000
43.181818
84
0.632373
false
IuryAlves/code-challenge
app/load_data.py
1
1329
#!/usr/bin/env python # coding: utf-8 from __future__ import ( print_function, unicode_literals, absolute_import ) import argparse import json import os def get_path(): return unicode(os.path.abspath('.')) def parse_args(): _parser = argparse.ArgumentParser() _parser.add_argument('--fixture', type=str, help='fixture file to load', default='properties.json') _parser.add_argument('--fixture_folder', type=str, default='models/fixtures', help='where fixtures are stored.' ) return _parser.parse_args() def main(base_path): properties_to_save = [] args = parse_args() path = os.path.sep.join([base_path, 'app', args.fixture_folder, args.fixture]) with open(path) as file_: data = json.load(file_) properties = data['properties'] for property_ in properties: property_.pop('id') properties_to_save.append(Property(**property_)) Property.objects.insert(properties_to_save) return len(properties_to_save) if __name__ == '__main__': from app.models.properties import Property base_path = get_path() out = main(base_path) print("{} objects saved".format(out))
mit
-2,716,428,965,792,732,700
25.6
103
0.576373
false
LMSlay/wiper
modules/radare.py
1
2920
# -*- coding: utf-8 -*- # This file is part of Viper - https://github.com/botherder/viper # See the file 'LICENSE' for copying permission. import os import sys import getopt from viper.common.out import * from viper.common.abstracts import Module from viper.core.session import __sessions__ ext = ".bin" run_radare = {'linux2': 'r2', 'darwin': 'r2', 'win32': 'r2'} class Radare(Module): cmd = 'r2' description = 'Start Radare2' authors = ['dukebarman'] def __init__(self): self.is_64b = False self.ext = '' self.server = '' def open_radare(self, filename): directory = filename + ".dir" if not os.path.exists(directory): os.makedirs(directory) destination = directory + "/executable" + self.ext if not os.path.lexists(destination): os.link(filename, destination) command_line = '{} {}{}'.format(run_radare[sys.platform], self.server, destination) os.system(command_line) def run(self): if not __sessions__.is_set(): self.log('error', "No session opened") return def usage(): self.log('', "usage: r2 [-h] [-s]") def help(): usage() self.log('', "") self.log('', "Options:") self.log('', "\t--help (-h)\tShow this help message") self.log('', "\t--webserver (-w)\tStart web-frontend for radare2") self.log('', "") try: opts, argv = getopt.getopt(self.args[0:], 'hw', ['help', 'webserver']) except getopt.GetoptError as e: self.log('', e) return for opt, value in opts: if opt in ('-h', '--help'): help() return elif opt in ('-w', '--webserver'): self.server = "-c=H " filetype = __sessions__.current.file.type if 'x86-64' in filetype: self.is_64b = True arch = '64' if self.is_64b else '32' if 'DLL' in filetype: self.ext = '.dll' to_print = [arch, 'bit DLL (Windows)'] if "native" in filetype: to_print.append('perhaps a driver (.sys)') self.log('info', ' '.join(to_print)) elif 'PE32' in filetype: self.ext = '.exe' self.log('info', ' '.join([arch, 'bit executable (Windows)'])) elif 'shared object' in filetype: self.ext = '.so' self.log('info', ' '.join([arch, 'bit shared object (linux)'])) elif 
'ELF' in filetype: self.ext = '' self.log('info', ' '.join([arch, 'bit executable (linux)'])) else: self.log('error', "Unknown binary") try: self.open_radare(__sessions__.current.file.path) except: self.log('error', "Unable to start Radare2")
bsd-3-clause
5,533,463,339,960,898,000
28.795918
91
0.510616
false
ghetzel/webfriend
webfriend/rpc/base.py
1
2491
""" Implementation of the Chrome Remote DevTools debugging protocol. See: https://chromedevtools.github.io/devtools-protocol """ from __future__ import absolute_import from webfriend.rpc.event import Event from uuid import uuid4 from collections import OrderedDict import logging class Base(object): supports_events = True domain = None def __init__(self, tab): if self.domain is None: raise ValueError("Cannot instantiate an RPC proxy without a domain class property.") self.tab = tab self.callbacks = {} def initialize(self): pass def call(self, method, expect_reply=True, reply_timeout=None, **params): return self.tab.rpc( '{}.{}'.format(self.domain, method), expect_reply=expect_reply, reply_timeout=reply_timeout, **params ) def enable(self): if self.supports_events: self.call('enable') def disable(self): if self.supports_events: self.call('disable') def call_boolean_response(self, method, field='result', **kwargs): if self.call(method, **kwargs).get(field) is True: return True return False def on(self, method, callback): # normalize method name if not method.startswith(self.domain + '.'): method = '{}.{}'.format(self.domain, method) # create handler dict if we need to if method not in self.callbacks: self.callbacks[method] = OrderedDict() callback_id = '{}.event_{}'.format(self.domain, uuid4()) self.callbacks[method][callback_id] = callback logging.debug('Registered event handler {} for event {}'.format( callback_id, method )) return callback_id def remove_handler(self, callback_id): for _, callbacks in self.callbacks.items(): for id, _ in callbacks.items(): if callback_id == id: del callbacks[callback_id] return True return False def trigger(self, method, payload=None): event = Event(self, method, payload) if str(event) in self.callbacks: for callback_id, callback in self.callbacks[str(event)].items(): if callable(callback): response = callback(event) if response is False: break return event
bsd-2-clause
-6,752,092,870,117,730,000
27.306818
96
0.579285
false
lgarren/spack
lib/spack/spack/test/spec_syntax.py
1
20244
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import pytest import shlex import spack import spack.spec as sp from spack.parse import Token from spack.spec import Spec, parse, parse_anonymous_spec from spack.spec import SpecParseError, RedundantSpecError from spack.spec import AmbiguousHashError, InvalidHashError, NoSuchHashError from spack.spec import DuplicateArchitectureError, DuplicateVariantError from spack.spec import DuplicateDependencyError, DuplicateCompilerSpecError # Sample output for a complex lexing. 
complex_lex = [Token(sp.ID, 'mvapich_foo'), Token(sp.DEP), Token(sp.ID, '_openmpi'), Token(sp.AT), Token(sp.ID, '1.2'), Token(sp.COLON), Token(sp.ID, '1.4'), Token(sp.COMMA), Token(sp.ID, '1.6'), Token(sp.PCT), Token(sp.ID, 'intel'), Token(sp.AT), Token(sp.ID, '12.1'), Token(sp.COLON), Token(sp.ID, '12.6'), Token(sp.ON), Token(sp.ID, 'debug'), Token(sp.OFF), Token(sp.ID, 'qt_4'), Token(sp.DEP), Token(sp.ID, 'stackwalker'), Token(sp.AT), Token(sp.ID, '8.1_1e')] # Another sample lexer output with a kv pair. kv_lex = [Token(sp.ID, 'mvapich_foo'), Token(sp.ID, 'debug'), Token(sp.EQ), Token(sp.VAL, '4'), Token(sp.DEP), Token(sp.ID, '_openmpi'), Token(sp.AT), Token(sp.ID, '1.2'), Token(sp.COLON), Token(sp.ID, '1.4'), Token(sp.COMMA), Token(sp.ID, '1.6'), Token(sp.PCT), Token(sp.ID, 'intel'), Token(sp.AT), Token(sp.ID, '12.1'), Token(sp.COLON), Token(sp.ID, '12.6'), Token(sp.ON), Token(sp.ID, 'debug'), Token(sp.OFF), Token(sp.ID, 'qt_4'), Token(sp.DEP), Token(sp.ID, 'stackwalker'), Token(sp.AT), Token(sp.ID, '8.1_1e')] class TestSpecSyntax(object): # ======================================================================== # Parse checks # ======================================================================== def check_parse(self, expected, spec=None, remove_arch=True): """Assert that the provided spec is able to be parsed. If this is called with one argument, it assumes that the string is canonical (i.e., no spaces and ~ instead of - for variants) and that it will convert back to the string it came from. If this is called with two arguments, the first argument is the expected canonical form and the second is a non-canonical input to be parsed. 
""" if spec is None: spec = expected output = sp.parse(spec) parsed = (" ".join(str(spec) for spec in output)) assert expected == parsed def check_lex(self, tokens, spec): """Check that the provided spec parses to the provided token list.""" spec = shlex.split(spec) lex_output = sp.SpecLexer().lex(spec) for tok, spec_tok in zip(tokens, lex_output): if tok.type == sp.ID or tok.type == sp.VAL: assert tok == spec_tok else: # Only check the type for non-identifiers. assert tok.type == spec_tok.type def _check_raises(self, exc_type, items): for item in items: with pytest.raises(exc_type): Spec(item) # ======================================================================== # Parse checks # ======================================================================== def test_package_names(self): self.check_parse("mvapich") self.check_parse("mvapich_foo") self.check_parse("_mvapich_foo") def test_anonymous_specs(self): self.check_parse("%intel") self.check_parse("@2.7") self.check_parse("^zlib") self.check_parse("+foo") self.check_parse("arch=test-None-None", "platform=test") self.check_parse('@2.7:') def test_anonymous_specs_with_multiple_parts(self): # Parse anonymous spec with multiple tokens self.check_parse('@4.2: languages=go', 'languages=go @4.2:') self.check_parse('@4.2: languages=go') def test_simple_dependence(self): self.check_parse("openmpi^hwloc") self.check_parse("openmpi^hwloc^libunwind") def test_dependencies_with_versions(self): self.check_parse("openmpi^hwloc@1.2e6") self.check_parse("openmpi^hwloc@1.2e6:") self.check_parse("openmpi^hwloc@:1.4b7-rc3") self.check_parse("openmpi^hwloc@1.2e6:1.4b7-rc3") def test_multiple_specs(self): self.check_parse("mvapich emacs") def test_multiple_specs_after_kv(self): self.check_parse('mvapich cppflags="-O3 -fPIC" emacs') self.check_parse('mvapich cflags="-O3" emacs', 'mvapich cflags=-O3 emacs') def test_multiple_specs_long_second(self): self.check_parse('mvapich emacs@1.1.1%intel cflags="-O3"', 'mvapich emacs @1.1.1 
%intel cflags=-O3') self.check_parse('mvapich cflags="-O3 -fPIC" emacs^ncurses%intel') def test_full_specs(self): self.check_parse( "mvapich_foo" "^_openmpi@1.2:1.4,1.6%intel@12.1+debug~qt_4" "^stackwalker@8.1_1e") self.check_parse( "mvapich_foo" "^_openmpi@1.2:1.4,1.6%intel@12.1 debug=2 ~qt_4" "^stackwalker@8.1_1e") self.check_parse( 'mvapich_foo' '^_openmpi@1.2:1.4,1.6%intel@12.1 cppflags="-O3" +debug~qt_4' '^stackwalker@8.1_1e') self.check_parse( "mvapich_foo" "^_openmpi@1.2:1.4,1.6%intel@12.1 debug=2 ~qt_4" "^stackwalker@8.1_1e arch=test-redhat6-x86_32") def test_canonicalize(self): self.check_parse( "mvapich_foo" "^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug~qt_4" "^stackwalker@8.1_1e", "mvapich_foo " "^_openmpi@1.6,1.2:1.4%intel@12.1:12.6+debug~qt_4 " "^stackwalker@8.1_1e") self.check_parse( "mvapich_foo" "^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug~qt_4" "^stackwalker@8.1_1e", "mvapich_foo " "^stackwalker@8.1_1e " "^_openmpi@1.6,1.2:1.4%intel@12.1:12.6~qt_4+debug") self.check_parse( "x^y@1,2:3,4%intel@1,2,3,4+a~b+c~d+e~f", "x ^y~f+e~d+c~b+a@4,2:3,1%intel@4,3,2,1") self.check_parse( "x arch=test-redhat6-None " "^y arch=test-None-x86_64 " "^z arch=linux-None-None", "x os=fe " "^y target=be " "^z platform=linux") self.check_parse( "x arch=test-debian6-x86_64 " "^y arch=test-debian6-x86_64", "x os=default_os target=default_target " "^y os=default_os target=default_target") self.check_parse("x^y", "x@: ^y@:") def test_parse_errors(self): errors = ['x@@1.2', 'x ^y@@1.2', 'x@1.2::', 'x::'] self._check_raises(SpecParseError, errors) def _check_hash_parse(self, spec): """Check several ways to specify a spec by hash.""" # full hash self.check_parse(str(spec), '/' + spec.dag_hash()) # partial hash self.check_parse(str(spec), '/ ' + spec.dag_hash()[:5]) # name + hash self.check_parse(str(spec), spec.name + '/' + spec.dag_hash()) # name + version + space + partial hash self.check_parse( str(spec), spec.name + '@' + str(spec.version) + ' /' + spec.dag_hash()[:6]) def 
test_spec_by_hash(self, database): specs = database.mock.db.query() assert len(specs) # make sure something's in the DB for spec in specs: self._check_hash_parse(spec) def test_dep_spec_by_hash(self, database): mpileaks_zmpi = database.mock.db.query_one('mpileaks ^zmpi') zmpi = database.mock.db.query_one('zmpi') fake = database.mock.db.query_one('fake') assert 'fake' in mpileaks_zmpi assert 'zmpi' in mpileaks_zmpi mpileaks_hash_fake = sp.Spec('mpileaks ^/' + fake.dag_hash()) assert 'fake' in mpileaks_hash_fake assert mpileaks_hash_fake['fake'] == fake mpileaks_hash_zmpi = sp.Spec( 'mpileaks %' + str(mpileaks_zmpi.compiler) + ' ^ / ' + zmpi.dag_hash()) assert 'zmpi' in mpileaks_hash_zmpi assert mpileaks_hash_zmpi['zmpi'] == zmpi assert mpileaks_hash_zmpi.compiler == mpileaks_zmpi.compiler mpileaks_hash_fake_and_zmpi = sp.Spec( 'mpileaks ^/' + fake.dag_hash()[:4] + '^ / ' + zmpi.dag_hash()[:5]) assert 'zmpi' in mpileaks_hash_fake_and_zmpi assert mpileaks_hash_fake_and_zmpi['zmpi'] == zmpi assert 'fake' in mpileaks_hash_fake_and_zmpi assert mpileaks_hash_fake_and_zmpi['fake'] == fake def test_multiple_specs_with_hash(self, database): mpileaks_zmpi = database.mock.db.query_one('mpileaks ^zmpi') callpath_mpich2 = database.mock.db.query_one('callpath ^mpich2') # name + hash + separate hash specs = sp.parse('mpileaks /' + mpileaks_zmpi.dag_hash() + '/' + callpath_mpich2.dag_hash()) assert len(specs) == 2 # 2 separate hashes specs = sp.parse('/' + mpileaks_zmpi.dag_hash() + '/' + callpath_mpich2.dag_hash()) assert len(specs) == 2 # 2 separate hashes + name specs = sp.parse('/' + mpileaks_zmpi.dag_hash() + '/' + callpath_mpich2.dag_hash() + ' callpath') assert len(specs) == 3 # hash + 2 names specs = sp.parse('/' + mpileaks_zmpi.dag_hash() + ' callpath' + ' callpath') assert len(specs) == 3 # hash + name + hash specs = sp.parse('/' + mpileaks_zmpi.dag_hash() + ' callpath' + ' / ' + callpath_mpich2.dag_hash()) assert len(specs) == 2 def test_ambiguous_hash(self, database): 
x1 = Spec('a') x1._hash = 'xy' x1._concrete = True x2 = Spec('a') x2._hash = 'xx' x2._concrete = True database.mock.db.add(x1, spack.store.layout) database.mock.db.add(x2, spack.store.layout) # ambiguity in first hash character self._check_raises(AmbiguousHashError, ['/x']) # ambiguity in first hash character AND spec name self._check_raises(AmbiguousHashError, ['a/x']) def test_invalid_hash(self, database): mpileaks_zmpi = database.mock.db.query_one('mpileaks ^zmpi') zmpi = database.mock.db.query_one('zmpi') mpileaks_mpich = database.mock.db.query_one('mpileaks ^mpich') mpich = database.mock.db.query_one('mpich') # name + incompatible hash self._check_raises(InvalidHashError, [ 'zmpi /' + mpich.dag_hash(), 'mpich /' + zmpi.dag_hash()]) # name + dep + incompatible hash self._check_raises(InvalidHashError, [ 'mpileaks ^mpich /' + mpileaks_zmpi.dag_hash(), 'mpileaks ^zmpi /' + mpileaks_mpich.dag_hash()]) def test_nonexistent_hash(self, database): """Ensure we get errors for nonexistant hashes.""" specs = database.mock.db.query() # This hash shouldn't be in the test DB. What are the odds :) no_such_hash = 'aaaaaaaaaaaaaaa' hashes = [s._hash for s in specs] assert no_such_hash not in [h[:len(no_such_hash)] for h in hashes] self._check_raises(NoSuchHashError, [ '/' + no_such_hash, 'mpileaks /' + no_such_hash]) def test_redundant_spec(self, database): """Check that redundant spec constraints raise errors. TODO (TG): does this need to be an error? Or should concrete specs only raise errors if constraints cause a contradiction? 
""" mpileaks_zmpi = database.mock.db.query_one('mpileaks ^zmpi') callpath_zmpi = database.mock.db.query_one('callpath ^zmpi') dyninst = database.mock.db.query_one('dyninst') mpileaks_mpich2 = database.mock.db.query_one('mpileaks ^mpich2') redundant_specs = [ # redudant compiler '/' + mpileaks_zmpi.dag_hash() + '%' + str(mpileaks_zmpi.compiler), # redudant version 'mpileaks/' + mpileaks_mpich2.dag_hash() + '@' + str(mpileaks_mpich2.version), # redundant dependency 'callpath /' + callpath_zmpi.dag_hash() + '^ libelf', # redundant flags '/' + dyninst.dag_hash() + ' cflags="-O3 -fPIC"'] self._check_raises(RedundantSpecError, redundant_specs) def test_duplicate_variant(self): duplicates = [ 'x@1.2+debug+debug', 'x ^y@1.2+debug debug=true', 'x ^y@1.2 debug=false debug=true', 'x ^y@1.2 debug=false ~debug' ] self._check_raises(DuplicateVariantError, duplicates) def test_duplicate_dependency(self): self._check_raises(DuplicateDependencyError, ["x ^y ^y"]) def test_duplicate_compiler(self): duplicates = [ "x%intel%intel", "x%intel%gcc", "x%gcc%intel", "x ^y%intel%intel", "x ^y%intel%gcc", "x ^y%gcc%intel" ] self._check_raises(DuplicateCompilerSpecError, duplicates) def test_duplicate_architecture(self): duplicates = [ "x arch=linux-rhel7-x86_64 arch=linux-rhel7-x86_64", "x arch=linux-rhel7-x86_64 arch=linux-rhel7-ppc64le", "x arch=linux-rhel7-ppc64le arch=linux-rhel7-x86_64", "y ^x arch=linux-rhel7-x86_64 arch=linux-rhel7-x86_64", "y ^x arch=linux-rhel7-x86_64 arch=linux-rhel7-ppc64le" ] self._check_raises(DuplicateArchitectureError, duplicates) def test_duplicate_architecture_component(self): duplicates = [ "x os=fe os=fe", "x os=fe os=be", "x target=fe target=fe", "x target=fe target=be", "x platform=test platform=test", "x os=fe platform=test target=fe os=fe", "x target=be platform=test os=be os=fe" ] self._check_raises(DuplicateArchitectureError, duplicates) # ======================================================================== # Lex checks # 
======================================================================== def test_ambiguous(self): # This first one is ambiguous because - can be in an identifier AND # indicate disabling an option. with pytest.raises(AssertionError): self.check_lex( complex_lex, "mvapich_foo" "^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug-qt_4" "^stackwalker@8.1_1e" ) # The following lexes are non-ambiguous (add a space before -qt_4) # and should all result in the tokens in complex_lex def test_minimal_spaces(self): self.check_lex( complex_lex, "mvapich_foo" "^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug -qt_4" "^stackwalker@8.1_1e") self.check_lex( complex_lex, "mvapich_foo" "^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug~qt_4" "^stackwalker@8.1_1e") def test_spaces_between_dependences(self): self.check_lex( complex_lex, "mvapich_foo " "^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug -qt_4 " "^stackwalker @ 8.1_1e") self.check_lex( complex_lex, "mvapich_foo " "^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug~qt_4 " "^stackwalker @ 8.1_1e") def test_spaces_between_options(self): self.check_lex( complex_lex, "mvapich_foo " "^_openmpi @1.2:1.4,1.6 %intel @12.1:12.6 +debug -qt_4 " "^stackwalker @8.1_1e") def test_way_too_many_spaces(self): self.check_lex( complex_lex, "mvapich_foo " "^ _openmpi @1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug - qt_4 " "^ stackwalker @ 8.1_1e") self.check_lex( complex_lex, "mvapich_foo " "^ _openmpi @1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug ~ qt_4 " "^ stackwalker @ 8.1_1e") def test_kv_with_quotes(self): self.check_lex( kv_lex, "mvapich_foo debug='4' " "^ _openmpi @1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug - qt_4 " "^ stackwalker @ 8.1_1e") self.check_lex( kv_lex, 'mvapich_foo debug="4" ' "^ _openmpi @1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug - qt_4 " "^ stackwalker @ 8.1_1e") self.check_lex( kv_lex, "mvapich_foo 'debug = 4' " "^ _openmpi @1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug - qt_4 " "^ stackwalker @ 8.1_1e") def test_kv_without_quotes(self): 
self.check_lex( kv_lex, "mvapich_foo debug=4 " "^ _openmpi @1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug - qt_4 " "^ stackwalker @ 8.1_1e") def test_kv_with_spaces(self): self.check_lex( kv_lex, "mvapich_foo debug = 4 " "^ _openmpi @1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug - qt_4 " "^ stackwalker @ 8.1_1e") self.check_lex( kv_lex, "mvapich_foo debug =4 " "^ _openmpi @1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug - qt_4 " "^ stackwalker @ 8.1_1e") self.check_lex( kv_lex, "mvapich_foo debug= 4 " "^ _openmpi @1.2 : 1.4 , 1.6 % intel @ 12.1 : 12.6 + debug - qt_4 " "^ stackwalker @ 8.1_1e") @pytest.mark.parametrize('spec,anon_spec,spec_name', [ ('openmpi languages=go', 'languages=go', 'openmpi'), ('openmpi @4.6:', '@4.6:', 'openmpi'), ('openmpi languages=go @4.6:', 'languages=go @4.6:', 'openmpi'), ('openmpi @4.6: languages=go', '@4.6: languages=go', 'openmpi'), ]) def test_parse_anonymous_specs(spec, anon_spec, spec_name): expected = parse(spec) spec = parse_anonymous_spec(anon_spec, spec_name) assert len(expected) == 1 assert spec in expected
lgpl-2.1
-8,615,010,693,867,667,000
35.475676
79
0.528453
false
kennydo/rename-archive-extension
Nautilus/rename_archive.py
1
8817
from gi.repository import Nautilus, GObject, Gtk import functools import os import os.path import urllib import urlparse import zipfile try: import rarfile except ImportError: rarfile = None if rarfile: # The default separator is '\\', which is different from what zipfile uses rarfile.PATH_SEP = '/' # I put these in a tuple so that they don't accidentally get mutated. ZIP_MIME_TYPES = tuple(['application/zip', 'application/x-zip', 'application/zip-compressed']) RAR_MIME_TYPES = tuple(['application/rar', 'application/x-rar', 'application/x-rar-compressed']) def get_file_path(file_info): """Returns the simple file path from a Nautilus.FileInfo. Gets the "/path/to/file" part from "file:///path/to/file". Args: file_info: a Nautilus.FileInfo instance Returns: A string representing a Unix path """ uri = file_info.get_uri() return urllib.unquote(urlparse.urlparse(uri).path) def get_new_file_path(archive_path, directory_name): """Gets the proposed new path for an archive if it's renamed Creates the full path of an archive if it is renamed after a directory. It keeps the path of directories leading up to the base name, as well as the file extension. Calling this function with "/path/to/file.zip" and "dir-name" would return: "/path/to/dir-name.zip". Args: archive_path: A string representing the full path of the archive directory_name: String value of the directory we want to rename this archive after. Returns: A string of the proposed file path after the archive has been renamed after the given directory name. """ if '.' in archive_path: extension = archive_path.rsplit('.', 1)[1] base_name = directory_name + '.' 
+ extension else: base_name = directory_name return os.path.join(os.path.dirname(archive_path), base_name) def lru_cache(size): """Simple LRU cache""" def outer(f): prev_inputs = list() prev_outputs = dict() @functools.wraps(f) def wrapper(function_input): if function_input in prev_inputs: return prev_outputs[function_input] function_output = f(function_input) if len(prev_inputs) >= size: dead_path = prev_inputs[0] del prev_inputs[0] del prev_outputs[dead_path] prev_inputs.append(function_input) prev_outputs[function_input] = function_output return function_output return wrapper return outer @lru_cache(32) def get_zip_directory_names(filename): """Gets the list of directories inside a ZIP archive Reads the directory names inside of a ZIP archive, and returns a list of each directory name (without its parent directories). Args: filename: A string that can be a relative filename or file path (it doesn't matter as long as this script can read it) of a ZIP file Returns: A list of directory name strings. """ names = list() try: with zipfile.ZipFile(filename, 'r') as zip_file: names = [fname for fname in zip_file.namelist() if fname.endswith('/')] except zipfile.BadZipfile as e: print(e) directory_names = [os.path.basename(dir_name[:-1]) for dir_name in names] return directory_names @lru_cache(32) def get_rar_directory_names(filename): """Gets the list of directories inside a RAR archive Reads the directory names inside of a RAR archive, and returns a list of each directory name (without its parent directories). Args: filename: A string that can be a relative filename or file path (it doesn't matter as long as this script can read it) of a ZIP file Returns: A list of directory name strings. 
""" names = list() try: with rarfile.RarFile(filename, 'r') as rar_file: names = [info.filename for info in rar_file.infolist() if info.isdir()] except rarfile.Error as e: print(e) directory_names = [os.path.basename(dir_name) for dir_name in names] return directory_names class RenameDialog(GObject.GObject): """Wrapped Gtk Message Dialog class""" def __init__(self, window, original_name, new_name): self.dialog = Gtk.MessageDialog(window, 0, Gtk.MessageType.QUESTION, Gtk.ButtonsType.YES_NO, "Rename Archive?") self.dialog.format_secondary_text( "Do you want to rename\n\"{0}\" to\n\"{1}\"".format( original_name, new_name)) def run(self): self.response = self.dialog.run() def destroy(self): self.dialog.destroy() class RenameArchiveProvider(GObject.GObject, Nautilus.MenuProvider): """Creates a submenu to rename archives after the name of a directory within the archive. """ def __init__(self): self.supported_mime_types = list(ZIP_MIME_TYPES) if rarfile: self.supported_mime_types += list(RAR_MIME_TYPES) def rename_directory_menuitem_cb(self, menu, cb_parameters): """Callback for when the user clicks on a directory name to rename an archive after. This displays a dialog that the user responds to with a Yes or No. If the user clicks Yes, then this attempts to rename the file. Args: menu: the Nautilus.Menu that was the source of the click cb_parameters: a tuple of type (Nautilus.FileInfo, Gtk.Window, string) Returns: Nothing. 
""" file_info, window, directory_name = cb_parameters if file_info.is_gone() or not file_info.can_write(): return old_path = get_file_path(file_info) old_name = os.path.basename(old_path) new_path = get_new_file_path(old_path, directory_name) new_name = os.path.basename(new_path) dialog = RenameDialog(window, old_name, new_name) dialog.run() dialog.destroy() if dialog.response == Gtk.ResponseType.YES: try: os.rename(old_path, new_path) except os.OSError as e: print(e) def get_file_items(self, window, files): if len(files) != 1: return selected_file = files[0] if selected_file.get_uri_scheme() != 'file': # Not sure which URIs zipfile supports reading from return mime_type = selected_file.get_mime_type() if mime_type in self.supported_mime_types: top_menuitem = Nautilus.MenuItem( name='RenameArchiveProvider::Rename Archive', label='Rename Archive', tip='Rename archive based on its directory names', icon='') names_menu = Nautilus.Menu() top_menuitem.set_submenu(names_menu) # create the submenu items file_path = get_file_path(selected_file) if mime_type in ZIP_MIME_TYPES: directory_names = get_zip_directory_names(file_path) elif mime_type in RAR_MIME_TYPES: directory_names = get_rar_directory_names(file_path) else: directory_names = None if not directory_names: no_directories_menuitem = Nautilus.MenuItem( name='RenameArchiveProvider::No Directories', label='No directory names found', tip='', icon='') names_menu.append_item(no_directories_menuitem) else: for directory_name in directory_names: name = 'RenameArchiveProvider::Directory::' + \ directory_name label = 'Rename to "' + \ directory_name.replace('_', '__') + '"' # we have to perform the underscore replacement in the label to get it to show up dir_menuitem = Nautilus.MenuItem( name=name, label=label, tip=label, icon='') dir_menuitem.connect( 'activate', self.rename_directory_menuitem_cb, (selected_file, window, directory_name)) names_menu.append_item(dir_menuitem) return [top_menuitem] else: return
mit
-8,911,151,886,611,844,000
32.524715
101
0.58047
false
chrys87/orca-beep
test/keystrokes/firefox/aria_button.py
1
1801
#!/usr/bin/python """Test of ARIA button presentation.""" from macaroon.playback import * import utils sequence = MacroSequence() #sequence.append(WaitForDocLoad()) sequence.append(PauseAction(5000)) sequence.append(utils.StartRecordingAction()) sequence.append(KeyComboAction("Tab")) sequence.append(utils.AssertPresentationAction( "1. Tab to Tracking number text entry", ["BRAILLE LINE: 'Tracking number $l'", " VISIBLE: 'Tracking number $l', cursor=17", "BRAILLE LINE: 'Focus mode'", " VISIBLE: 'Focus mode', cursor=0", "SPEECH OUTPUT: 'Tracking number entry'", "SPEECH OUTPUT: 'Focus mode' voice=system"])) sequence.append(utils.StartRecordingAction()) sequence.append(KeyComboAction("Tab")) sequence.append(utils.AssertPresentationAction( "2. Tab to Check Now push button", ["BRAILLE LINE: 'Tracking number $l'", " VISIBLE: 'Tracking number $l', cursor=17", "BRAILLE LINE: 'Check Now push button'", " VISIBLE: 'Check Now push button', cursor=1", "BRAILLE LINE: 'Browse mode'", " VISIBLE: 'Browse mode', cursor=0", "SPEECH OUTPUT: 'Check Now push button Check to see if your order has been shipped.'", "SPEECH OUTPUT: 'Browse mode' voice=system"])) sequence.append(utils.StartRecordingAction()) sequence.append(KeyComboAction("KP_Enter")) sequence.append(utils.AssertPresentationAction( "3. Basic whereamI", ["BRAILLE LINE: 'Check Now push button'", " VISIBLE: 'Check Now push button', cursor=1", "BRAILLE LINE: 'Check Now push button'", " VISIBLE: 'Check Now push button', cursor=1", "SPEECH OUTPUT: 'Check Now push button Check to see if your order has been shipped.'"])) sequence.append(utils.AssertionSummaryAction()) sequence.start()
lgpl-2.1
-427,095,397,982,902,140
36.520833
93
0.681843
false
YangWanjun/areaparking
utils/constants.py
1
11566
# -*- coding: utf-8 -*- from __future__ import unicode_literals SYSTEM_NAME = "エリアパーキング" END_DATE = '9999-12-31' DATABASE_DEFAULT = "default" DATABASE_REVOLUTION = "fk5dtsql" MIME_TYPE_EXCEL = 'application/excel' MIME_TYPE_PDF = 'application/pdf' MIME_TYPE_ZIP = 'application/zip' MIME_TYPE_HTML = 'text/html' CONFIG_GROUP_SYSTEM = 'system' CONFIG_GROUP_GOOGLE = 'google' CONFIG_GROUP_YAHOO = 'yahoo' CONFIG_GROUP_EMAIL = 'email' CONFIG_GROUP_ADJUST_SIZE = 'size' CONFIG_EMAIL_ADDRESS = 'email_address' CONFIG_EMAIL_SMTP_HOST = 'email_smtp_host' CONFIG_EMAIL_SMTP_PORT = 'email_smtp_port' CONFIG_EMAIL_PASSWORD = 'email_password' CONFIG_CIRCLE_RADIUS = 'circle_radius' CONFIG_DOMAIN_NAME = 'domain_name' CONFIG_PAGE_SIZE = 'page_size' CONFIG_DECIMAL_TYPE = 'decimal_type' CONFIG_CONSUMPTION_TAX_RATE = 'consumption_tax_rate' CONFIG_CAR_LENGTH_ADJUST = 'car_length_adjust' CONFIG_CAR_WIDTH_ADJUST = 'car_width_adjust' CONFIG_CAR_HEIGHT_ADJUST = 'car_height_adjust' CONFIG_CAR_WEIGHT_ADJUST = 'car_weight_adjust' CONFIG_URL_TIMEOUT = 'url_timeout' CONFIG_GCM_URL = 'gcm_url' CONFIG_FIREBASE_SERVERKEY = 'firebase_serverkey' CONFIG_GOOGLE_MAP_KEY = 'google_map_key' CONFIG_YAHOO_APP_KEY = 'yahoo_app_id' CONFIG_FURIGANA_SERVICE_URL = 'furigana_service_url' CONFIG_PARKING_LOT_KEY_ALERT_PERCENT = 'parking_lot_key_alert_percent' CONFIG_SIMPLE_SUBSCRIPTION_PERSIST_TIME = 'simple_subscription_persist_time' REG_TEL = r'^\d+[0-9-]+\d+$' REG_POST_CODE = r"\d{3}[-]?\d{4}" REG_NUMBER = r'^[0-9]+$' REG_MULTI_POSITIONS = r"^[0-9]+$|^[0-9]+-[0-9]+$|^\d+(?:,\d+)*\Z" REG_CONTINUED_POSITIONS = r"^([0-9]+)-([0-9]+)$" REPORT_SUBSCRIPTION_CONFIRM = "申込確認書" REPORT_SUBSCRIPTION = "申込書" CHOICE_CONTRACTOR_TYPE = ( ('1', '個人'), ('2', '法人'), ) CHOICE_GENDER = ( ('1', '男'), ('2', '女'), ) CHOICE_MARRIED = ( ('0', "未婚"), ('1', "既婚"), ) CHOICE_PAPER_DELIVERY_TYPE = ( ('01', '基本情報の住所'), ('02', '勤務先'), ('03', '連絡先'), ('04', '保証人'), ('99', 'その他'), ) CHOICE_HONORIFIC = ( ('1', '様'), ('2', '御中'), ) 
CHOICE_BANK_ACCOUNT_TYPE = ( ('1', "普通預金"), ('2', "定期預金"), ('3', "総合口座"), ('4', "当座預金"), ('5', "貯蓄預金"), ('6', "大口定期預金"), ('7', "積立定期預金") ) CHOICE_BANK_DEPOSIT_TYPE = ( ('1', "普通"), ('2', "当座"), ('4', "貯蓄"), ('9', "その他"), ) CHOICE_BANK_POST_KBN = ( (1, "銀行"), (2, "郵便局"), ) CHOICE_MANAGEMENT_TYPE = ( ('01', '管理委託'), ('02', '一括借上'), ('03', '一般物件'), ('04', '自社物件'), ) CHOICE_KEY_CATEGORY = ( ('01', '鍵'), ('02', 'カード'), ('03', 'リモコン'), ('04', 'パスワード'), ('05', 'その他の鍵'), ) CHOICE_PAY_TIMING = ( ('10', '契約時'), ('11', '契約開始月'), ('20', '更新時'), ('30', '翌月以降'), ('40', '一時'), ('41', '保管場所承諾証明書発行手数料'), ('42', '繰越') ) CHOICE_TAX_KBN = ( ('1', '税抜'), ('2', '税込'), ) CHOICE_DECIMAL_TYPE = ( ('0', '切り捨て'), ('1', '四捨五入'), ('2', '切り上げ'), ) CHOICE_PRICE_KBN = ( ('01', 'チラシ価格'), ('02', 'ホームページ価格'), ) CHOICE_PARKING_STATUS = ( ('01', '空き'), ('02', '手続中'), ('03', '空無'), ('04', '仮押'), ('05', '貸止'), ) CHOICE_MAIL_GROUP = ( ('010', '申込み用フォーム送付'), ('011', '申込み用フォーム入力完了'), ('012', '審査用フォーム送付'), ('013', '審査用フォーム入力完了'), ('040', '契約フォーム送付'), ('041', '契約フォーム入力完了'), ('042', '契約書送付'), ('060', '鍵類、操作説明書、配置図送付'), ('310', '一般解約書類送付'), ('322', '物件解約書類送付'), ('800', 'バッチ:鍵残件数アラート'), ) CHOICE_REPORT_KBN = ( ('001', REPORT_SUBSCRIPTION), ('002', REPORT_SUBSCRIPTION_CONFIRM), # ('01', '申込書'), # ('01', '申込書'), # ('01', '申込書'), # ('01', '申込書'), # ('01', '申込書'), # ('01', '申込書'), # ('01', '申込書'), # ('01', '申込書'), ) CHOICE_PROCESS = ( ('01', '申込みから成約まで'), ('20', '支払方法変更'), # ('21', '名義変更'), ('22', '車室変更'), ('23', '車両変更'), ('24', '鍵紛失'), ('25', '保管場所使用承諾証明書発行'), ('26', '値上げ更新'), ('27', '任意保険・自賠責保険更新'), ('28', '返金'), ('31', '解約'), ('32', '物件解約(承継なし)'), ('33', '物件解約(承継あり)'), ) CHOICE_TASK_SUBSCRIPTION_CATEGORY = [ # 01 申込 ('010', '申込み用フォーム送付'), ('011', '申込み情報確認'), ('012', '審査用フォーム送付'), # 03 審査 # ('030', '住所・電話番号 審査・確認'), ('031', '勤め先審査'), ('032', '車両サイズ審査'), # ('033', '申込ルート元審査'), ('034', '書類審査'), # 契約 ('040', '契約フォーム送付'), ('041', '契約情報確認'), ('042', '契約書送付'), # 入金 ('050', '入金確認'), ('060', '鍵類、操作説明書、配置図送付'), ] 
CHOICE_TASK_CREDIT_CATEGORY = [ ('200', '決済申込書発行'), ('201', '決済申込書確認'), ] # CHOICE_TASK_NAME_CATEGORY = [ # ('210', '契約書及び請求書の発行'), # ('211', '入金確認'), # ('212', '新契約書・請求書の送付'), # ('213', '結果確認'), # ] CHOICE_TASK_CHANGE_POSITION = [ ('220', '契約書等送付'), ('221', '書類確認'), ] CHOICE_TASK_CHANGE_CAR = [ ('230', '書類発行'), ] CHOICE_TASK_KEY_LOST = [ ('240', '「落し物」の有無確認'), ('241', '書類発行'), ('242', '入金確認'), ('243', '必要書類一式と操作鍵類の送付'), ('244', '操作鍵類の見積り依頼(オーナー側)'), ('245', '操作鍵類の発注/入金'), ] CHOICE_TASK_PRICE_RAISE = [ ('260', '更新書類の発行'), ('261', '更新書類の確認'), ] CHOICE_TASK_CONTRACT_CANCELLATION = [ ('310', '退出届送付'), ('311', '解約処理'), ('312', '鍵返送案内'), ('313', '鍵回収'), ] CHOICE_TASK_POSITION_CANCELLATION_WITHOUT_CONTINUE = [ ('320', '代替駐車場の調査'), ('321', 'ユーザーへ連絡'), ('322', '強制解約書類送付'), ('323', '滞納金確認'), ('324', '返金確認'), ('325', '鍵返送案内'), ('326', '鍵回収'), ] CHOICE_TASK_POSITION_CANCELLATION_WITH_CONTINUE = [ ('330', 'ユーザーへ連絡'), ('331', '承継承諾書送付'), ('332', '滞納金確認'), ('333', '返金確認'), ('334', '予備分の操作鍵類と契約時書類オーナー側へ送付'), ] CHOICE_TASK_CATEGORY = CHOICE_TASK_SUBSCRIPTION_CATEGORY + \ CHOICE_TASK_CREDIT_CATEGORY + \ CHOICE_TASK_CHANGE_POSITION + \ CHOICE_TASK_CHANGE_CAR + \ CHOICE_TASK_KEY_LOST + \ CHOICE_TASK_PRICE_RAISE + \ CHOICE_TASK_CONTRACT_CANCELLATION + \ CHOICE_TASK_POSITION_CANCELLATION_WITHOUT_CONTINUE + \ CHOICE_TASK_POSITION_CANCELLATION_WITH_CONTINUE CHOICE_TASK_STATUS = ( ('01', '未実施'), ('02', '実施中'), ('10', 'スキップ'), ('20', '見送る'), ('99', '完了'), ) CHOICE_CONTRACT_STATUS = ( ('01', '仮契約'), ('11', '本契約'), ('21', '破棄'), ) CHOICE_SUBSCRIPTION_STATUS = ( ('01', '新規申込'), ('02', '申込フォーム送付済'), ('03', '申込フォーム入力完了'), ('04', '審査フォーム送付済'), ('05', '審査フォーム入力完了'), ('06', '契約フォーム送付済'), ('07', '契約フォーム入力完了'), ('08', '契約書送付済'), ('09', '鍵類、操作説明書、配置図送付済'), ('11', '成約'), ('12', '破棄'), ) CHOICE_INSURANCE_JOIN_STATUS = ( ('within', '加入中'), ('without', '加入なし'), ('plans', '加入予定'), ) CHOICE_CONTRACT_PERIOD = ( ('long', '1年間(その後自動更新)'), ('short', '1・2ヶ月契約'), ) CHOICE_IS_REQUIRED = ( ('yes', '必要'), ('no', 
'不要'), ) CHOICE_TRANSFER_STATUS = ( ('00', '請求なし'), ('01', '金額不一致'), ('02', '名義不一致'), ('03', '繰越'), ('11', '完全一致'), ('99', 'その他'), ) CHOICE_PAYMENT_KBN = ( ('01', '振込'), ('02', '振替'), ('03', 'クレジット'), ) CHOICE_WAITING_STATUS = ( ('01', '新規'), ('10', '成約'), ('90', 'キャンセル'), ) CHOICE_BANK_ACCOUNT_STATUS = ( ('0', '使用なし'), ('1', '使用中'), ) CHOICE_TROUBLE_STATUS = ( ('01', '新規'), ('02', '対応中'), ('03', '完了'), ) CHOICE_SUBSCRIPTION_LIST_SEND_TYPE = ( ('01', '両方'), ('02', '賃貸管理会社'), ('03', '建物管理会社'), ) ERROR_SETTING_NO_SUBSCRIPTION = "申込書の出力書式が設定されていません、管理サイトで「出力書式」->「申込書一覧」にて設定してください。" ERROR_SETTING_NO_SUBSCRIPTION_CONFIRM = "申込確認書の出力書式が設定されていません、管理サイトで「出力書式」->「申込確認書一覧」にて設定してください。" ERROR_REQUEST_SIGNATURE = "サインしてください。" ERROR_PREV_TASK_UNFINISHED = '前のタスクは処理していないので、完了できません!' ERROR_SUBSCRIPTION_NO_CAR = '車情報がありません。' ERROR_SUBSCRIPTION_LOCKED = '貸止めになっているため、申込みはできません。' ERROR_SUBSCRIPTION_CONTRACTED = "既に契約中なので、申込みはできません。" ERROR_SUBSCRIPTION_PROCESS_NOT_FINISHED = "契約手続きはまだ完了されていません。" ERROR_SUBSCRIPTION_EMAIL_CONFIRM = "メールアドレスとメールアドレス(確認)は不一致です。" ERROR_SUBSCRIPTION_PRIVACY_AGREEMENT = "プライバシーポリシーおよび利用規約に承諾してください。" ERROR_CONTRACT_WRONG_RETIRE_DATE = "退居予定日は解約日の前に選択してください。" ERROR_CONTRACT_RETIRE_DATE_RANGE = "退居予定日は契約期間内に選択してください。" ERROR_CONTRACT_CANCELLATION_DATE_RANGE = "解約日は契約期間内に選択してください。" ERROR_PARKING_LOT_NOT_EXISTS = "駐車場は存在しません。" ERROR_PARKING_LOT_INVALID_STAFF_START_DATE = "担当者の担当開始日は間違っている、履歴の最終日以降に設定してください。" ERROR_PARKING_LOT_CANCELLATION_NO_POSITIONS = "物件解約の場合全体解約または車室を選択してください。" ERROR_PARKING_POSITION_NAME_NUMBER = "車室番号は数字だけを入力してください。" ERROR_PARKING_POSITION_RANGE = "範囲指定は間違っています。" ERROR_FORMAT_BANK_TRANSFER = "全銀フォーマットエラー。" ERROR_FORMAT_BANK_TRANSFER_CANNOT_IMPORT = "ファイル読み込みできません。" ERROR_REQUIRE_TRANSFER_DATA = "入金データを選択してください。" ERROR_REQUIRED_FIELD = "%s は必須項目です。"
apache-2.0
438,389,379,480,348,300
24.060519
97
0.542778
false
lig/picket_classic
apps/picket/__init__.py
1
1783
""" Copyright 2008 Serge Matveenko This file is part of Picket. Picket is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Picket is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Picket. If not, see <http://www.gnu.org/licenses/>. """ """ dictionary for storing copyrights and other project stuff """ COPYING = {} """ Picket home page url. """ COPYING['URL'] = 'http://picket.nophp.ru/' """ Picket version. Possibly with branch name """ COPYING['PICKET_VERSION'] = '0.3-master' """ List of Picket authors in order of their code appearence """ COPYING['AUTHORS'] = ['Serge Matveenko', 'TrashNRoll'] """ List of years of project development """ COPYING['YEARS'] = [2008, 2009, 2010,] """ GPL warning text as of 2008-10-10 """ COPYING['WARNING'] = \ """Picket is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Picket is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Picket. If not, see <http://www.gnu.org/licenses/>."""
gpl-3.0
-7,424,519,642,878,921,000
28.716667
68
0.748738
false
bchareyre/trial
examples/test/periodic-triax.py
1
1557
# coding: utf-8 # 2009 © Václav Šmilauer <eudoxos@arcig.cz> "Test and demonstrate use of PeriTriaxController." from yade import * from yade import pack,qt O.periodic=True O.cell.hSize=Matrix3(0.1, 0, 0, 0 ,0.1, 0, 0, 0, 0.1) sp=pack.SpherePack() radius=5e-3 num=sp.makeCloud(Vector3().Zero,O.cell.refSize,radius,.2,500,periodic=True) # min,max,radius,rRelFuzz,spheresInCell,periodic O.bodies.append([sphere(s[0],s[1]) for s in sp]) O.engines=[ ForceResetter(), InsertionSortCollider([Bo1_Sphere_Aabb()],verletDist=.05*radius), InteractionLoop( [Ig2_Sphere_Sphere_ScGeom()], [Ip2_FrictMat_FrictMat_FrictPhys()], [Law2_ScGeom_FrictPhys_CundallStrack()] ), #PeriTriaxController(maxUnbalanced=0.01,relStressTol=0.02,goal=[-1e4,-1e4,0],stressMask=3,globUpdate=5,maxStrainRate=[1.,1.,1.],doneHook='triaxDone()',label='triax'), #using cell inertia PeriTriaxController(dynCell=True,mass=0.2,maxUnbalanced=0.01,relStressTol=0.02,goal=(-1e4,-1e4,0),stressMask=3,globUpdate=5,maxStrainRate=(1.,1.,1.),doneHook='triaxDone()',label='triax'), NewtonIntegrator(damping=.2), ] O.dt=PWaveTimeStep() O.run(); qt.View() phase=0 def triaxDone(): global phase if phase==0: print 'Here we are: stress',triax.stress,'strain',triax.strain,'stiffness',triax.stiff print 'Now εz will go from 0 to .2 while σx and σy will be kept the same.' triax.goal=(-1e4,-1e4,-0.2) phase+=1 elif phase==1: print 'Here we are: stress',triax.stress,'strain',triax.strain,'stiffness',triax.stiff print 'Done, pausing now.' O.pause()
gpl-2.0
248,881,559,573,020,220
30.653061
188
0.710509
false
KangHsi/youtube-8m
model_utils.py
1
4302
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Contains a collection of util functions for model construction. """ import numpy import tensorflow as tf from tensorflow import logging from tensorflow import flags import tensorflow.contrib.slim as slim def SampleRandomSequence(model_input, num_frames, num_samples): """Samples a random sequence of frames of size num_samples. Args: model_input: A tensor of size batch_size x max_frames x feature_size num_frames: A tensor of size batch_size x 1 num_samples: A scalar Returns: `model_input`: A tensor of size batch_size x num_samples x feature_size """ batch_size = tf.shape(model_input)[0] frame_index_offset = tf.tile( tf.expand_dims(tf.range(num_samples), 0), [batch_size, 1]) max_start_frame_index = tf.maximum(num_frames - num_samples, 0) start_frame_index = tf.cast( tf.multiply( tf.random_uniform([batch_size, 1]), tf.cast(max_start_frame_index + 1, tf.float32)), tf.int32) frame_index = tf.minimum(start_frame_index + frame_index_offset, tf.cast(num_frames - 1, tf.int32)) batch_index = tf.tile( tf.expand_dims(tf.range(batch_size), 1), [1, num_samples]) index = tf.stack([batch_index, frame_index], 2) return tf.gather_nd(model_input, index) def SampleRandomFrames(model_input, num_frames, num_samples): """Samples a random set of frames of size num_samples. 
Args: model_input: A tensor of size batch_size x max_frames x feature_size num_frames: A tensor of size batch_size x 1 num_samples: A scalar Returns: `model_input`: A tensor of size batch_size x num_samples x feature_size """ batch_size = tf.shape(model_input)[0] frame_index = tf.cast( tf.multiply( tf.random_uniform([batch_size, num_samples]), tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])), tf.int32) batch_index = tf.tile( tf.expand_dims(tf.range(batch_size), 1), [1, num_samples]) index = tf.stack([batch_index, frame_index], 2) return tf.gather_nd(model_input, index) def SampleFramesOrdered(model_input, num_frames, num_samples): """Samples a random set of frames of size num_samples. Args: model_input: A tensor of size batch_size x max_frames x feature_size num_frames: A tensor of size batch_size x 1 num_samples: A scalar Returns: `model_input`: A tensor of size batch_size x num_samples x feature_size """ batch_size = tf.shape(model_input)[0] tmp=tf.tile(tf.range(0.0,1.0,1.0/num_samples),[batch_size]) frame_index = tf.cast( tf.multiply( tf.reshape(tmp,[batch_size,num_samples]), tf.tile(tf.cast(num_frames, tf.float32), [1, num_samples])), tf.int32) batch_index = tf.tile( tf.expand_dims(tf.range(batch_size), 1), [1, num_samples]) index = tf.stack([batch_index, frame_index], 2) return tf.gather_nd(model_input, index) def FramePooling(frames, method, **unused_params): """Pools over the frames of a video. Args: frames: A tensor with shape [batch_size, num_frames, feature_size]. method: "average", "max", "attention", or "none". Returns: A tensor with shape [batch_size, feature_size] for average, max, or attention pooling. A tensor with shape [batch_size*num_frames, feature_size] for none pooling. Raises: ValueError: if method is other than "average", "max", "attention", or "none". 
""" if method == "average": return tf.reduce_mean(frames, 1) elif method == "max": return tf.reduce_max(frames, 1) elif method == "none": feature_size = frames.shape_as_list()[2] return tf.reshape(frames, [-1, feature_size]) else: raise ValueError("Unrecognized pooling method: %s" % method)
apache-2.0
4,521,161,258,369,963,000
34.85
80
0.682938
false
andhit-r/opnsynid-accounting-report
opnsynid_asset_account/models/res_company.py
1
1348
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (c) 2015 Andhitia Rama. All rights reserved. # @author Andhitia Rama # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models, fields class res_company(models.Model): """override company to add asset account""" _inherit = 'res.company' _name = 'res.company' asset_ids = fields.Many2many( string='Assets', comodel_name='account.account', relation='rel_company_2_asset_acc', column1='company_id', column2='account_id', )
agpl-3.0
5,641,121,555,060,802,000
36.444444
78
0.602374
false
Azure/azure-sdk-for-python
sdk/keyvault/azure-keyvault-secrets/tests/test_secrets_client.py
1
13664
# ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ import functools from dateutil import parser as date_parse import time import logging import json from azure.core.exceptions import ResourceExistsError, ResourceNotFoundError from azure.core.pipeline.policies import SansIOHTTPPolicy from azure.keyvault.secrets import SecretClient from _shared.test_case import KeyVaultTestCase from _test_case import client_setup, get_decorator, SecretsTestCase all_api_versions = get_decorator() logging_enabled = get_decorator(logging_enable=True) logging_disabled = get_decorator(logging_enable=False) # used for logging tests class MockHandler(logging.Handler): def __init__(self): super(MockHandler, self).__init__() self.messages = [] def emit(self, record): self.messages.append(record) class SecretClientTests(SecretsTestCase, KeyVaultTestCase): def _assert_secret_attributes_equal(self, s1, s2): self.assertEqual(s1.name, s2.name) self.assertEqual(s1.vault_url, s2.vault_url) self.assertEqual(s1.content_type, s2.content_type) self.assertEqual(s1.enabled, s2.enabled) self.assertEqual(s1.not_before, s2.not_before) self.assertEqual(s1.expires_on, s2.expires_on) self.assertEqual(s1.created_on, s2.created_on) self.assertEqual(s1.updated_on, s2.updated_on) self.assertEqual(s1.recovery_level, s2.recovery_level) self.assertEqual(s1.key_id, s2.key_id) def _validate_secret_bundle(self, secret_attributes, vault, secret_name, secret_value): prefix = "/".join(s.strip("/") for s in [vault, "secrets", secret_name]) id = secret_attributes.id self.assertTrue(id.index(prefix) == 0, "Id should start with '{}', but value is '{}'".format(prefix, id)) self.assertEqual( secret_attributes.value, secret_value, "value should be '{}', but is '{}'".format(secret_value, secret_attributes.value), ) self.assertTrue( secret_attributes.properties.created_on and secret_attributes.properties.updated_on, "Missing required 
date attributes.", ) def _validate_secret_list(self, secrets, expected): for secret in secrets: if secret.name in expected.keys(): expected_secret = expected[secret.name] self._assert_secret_attributes_equal(expected_secret.properties, secret) del expected[secret.name] self.assertEqual(len(expected), 0) @all_api_versions() @client_setup def test_secret_crud_operations(self, client, **kwargs): secret_name = self.get_resource_name("crud-secret") secret_value = "crud_secret_value" # create secret created = client.set_secret(secret_name, secret_value) self._validate_secret_bundle(created, client.vault_url, secret_name, secret_value) # set secret with optional arguments expires = date_parse.parse("2050-02-02T08:00:00.000Z") not_before = date_parse.parse("2015-02-02T08:00:00.000Z") content_type = "password" enabled = True tags = {"foo": "created tag"} created = client.set_secret( secret_name, secret_value, enabled=enabled, content_type=content_type, not_before=not_before, expires_on=expires, tags=tags, ) self._validate_secret_bundle(created, client.vault_url, secret_name, secret_value) self.assertEqual(content_type, created.properties.content_type) self.assertEqual(enabled, created.properties.enabled) self.assertEqual(not_before, created.properties.not_before) self.assertEqual(expires, created.properties.expires_on) self.assertEqual(tags, created.properties.tags) self._assert_secret_attributes_equal(created.properties, client.get_secret(created.name).properties) self._assert_secret_attributes_equal( created.properties, client.get_secret(created.name, created.properties.version).properties ) def _update_secret(secret): content_type = "text/plain" expires = date_parse.parse("2050-01-02T08:00:00.000Z") tags = {"foo": "updated tag"} enabled = not secret.properties.enabled updated_secret = client.update_secret_properties( secret.name, version=secret.properties.version, content_type=content_type, expires_on=expires, tags=tags, enabled=enabled, ) self.assertEqual(tags, 
updated_secret.tags) self.assertEqual(secret.id, updated_secret.id) self.assertEqual(content_type, updated_secret.content_type) self.assertEqual(expires, updated_secret.expires_on) self.assertNotEqual(secret.properties.enabled, updated_secret.enabled) self.assertNotEqual(secret.properties.updated_on, updated_secret.updated_on) return updated_secret if self.is_live: # wait to ensure the secret's update time won't equal its creation time time.sleep(1) updated = _update_secret(created) # delete secret deleted = client.begin_delete_secret(updated.name).result() self.assertIsNotNone(deleted) @all_api_versions() @client_setup def test_secret_list(self, client, **kwargs): max_secrets = self.list_test_size expected = {} # create many secrets for x in range(0, max_secrets): secret_name = self.get_resource_name("sec{}".format(x)) secret_value = "secVal{}".format(x) secret = None while not secret: secret = client.set_secret(secret_name, secret_value) expected[secret.name] = secret # list secrets result = list(client.list_properties_of_secrets(max_page_size=max_secrets - 1)) self._validate_secret_list(result, expected) @all_api_versions() @client_setup def test_list_versions(self, client, **kwargs): secret_name = self.get_resource_name("secVer") secret_value = "secVal" max_secrets = self.list_test_size expected = {} # create many secret versions for _ in range(0, max_secrets): secret = None while not secret: secret = client.set_secret(secret_name, secret_value) expected[secret.id] = secret result = client.list_properties_of_secret_versions(secret_name, max_page_size=max_secrets - 1) # validate list secret versions with attributes for secret in result: if secret.id in expected.keys(): expected_secret = expected[secret.id] del expected[secret.id] self._assert_secret_attributes_equal(expected_secret.properties, secret) self.assertEqual(len(expected), 0) @all_api_versions() @client_setup def test_list_deleted_secrets(self, client, **kwargs): expected = {} # create secrets for i in 
range(self.list_test_size): secret_name = self.get_resource_name("secret{}".format(i)) secret_value = "value{}".format(i) expected[secret_name] = client.set_secret(secret_name, secret_value) # delete them for secret_name in expected.keys(): client.begin_delete_secret(secret_name).wait() # validate list deleted secrets with attributes for deleted_secret in client.list_deleted_secrets(): self.assertIsNotNone(deleted_secret.deleted_date) self.assertIsNotNone(deleted_secret.scheduled_purge_date) self.assertIsNotNone(deleted_secret.recovery_id) if deleted_secret.name in expected: expected_secret = expected[deleted_secret.name] self._assert_secret_attributes_equal(expected_secret.properties, deleted_secret.properties) @all_api_versions() @client_setup def test_backup_restore(self, client, **kwargs): secret_name = self.get_resource_name("secbak") secret_value = "secVal" # create secret created_bundle = client.set_secret(secret_name, secret_value) # backup secret secret_backup = client.backup_secret(created_bundle.name) self.assertIsNotNone(secret_backup, "secret_backup") # delete secret client.begin_delete_secret(created_bundle.name).wait() # purge secret client.purge_deleted_secret(created_bundle.name) # restore secret restore_function = functools.partial(client.restore_secret_backup, secret_backup) restored_secret = self._poll_until_no_exception(restore_function, ResourceExistsError) self._assert_secret_attributes_equal(created_bundle.properties, restored_secret) @all_api_versions() @client_setup def test_recover(self, client, **kwargs): secrets = {} # create secrets to recover for i in range(self.list_test_size): secret_name = self.get_resource_name("secret{}".format(i)) secret_value = "value{}".format(i) secrets[secret_name] = client.set_secret(secret_name, secret_value) # delete all secrets for secret_name in secrets.keys(): client.begin_delete_secret(secret_name).wait() # validate all our deleted secrets are returned by list_deleted_secrets deleted = [s.name for s 
in client.list_deleted_secrets()] self.assertTrue(all(s in deleted for s in secrets.keys())) # recover select secrets for secret_name in secrets.keys(): client.begin_recover_deleted_secret(secret_name).wait() # validate the recovered secrets exist for secret_name in secrets.keys(): secret = client.get_secret(name=secret_name) self._assert_secret_attributes_equal(secret.properties, secrets[secret.name].properties) @all_api_versions() @client_setup def test_purge(self, client, **kwargs): secrets = {} # create secrets to purge for i in range(self.list_test_size): secret_name = self.get_resource_name("secret{}".format(i)) secret_value = "value{}".format(i) secrets[secret_name] = client.set_secret(secret_name, secret_value) # delete all secrets for secret_name in secrets.keys(): client.begin_delete_secret(secret_name).wait() # validate all our deleted secrets are returned by list_deleted_secrets deleted = [s.name for s in client.list_deleted_secrets()] self.assertTrue(all(s in deleted for s in secrets.keys())) # purge secrets for secret_name in secrets.keys(): client.purge_deleted_secret(secret_name) for secret_name in secrets.keys(): self._poll_until_exception(functools.partial(client.get_deleted_secret, secret_name), ResourceNotFoundError) deleted = [s.name for s in client.list_deleted_secrets()] self.assertTrue(not any(s in deleted for s in secrets.keys())) @logging_enabled() @client_setup def test_logging_enabled(self, client, **kwargs): mock_handler = MockHandler() logger = logging.getLogger("azure") logger.addHandler(mock_handler) logger.setLevel(logging.DEBUG) secret_name = self.get_resource_name("secret-name") client.set_secret(secret_name, "secret-value") for message in mock_handler.messages: if message.levelname == "DEBUG" and message.funcName == "on_request": try: body = json.loads(message.message) if body["value"] == "secret-value": return except (ValueError, KeyError): # this means the message is not JSON or has no kty property pass assert False, "Expected 
request body wasn't logged" @logging_disabled() @client_setup def test_logging_disabled(self, client, **kwargs): mock_handler = MockHandler() logger = logging.getLogger("azure") logger.addHandler(mock_handler) logger.setLevel(logging.DEBUG) secret_name = self.get_resource_name("secret-name") client.set_secret(secret_name, "secret-value") for message in mock_handler.messages: if message.levelname == "DEBUG" and message.funcName == "on_request": try: body = json.loads(message.message) assert body["value"] != "secret-value", "Client request body was logged" except (ValueError, KeyError): # this means the message is not JSON or has no kty property pass def test_service_headers_allowed_in_logs(): service_headers = {"x-ms-keyvault-network-info", "x-ms-keyvault-region", "x-ms-keyvault-service-version"} client = SecretClient("...", object()) assert service_headers.issubset(client._client._config.http_logging_policy.allowed_header_names) def test_custom_hook_policy(): class CustomHookPolicy(SansIOHTTPPolicy): pass client = SecretClient("...", object(), custom_hook_policy=CustomHookPolicy()) assert isinstance(client._client._config.custom_hook_policy, CustomHookPolicy)
mit
-892,249,785,946,571,100
38.836735
120
0.629172
false
peterwilletts24/Python-Scripts
plot_scripts/Rain/Diurnal/sea_diurnal_rain_plot_domain_constrain_bit_above western_ghats.py
1
10333
""" Load npy xy, plot and save """ import os, sys import matplotlib matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab! import matplotlib.pyplot as plt import matplotlib.cm as mpl_cm from matplotlib import rc from matplotlib.font_manager import FontProperties from matplotlib import rcParams from matplotlib import cm rc('text', usetex=True) rcParams['text.usetex']=True rcParams['text.latex.unicode']=True rc('font', family = 'serif', serif = 'cmr10') import numpy as np from datetime import timedelta import datetime import imp import re from textwrap import wrap model_name_convert_legend = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_legend.py') #unrotate = imp.load_source('util', '/home/pwille/python_scripts/modules/unrotate_pole.py') ############### # Things to change top_dir='/nfs/a90/eepdw/Data/Rain_Land_Sea_Diurnal' pp_file = 'avg.5216' lon_max = 71 lon_min = 67 lat_max= 28 lat_min=20 trmm_dir = '/nfs/a90/eepdw/Data/Observations/Satellite/TRMM/Diurnal/' trmm_file = "trmm_diurnal_average_lat_%s_%s_lon_%s_%s_bit_above_western_ghats.npz" % (lat_min,lat_max, lon_min, lon_max) ############# # Make own time x-axis d = matplotlib.dates.drange(datetime.datetime(2011, 8, 21, 6,30), datetime.datetime(2011, 8, 22, 6, 30), timedelta(hours=1)) formatter = matplotlib.dates.DateFormatter('%H:%M') def main(): #experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12 experiment_ids_p = ['djznw', 'djzny', 'djznq', 'dklzq', 'dkmbq', 'dkjxq' ] # Most of Params experiment_ids_e = ['dklwu', 'dklyu', 'djzns', 'dkbhu', 'djznu', 'dkhgu'] # Most of Explicit #experiment_ids = ['djzny', 'djznq', 'djzns', 'djznw', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ] #plt.ion() NUM_COLOURS = 15 cmap=cm.get_cmap(cm.Set1, NUM_COLOURS) #cgen = (cmap(1.*i/NUM_COLORS) for i in range(NUM_COLORS)) for ls in ['land','sea', 'total']: fig = 
plt.figure(figsize=(12,6)) ax = fig.add_subplot(111) legendEntries=[] legendtext=[] plot_trmm = np.load('%s%s_%s' % (trmm_dir, ls, trmm_file)) dates_trmm=[] p=[] for dp in plot_trmm['hour']: print dp if ((int(dp)<23) & (int(dp)>=6)): dates_trmm.append(datetime.datetime(2011, 8, 21, int(dp), 0)) p.append(plot_trmm['mean'][plot_trmm['hour']==dp]) if ((int(dp)>=0) & (int(dp)<=6)): dates_trmm.append(datetime.datetime(2011, 8, 22, int(dp), 0)) p.append(plot_trmm['mean'][plot_trmm['hour']==dp]) #print dates_trmm a = np.argsort(dates_trmm,axis=0) d_trmm = np.array(dates_trmm)[a] pl = (np.array(p)[a]) #pl=np.sort(pl,axis=1) l, = plt.plot_date(d_trmm, pl, label='TRMM', linewidth=2, linestyle='-', marker='', markersize=2, fmt='', color='#262626') legendEntries.append(l) legendtext.append('TRMM') l0=plt.legend(legendEntries, legendtext,title='', frameon=False, prop={'size':8}, loc=9, bbox_to_anchor=(0.21, 0,1, 1)) # Change the legend label colors to almost black texts = l0.texts for t in texts: t.set_color('#262626') legendEntries=[] legendtext=[] for c, experiment_id in enumerate(experiment_ids_p): expmin1 = experiment_id[:-1] if (experiment_id=='djznw'): print experiment_id colour = cmap(1.*1/NUM_COLOURS) linewidth=0.2 linestylez='--' if (experiment_id=='djzny'): print experiment_id colour = cmap(1.*3/NUM_COLOURS) linewidth=0.5 linestylez='--' if ((experiment_id=='djznq') or (experiment_id=='dkjxq')): print experiment_id colour = cmap(1.*5/NUM_COLOURS) linewidth=0.8 if (experiment_id=='djznq'): linestylez='--' if (experiment_id=='dkjxq'): linestylez=':' if ((experiment_id=='dklzq') or (experiment_id=='dklwu')): print experiment_id colour = cmap(1.*7/NUM_COLOURS) linewidth=1 if (experiment_id=='dklzq'): linestylez='--' if (experiment_id=='dklwu'): linestylez='-' if ((experiment_id=='dklyu') or (experiment_id=='dkmbq')): print experiment_id colour = cmap(1.*9/NUM_COLOURS) linewidth=1.3 if (experiment_id=='dkmbq'): linestylez='--' if (experiment_id=='dklyu'): linestylez='-' if 
(experiment_id=='djzns'): print experiment_id colour = cmap(1.*11/NUM_COLOURS) linewidth=1.6 linestylez='-' if ((experiment_id=='dkbhu')or (experiment_id=='dkhgu')): print experiment_id colour = cmap(1.*13/NUM_COLOURS) linewidth=1.9 if (experiment_id=='dkbhu'): linestylez='-' if (experiment_id=='dkhgu'): linestylez=':' if (experiment_id=='djznu'): print experiment_id colour = cmap(1.*15/NUM_COLOURS) linewidth=2. linestylez='-' try: plotnp = np.load('%s/%s/%s/%s_%s_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s.npy' % (top_dir, expmin1, experiment_id, pp_file, ls, lat_min, lat_max, lon_min, lon_max)) l, = plt.plot_date(d, plotnp[0]*3600, label='%s' % (model_name_convert_legend.main(experiment_id)), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour) legendEntries.append(l) legendtext.append('%s' % (model_name_convert_legend.main(experiment_id))) except Exception, e: print e pass l1=plt.legend(legendEntries, legendtext, title='Parametrised', loc=9, frameon=False, prop={'size':8}, bbox_to_anchor=(0, 0,1, 1)) # Change the legend label colors to almost black texts = l1.texts for t in texts: t.set_color('#262626') legendEntries=[] legendtext=[] c1=0 for c, experiment_id in enumerate(experiment_ids_e): if (experiment_id=='djznw'): print experiment_id colour = cmap(1.*1/NUM_COLOURS) linewidth=0.2 linestylez='--' if (experiment_id=='djzny'): print experiment_id colour = cmap(1.*3/NUM_COLOURS) linewidth=0.5 linestylez='--' if ((experiment_id=='djznq') or (experiment_id=='dkjxq')): print experiment_id colour = cmap(1.*5/NUM_COLOURS) linewidth=0.8 if (experiment_id=='djznq'): linestylez='--' if (experiment_id=='dkjxq'): linestylez=':' if ((experiment_id=='dklzq') or (experiment_id=='dklwu')): print experiment_id colour = cmap(1.*7/NUM_COLOURS) linewidth=1 if (experiment_id=='dklzq'): linestylez='--' if (experiment_id=='dklwu'): linestylez='-' if ((experiment_id=='dklyu') or (experiment_id=='dkmbq')): print experiment_id colour = 
cmap(1.*9/NUM_COLOURS) linewidth=1.3 if (experiment_id=='dkmbq'): linestylez='--' if (experiment_id=='dklyu'): linestylez='-' if (experiment_id=='djzns'): print experiment_id colour = cmap(1.*11/NUM_COLOURS) linewidth=1.6 linestylez='-' if ((experiment_id=='dkbhu')or (experiment_id=='dkhgu')): print experiment_id colour = cmap(1.*13/NUM_COLOURS) linewidth=1.9 if (experiment_id=='dkbhu'): linestylez='-' if (experiment_id=='dkhgu'): linestylez=':' if (experiment_id=='djznu'): print experiment_id colour = cmap(1.*15/NUM_COLOURS) linewidth=2. linestylez='-' expmin1 = experiment_id[:-1] try: plotnp = np.load('%s/%s/%s/%s_%s_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s.npy' % (top_dir, expmin1, experiment_id, pp_file, ls, lat_min, lat_max, lon_min, lon_max)) l, = plt.plot_date(d, plotnp[0]*3600, label='%s' % (model_name_convert_legend.main(experiment_id)), linewidth=linewidth, linestyle=linestylez, marker='', markersize=2, fmt='', color=colour) legendEntries.append(l) legendtext.append('%s' % (model_name_convert_legend.main(experiment_id))) except Exception, e: print e pass l2=plt.legend(legendEntries, legendtext, title='Explicit', loc=9, frameon=False, bbox_to_anchor=(0.11, 0,1, 1), prop={'size':8}) plt.gca().add_artist(l1) plt.gca().add_artist(l0) plt.gca().xaxis.set_major_formatter(formatter) # Change the legend label colors to almost black texts = l2.texts for t in texts: t.set_color('#262626') plt.xlabel('Time (UTC)') plt.ylabel('mm/h') title="Domain Averaged Rainfall - %s" % ls t=re.sub('(.{68} )', '\\1\n', str(title), 0, re.DOTALL) t = re.sub(r'[(\']', ' ', t) t = re.sub(r'[\',)]', ' ', t) pp_filenodot= pp_file.replace(".", "") # Bit of formatting # Set colour of axis lines spines_to_keep = ['bottom', 'left'] for spine in spines_to_keep: ax.spines[spine].set_linewidth(0.5) ax.spines[spine].set_color('#262626') # Remove top and right axes lines ("spines") spines_to_remove = ['top', 'right'] for spine in spines_to_remove: 
ax.spines[spine].set_visible(False) # Get rid of ticks. The position of the numbers is informative enough of # the position of the value. ax.xaxis.set_ticks_position('none') ax.yaxis.set_ticks_position('none') # Change the labels to the off-black ax.xaxis.label.set_color('#262626') ax.yaxis.label.set_color('#262626') if not os.path.exists('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/'): os.makedirs('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/') plt.savefig('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/%s_%s_latlon_bit_above_western_ghats_notitle.png' % (pp_filenodot, ls), format='png', bbox_inches='tight') plt.title('\n'.join(wrap('%s' % (t.title()), 1000,replace_whitespace=False)), fontsize=16, color='#262626') #plt.show() plt.savefig('/nfs/a90/eepdw/Figures/EMBRACE/Diurnal/%s_%s_latlon_bit_above_western_ghats.png' % (pp_filenodot, ls), format='png', bbox_inches='tight') plt.close() if __name__ == '__main__': main()
mit
4,530,510,237,832,121,300
30.123494
195
0.594987
false
dcalacci/Interactive_estimation
game/round/models.py
1
2767
"""Django models for a single game round: the stimulus plot, slider
telemetry, and the per-round record with scoring helpers."""
from decimal import Decimal

from django.db import models
from django.conf import settings
from django.utils import timezone

# Create your models here.
from game.contrib.calculate import calculate_score
from game.control.models import Control


class Plot(models.Model):
    """A stimulus image shown to the player, with its true answer."""
    plot = models.URLField()  # URL of the plot image served to the client
    answer = models.DecimalField(max_digits=3, decimal_places=2)  # ground-truth value in [0, 1] implied by max_digits=3/decimal_places=2
    duration = models.TimeField(null=True)

    def __str__(self):
        return self.plot


class SliderValue(models.Model):
    """One sampled position of the player's answer slider."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    timestamp = models.DateTimeField(default=timezone.now)
    # NOTE(review): no on_delete here — relies on the pre-Django-2.0 default
    # (CASCADE); confirm before upgrading Django.
    this_round = models.ForeignKey("Round", null=True)
    round_order = models.PositiveSmallIntegerField(null=True)
    value = models.DecimalField(decimal_places=2, max_digits=3, null=True)

    def __str__(self):
        return "Slider {}".format(self.id)


class Round(models.Model):
    """A player's attempt at one plot, unique per (user, round_order)."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    plot = models.ForeignKey(Plot, on_delete=models.CASCADE)
    round_order = models.PositiveSmallIntegerField()
    guess = models.DecimalField(max_digits=3, decimal_places=2, null=True)  # null until the player answers
    # start time of the round
    # end time of the round
    # NOTE(review): auto_now=True updates start_time on *every* save;
    # auto_now_add may have been intended — confirm.
    start_time = models.DateTimeField(auto_now=True, null=True)
    end_time = models.DateTimeField(null=True)
    # todo: change control treatment
    score = models.DecimalField(max_digits=3, decimal_places=2, default=Decimal(0))

    def __str__(self):
        return self.user.username

    def round_data(self):
        """Build the client-facing summary dict for this round.

        Aggregates the user's cumulative score over all answered rounds up to
        and including this one (guess__gte filters out unanswered rounds),
        plus this round's own score, and attaches Control-game metadata when
        the user is in the control condition ('c').
        """
        played_rounds = self.__class__.objects.filter(user=self.user, round_order__lte=self.round_order,
                                                      guess__gte=Decimal(0.0))
        score = calculate_score(played_rounds)
        this_round = self.__class__.objects.filter(user=self.user, round_order=self.round_order,
                                                   guess__gte=Decimal(0.0))
        round_score = calculate_score(this_round)
        data = {'username': self.user.username, 'cumulative_score': score, 'avatar': self.user.avatar,
                'task_path': self.plot.plot, 'correct_answer': self.plot.answer,
                'independent_guess': self.guess, 'round_id': self.round_order,
                'score': round_score, 'game_id': None, 'condition': None,
                'following': None, 'revised_guess': None
                }
        if self.user.game_type == 'c':
            game = Control.objects.get(user=self.user)
            data['game_id'] = game.id
            data['condition'] = 'control'
        return data

    def get_guess(self):
        """Return the guess as a float, or -1 when no guess was recorded."""
        return float(self.guess) if self.guess else -1

    class Meta:
        unique_together = (('user', 'round_order',),)
mit
-4,080,133,973,543,304,000
36.391892
108
0.644742
false
pdl30/pychiptools
pychiptools/utilities/alignment.py
1
3065
#!/usr/bin/python
########################################################################
# 20 Oct 2014
# Patrick Lombard, Centre for Stem Stem Research
# Core Bioinformatics Group
# University of Cambridge
# All right reserved.
########################################################################

import argparse
import subprocess
import sys, re, os

# SAM flags accepted as "properly aligned" for paired-end reads.
_PAIRED_FLAGS = frozenset((147, 83, 99, 163, 81, 97, 145, 161))
# SAM flags accepted for single-end reads (forward / reverse strand).
_SINGLE_FLAGS = frozenset((0, 16))


def _run_bowtie(command, report, sam_out=None):
    """Run a bowtie/bowtie2 *command* (string), sending stderr to *report*.

    When *sam_out* is given, stdout is captured there (bowtie1 writes SAM to
    stdout; bowtie2 writes via -S and needs no capture).  File handles are
    closed via context managers to avoid the descriptor leaks of the
    original implementation.
    """
    with open(report, "wb") as report_o:
        if sam_out is not None:
            with open(sam_out, "wb") as sam_o:
                p = subprocess.Popen(command.split(), stdout=sam_o, stderr=report_o)
                p.communicate()
        else:
            p = subprocess.Popen(command.split(), stderr=report_o)
            p.communicate()


def _grep_unique(samfile, outfile, accepted_flags):
    """Copy header lines and uniquely-mapped reads from *samfile* to *outfile*.

    A read is kept when it has an optional 13th field that is NOT a
    secondary-alignment score tag (XS:i:) and its SAM flag is in
    *accepted_flags*.  Reads with fewer than 13 fields are dropped,
    matching the original behaviour.
    """
    with open(outfile, "w") as output, open(samfile) as f:
        for line in f:
            line = line.rstrip()
            word = line.split("\t")
            if line.startswith("@"):
                output.write("{}\n".format(line))
                continue
            if len(word) > 12 and not re.match("XS:i:", word[12]) \
                    and int(word[1]) in accepted_flags:
                output.write("{}\n".format(line))


def paired_bowtie(fastq1, fastq2, name, index, outdir):
    """Align a paired-end fastq pair with bowtie1 and keep unique mappers."""
    sam1 = outdir + "/" + "tmp.sam"
    report = outdir + '/' + name + '_report.txt'
    uniq = "bowtie -m 2 -v 1 --best --strata --seed 0 --sam {0} -1 {1} -2 {2}".format(index, fastq1, fastq2)
    _run_bowtie(uniq, report, sam_out=sam1)
    sam2 = outdir + "/" + name + ".sam"
    grep_paired_unique(sam1, sam2)
    os.remove(sam1)


def single_bowtie(fastq, name, index, outdir):
    """Align a single-end fastq with bowtie1 and keep unique mappers."""
    sam1 = outdir + "/" + "tmp.sam"
    report = outdir + '/' + name + '_report.txt'
    uniq = "bowtie -m 2 -v 1 --best --strata --seed 0 --sam {0} {1}".format(index, fastq)
    _run_bowtie(uniq, report, sam_out=sam1)
    sam2 = outdir + "/" + name + ".sam"
    grep_single_unique(sam1, sam2)
    os.remove(sam1)


def grep_paired_unique(samfile, outfile):
    """Filter *samfile* to headers plus properly-paired unique alignments."""
    _grep_unique(samfile, outfile, _PAIRED_FLAGS)


def grep_single_unique(samfile, outfile):
    """Filter *samfile* to headers plus single-end unique alignments."""
    _grep_unique(samfile, outfile, _SINGLE_FLAGS)


def paired_bowtie2(fastq1, fastq2, name, index, outdir, threads):
    """Align a paired-end fastq pair with bowtie2 and keep unique mappers."""
    report = outdir + '/' + name + '_report.txt'
    uniq = "bowtie2 -p {4} -k 2 -N 1 --mm --no-mixed --no-discordant -x {0} -1 {1} -2 {2} -S {3}/tmp.sam".format(
        index, fastq1, fastq2, outdir, threads)
    _run_bowtie(uniq, report)
    grep_paired_unique(outdir + "/tmp.sam", outdir + '/' + name + '.sam')
    os.remove(outdir + "/tmp.sam")


def single_bowtie2(fastq, name, index, outdir, threads):
    """Align a single-end fastq with bowtie2 and keep unique mappers."""
    report = outdir + '/' + name + '_report.txt'
    uniq = "bowtie2 -p {3} -k 2 -N 1 --mm -x {0} -U {1} -S {2}/tmp.sam".format(
        index, fastq, outdir, threads)
    _run_bowtie(uniq, report)
    grep_single_unique(outdir + "/tmp.sam", outdir + '/' + name + '.sam')
    os.remove(outdir + "/tmp.sam")
gpl-2.0
3,248,898,922,289,293,000
34.639535
185
0.596411
false
shawncaojob/LC
PY/140_word_break_ii.py
1
4564
# 140. Word Break II
# Difficulty: Hard
# Given a string s and a dictionary of words dict, add spaces in s to
# construct a sentence where each word is a valid dictionary word.
# Return all such possible sentences.
#
# For example, given
#   s = "catsanddog",
#   dict = ["cat", "cats", "and", "sand", "dog"].
# A solution is ["cats and dog", "cat sand dog"].
#
# Notes on the two DP directions:
#   - the first Solution uses forward DP (dp[i]: s[:i] is breakable) + backward DFS;
#   - the second Solution uses backward DP (dp[i]: s[i:] is breakable) + forward DFS.


# 12.08.2016 Rewrite. DP + DFS
class Solution(object):
    """Forward DP + backward DFS.

    dp[i] is True when the prefix s[:i] can be segmented into dictionary
    words; the DFS then walks from the end of s back to the start, only
    recursing through positions the DP marked reachable.
    """
    dp = []

    def wordBreak(self, s, wordDict):
        """
        :type s: str
        :type wordDict: Set[str]
        :rtype: List[str]
        """
        n = len(s)
        self.dp = [False for _ in range(n + 1)]
        self.dp[0] = True  # empty prefix is trivially breakable
        for i in range(n):
            for j in range(i + 1):
                if s[j:i + 1] in wordDict and self.dp[j]:
                    self.dp[i + 1] = True
                    break
        if not self.dp[-1]:
            return []  # no segmentation exists; skip the DFS entirely
        res = []
        self.dfs(res, "", s, n - 1, wordDict)
        return res

    def dfs(self, res, line, s, end, wordDict):
        """Append to *res* every sentence covering s[:end+1], prefixing *line*."""
        if end == -1:
            res.append(line[:-1])  # drop the trailing space separator
            return
        for start in range(end, -1, -1):
            word = s[start:end + 1]
            if word in wordDict and self.dp[start]:
                self.dfs(res, word + " " + line, s, start - 1, wordDict)


# DP + DFS can get rid of TLE
class Solution(object):  # noqa: F811 -- intentionally shadows the class above (scratch file)
    """Backward DP + forward DFS; this is the version exercised below.

    dp[i] is True when the suffix s[i:] can be segmented into dictionary
    words.
    """

    def wordBreak(self, s, wordDict):
        """
        :type s: str
        :type wordDict: Set[str]
        :rtype: List[str]
        """
        n = len(s)
        res = []
        dp = [False for _ in range(n + 1)]
        dp[n] = True  # empty suffix is trivially breakable
        for i in range(n - 1, -1, -1):
            for j in range(n - 1, i - 1, -1):  # i: start index, j: end index
                if dp[j + 1] and s[i:j + 1] in wordDict:
                    dp[i] = True
                    break

        def dfs(start, line):
            # Only explore suffixes the DP proved breakable; this prunes the
            # exponential blowup that causes TLE in the naive DFS.
            if not dp[start]:
                return
            if start == len(s):
                res.append(line[1:])  # drop the leading space separator
                return
            for i in range(start, len(s)):
                if dp[i + 1] and s[start:i + 1] in wordDict:
                    dfs(i + 1, line + " " + s[start:i + 1])

        dfs(0, "")
        return res


if __name__ == "__main__":
    # Stress case: long breakable string with a lone 'b' making most of the
    # search space dead ends.
    s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
    d = ["a", "aa", "aaa", "aaaa", "aaaaa", "aaaaaa", "aaaaaaa", "aaaaaaaa", "aaaaaaaaa", "aaaaaaaaaa"]
    print(Solution().wordBreak(s, d))


# If DFS only, gets TLE on LeetCode; kept for reference.
class Solution2(object):
    """Plain recursive DFS with a per-character feasibility pre-check."""

    def wordBreak(self, s, wordDict):
        """
        :type s: str
        :type wordDict: Set[str]
        :rtype: List[str]
        """
        res = []
        # Pre-check: if s contains a character that appears in no dictionary
        # word, no segmentation can exist.  (The original compared the whole
        # set object with `set_s not in set_dict`, which raises TypeError —
        # sets are unhashable — instead of testing each character.)
        set_s = set(s)
        set_dict = set("".join(wordDict))
        for char in set_s:
            if char not in set_dict:
                return []
        self.dfs(s, wordDict, res, "")
        return res

    def dfs(self, s, wordDict, res, line):
        """Accumulate complete sentences for the remaining suffix *s*."""
        if not s:
            res.append(line)
            return
        for i in range(1, len(s) + 1):
            if s[:i] in wordDict:
                if not line:
                    self.dfs(s[i:], wordDict, res, s[:i])
                else:
                    self.dfs(s[i:], wordDict, res, line + " " + s[:i])
gpl-3.0
5,375,061,984,906,493,000
29.837838
163
0.510517
false
abhigd/bigboy
app/views/link.py
1
17235
"""Flask views for share links: CRUD on links, link targets (files served
via signed S3 URLs), and anonymous/authenticated uploads staged in redis."""
import time, uuid, json, calendar

from app import app
from app.lib import upload, auth, files
from app.lib import distribution, link
from app.lib import geo
from app.forms import *

from flask import request, redirect
from flask import session, render_template
from flask import make_response, abort
from flask import jsonify, Response, current_app

from werkzeug.datastructures import MultiDict
from rfc6266 import build_header
import mimetypes

from boto.sqs.message import Message as SQSMessage

# NOTE(review): flask.ext.* namespace is deprecated; modern Flask requires
# `from flask_login import ...` — confirm the pinned Flask version.
from flask.ext.login import login_user, current_user
from flask.ext.login import login_required, login_url


@app.route('/link/', methods=['GET'])
@login_required
def render_links():
    """List the current user's links (or the links attached to ?target=).

    XHR requests get a JSON page of link dicts; plain requests get the HTML
    shell (the page then fetches data via XHR — note links=[] below).
    """
    link_list, links = [], []
    owner = current_user.user_id
    # NOTE(review): start/end are hard-coded — pagination appears unfinished;
    # page_num is always 1 and `end` is unused.
    start = 0
    end = 10
    page_num = start/10+1
    target = request.args.get("target", None)
    if target:
        # Links that expose a specific file (sorted set keyed by target id).
        link_ids = current_app.redis_client.zrange("target_links:%s" %target, 0, -1)
        link_count = current_app.redis_client.zcard("target_links:%s" %target)
    else:
        # All links owned by the current user.
        link_ids = current_app.redis_client.smembers("user_links:%s" %owner)
        link_count = current_app.redis_client.scard("user_links:%s" %owner)
    if link_ids:
        link_list = current_app.redis_client.mget(["links:%s" %link_id \
                        for link_id in link_ids])
    # `if x` skips ids whose link blob has been deleted/expired out of redis.
    link_data = [json.loads(x) for x in link_list if x]
    # NOTE(review): loop variable `link` shadows the imported app.lib.link
    # module within this function.
    for link in link_data:
        link["linkUrl"] = "%s/link/%s" %(app.config.get('HOSTNAME'), link["id"])
        if link["expires_in"] < time.time():
            link["expired"] = True
        else:
            link["expired"] = False
        links.append(link)
    if request.is_xhr:
        return jsonify({'data': links,
                        'total': link_count,
                        'perPage': 10,
                        'page': page_num
                        })
    else:
        return render_template('link.html', links=[])


@app.route('/link/<link_id>', methods=['GET'])
def render_link(link_id):
    """Render one link: its approved targets plus (for the owner) pending
    temp uploads.  Responds with HTML, JSON, or plain text by Accept header.

    Expired links 404 for anonymous visitors but stay visible to
    authenticated users.
    """
    link_data = current_app.redis_client.get("links:%s" %link_id)
    link_targets = current_app.redis_client.zrange("link_targets:%s" %link_id, 0, -1)
    target_render_data = []
    if link_data is None:
        abort(404)
    # TODO: Check if target's ACL still allows the owner of this
    # link to access
    link_info = json.loads(link_data)
    expires_in = link_info["expires_in"]
    if link_info["expires_in"] < time.time():
        link_info["expired"] = True
    else:
        link_info["expired"] = False
    if link_info["expired"]:
        if not current_user.is_authenticated():
            abort(404, "Link has expired")
    # Check for temp uploads and move them to link_targets if complete.
    # files will not be moved to the main bucket until the user
    # approves it. File will be served until then from the S3 bucket
    # Replace the owner id with a sanitized owner profile (identity-provider
    # fields stripped before anything is sent to the client).
    owner_info = json.loads(current_app.redis_client.get("user:%s" %link_info["owner"]))
    del owner_info["idp"]
    del owner_info["idp_id"]
    link_info["owner"] = owner_info
    if link_targets:
        target_ids = link_targets
        target_data = current_app.redis_client.mget(["files:%s" %x for x in target_ids])
        target_info = [json.loads(x) for x in target_data if x]
    else:
        target_info = []
    for idx, target in enumerate(target_info):
        target_url = "%s/link/%s/target/%s/%s" \
                %(app.config.get('HOSTNAME'), link_id, target["id"], target["title"])
        target["url"] = target_url
        # Download count = length of the per-target download log list.
        target_download_count = current_app.redis_client.llen(
                                    "target_download_counter:%s:%s" \
                                    %(link_id, target["id"]))
        target["count"] = int(target_download_count)
        # Strip internal bookkeeping before rendering.
        del target["acl"]
        del target["source"]
        # del target["owner"]
        target["approved"] = True
        target_render_data.append(target)
    if current_user.is_authenticated():
        # Owners also see not-yet-approved uploads staged under temp_files.
        temp_target_info = []
        temp_uploads = current_app.redis_client.smembers("link_uploads:%s" %(link_id))
        if temp_uploads:
            temp_target_data = current_app.redis_client.mget(["temp_files:%s" %x \
                                    for x in temp_uploads])
            temp_target_info = [json.loads(x) for x in temp_target_data if x]
        for idx, target in enumerate(temp_target_info):
            # Check if this file really exists in S3 else delete from index
            target_url = "%s/link/%s/upload/%s/%s" \
                    %(app.config.get('HOSTNAME'), link_id, target["id"], target["title"])
            target["url"] = target_url
            target["approved"] = False
            target["count"] = 0
            target_render_data.append(target)
            del target["bucket"]
    if request.headers["Accept"].startswith("text/html"):
        return render_template('link.html', link_data=link_info, targets=target_render_data)
    elif request.headers["Accept"].startswith("application/json"):
        link_data = dict(**link_info)
        link_data.update(targets=target_render_data)
        return jsonify(link_data)
    else:
        # Plain-text fallback (e.g. curl): "title  url" per line.
        render_data = ["%s %s" %(target["title"][:18].ljust(20), target["url"]) \
                        for target in target_render_data]
        resp = make_response("\n".join(render_data)+"\n")
        resp.headers['content-type'] = "text/plain"
        return resp


@app.route('/link/', methods=['POST'])
@login_required
def create_link():
    """Create a new link with default limits and attach any listed targets."""
    created = int(time.time())
    owner = current_user.user_id
    acl = {}
    form = LinkForm(MultiDict(request.json))
    if not form.validate():
        abort(400, form.errors)
    link_id = uuid.uuid4().hex
    link_targets = form.target.data.split(",")
    link_expires_in = time.gmtime((form.expires_in.data))
    link_data = {"id": link_id, "owner": owner,
                 "expires_in": form.expires_in.data,
                 "created": created, "acl": acl,
                 "max_uploads": 10,
                 "max_upload_size": 104857600,  # 100 MiB default cap
                 "max_target_downloads": 0,  # 0 = unlimited (see get_link_target)
                 "allow_downloads": True,
                 "allow_uploads": False}
    current_app.redis_client.set("links:%s" %link_id, json.dumps(link_data))
    current_app.redis_client.sadd("user_links:%s" %owner, link_id)
    if link_targets:
        target_ids = link_targets
        target_data = current_app.redis_client.mget(["files:%s" %x for x in target_ids])
        targets = [json.loads(x) for x in target_data if x]
        for target in targets:
            link.create_link_target(link_data, target)
    link_data["linkUrl"] = "%s/link/%s" % (app.config.get('HOSTNAME'), link_id)
    link_data["expired"] = False
    return jsonify(link_data)


@app.route('/link/<link_id>', methods=['PUT'])
@login_required
def edit_link(link_id):
    """Replace a link's settings from the submitted form, keeping identity
    fields (owner, created, acl) from the stored record."""
    link_data = current_app.redis_client.get("links:%s" %link_id)
    if not link_data:
        abort(404)
    form = LinkForm(MultiDict(request.json))
    if not form.validate():
        abort(400, form.errors)
    link_info = json.loads(link_data)
    expires_in = form.expires_in.data
    link_data = {"id": link_id,
                 "owner": link_info["owner"],
                 "expires_in": expires_in,
                 "created": link_info["created"],
                 "acl": link_info["acl"],
                 "max_uploads": form.max_uploads.data or 10,
                 "max_upload_size": form.max_upload_size.data or 1024*1024*100,
                 "max_target_downloads": form.max_target_downloads.data or 0,
                 "allow_downloads": form.allow_downloads.data,
                 "allow_uploads": form.allow_uploads.data}
    current_app.redis_client.set("links:%s" %link_id, json.dumps(link_data))
    return jsonify(link_data)


@app.route('/link/<link_id>', methods=["DELETE"])
@login_required
def delete_link(link_id):
    """Delete a link, or — with ?target= — detach one target from it."""
    link_data = current_app.redis_client.get("links:%s" %link_id)
    if not link_data:
        abort(404)
    target = request.args.get("target", None)
    if target:
        # The target query param looks like a path; the file id is its tail.
        target_id = target.split("/", 2)[-1]
        target_data = current_app.redis_client.get("files:%s" % target_id)
        if not target_data:
            abort(400, "Specified File does not exist")
    link_info = json.loads(link_data)
    owner = link_info["owner"]
    # TODO: This is ambigious. This method does two things:
    # 1. Remove a link or
    # 2. Remove a specific target from a link
    if target is None:
        current_app.redis_client.delete("links:%s" %link_id)
        current_app.redis_client.srem("user_links:%s" %owner, link_id)
        current_app.redis_client.delete("link_uploads:%s" %link_id)
    else:
        link.delete_link_target(link_info, json.loads(target_data))
    response = Response(status=204)
    return response


# @app.route('/link/<link_id>/target/', methods=["GET"])
# def get_link_targets(link_id):
#     pass
#
# Get last 100 ids from source uploads
@app.route('/link/<link_id>/search', methods=["GET"])
@login_required
def search_link_targets(link_id):
    """Suggest recent files that are not yet targets of this link."""
    link_data = current_app.redis_client.get("links:%s" %link_id)
    if link_data is None:
        abort(404)
    last_100_files = current_app.redis_client.zrevrange("local_files", 0, 100)
    link_targets = current_app.redis_client.zrevrange("link_targets:%s" %link_id, 0, -1)
    interesting_files = set(last_100_files) - set(link_targets)
    data = files.get_file_data(interesting_files)
    return jsonify({'data': data})


@app.route('/link/<link_id>/target/<target_id>/<file_name>', methods=["GET"])
def get_link_target(link_id, target_id, file_name):
    """Redirect to a signed URL for one target, enforcing expiry and the
    per-target download limit, and logging the download."""
    link_data = current_app.redis_client.get("links:%s" %link_id)
    target_data = current_app.redis_client.get("files:%s" % target_id)
    if link_data is None or target_data is None:
        abort(404)
    link_info = json.loads(link_data)
    if link_info["expires_in"] < time.time():
        abort(404, "Link has expired")
    if link_info["max_target_downloads"] > 0:
        target_d_count = current_app.redis_client.llen("target_download_counter:%s:%s" \
                                %(link_id, target_id))
        if target_d_count >= link_info["max_target_downloads"]:
            abort(404, "Limit reached")
    # zrank is None when the file is not a target of this link.
    target_exists = current_app.redis_client.zrank("link_targets:%s" %link_id, target_id)
    if target_exists is None:
        abort(404, "No such file exists")
    target_info = json.loads(target_data)
    signed_url = distribution.get_signed_url(target_info)
    # Record this download (timestamped entry per download).
    current_app.redis_client.lpush("target_download_counter:%s:%s" \
                                %(link_id, target_id), time.time())
    print signed_url
    # 307 keeps the method on redirect.
    return redirect(signed_url, code=307)


@app.route('/link/<link_id>/target/<target_id>', methods=["PUT"])
@login_required
def edit_link_target(link_id, target_id):
    """Approve a pending (temp) upload, promoting it to a real link target."""
    form = EditLinkTargetForm(MultiDict(request.json))
    link_data = current_app.redis_client.get("links:%s" %link_id)
    if not link_data:
        abort(404)
    if not form.validate():
        abort(400, form.errors)
    approved = form.approved.data
    # NOTE(review): description is read but never used here.
    description = form.description.data
    if approved:
        temp_file_data = current_app.redis_client.get("temp_files:%s" % target_id)
        if not temp_file_data:
            abort(404)
        link_info = json.loads(link_data)
        temp_file_info = json.loads(temp_file_data)
        target = link.approve_link_target(link_info, temp_file_info)
        return jsonify(target)
    response = Response(status=204)
    return response


@app.route('/link/<link_id>/target/', methods=["POST"])
def create_link_target(link_id):
    """Attach a file to a link.  Authenticated users attach their own files
    directly; anonymous users (when the link allows uploads) only stage
    temp files, which stay unapproved until the owner promotes them."""
    form = LinkTargetForm(MultiDict(request.json))
    link_data = current_app.redis_client.get("links:%s" %link_id)
    if not link_data:
        abort(404)
    if not form.validate():
        abort(400, form.errors)
    link_info = json.loads(link_data)
    if not current_user.is_authenticated():
        if link_info["expires_in"] < time.time():
            abort(404, "Link has expired")
        if not link_info["allow_uploads"]:
            abort(403, "Link does not allow anonymous uploads")
    target_id = form.target_id.data
    if current_user.is_authenticated():
        target_data = current_app.redis_client.get("files:%s" %target_id)
    else:
        target_data = current_app.redis_client.get("temp_files:%s" %target_id)
    if not target_data:
        abort(400, form.errors)
    target_info = json.loads(target_data)
    if current_user.is_authenticated():
        target = link.create_link_target(link_info, target_info)
    else:
        # Anonymous staging: placeholder fields until the owner approves.
        target_info["url"] = "#"
        target_info["count"] = 0
        target_info["approved"] = False
        target_info["created"] = time.time()
    return jsonify(target_info)


@app.route('/link/<link_id>/target/<target_id>', methods=["DELETE"])
@login_required
def delete_link_target(link_id, target_id):
    """Detach a target file from a link."""
    link_data = current_app.redis_client.get("links:%s" %link_id)
    # NOTE(review): link_target is assigned but never used.
    link_target = target_id
    if link_data is None:
        abort(404)
    target_data = current_app.redis_client.get("files:%s" %target_id)
    if not target_data:
        abort(400)
    link.delete_link_target(json.loads(link_data), json.loads(target_data))
    response = Response(status=204)
    return response


@app.route('/link/<link_id>/upload/<file_id>/<file_name>', methods=["GET"])
@login_required
def get_temp_link_upload(link_id, file_id, file_name):
    """Serve a still-unapproved upload to the link owner via a short-lived
    signed S3 URL (10 minutes)."""
    link_data = current_app.redis_client.get("links:%s" %link_id)
    temp_file = current_app.redis_client.get("temp_files:%s" %file_id)
    temp_file_exists = current_app.redis_client.sismember("link_uploads:%s" %(link_id), file_id)
    if link_data is None or temp_file is None or temp_file_exists is False:
        abort(404)
    file_info = json.loads(temp_file)
    bucket = file_info["bucket"]
    file_type = file_info["type"]
    # Shadows the route arg on purpose: the stored title is authoritative.
    file_name = file_info["title"]
    file_content_disposition_header = build_header(file_name).encode('ascii')
    response_headers = {"response-content-disposition": file_content_disposition_header,
                        "response-content-type": file_type}
    # NOTE(review): default_s3_conn is not defined in this module — presumably
    # injected by the star-import from app.forms or a sibling; confirm.
    url = default_s3_conn.generate_url(600, "GET",
                                       bucket=bucket, key=file_id,
                                       response_headers=response_headers)
    return redirect(url, 307)


@app.route('/link/<link_id>/upload/', methods=["POST"])
def link_target_upload_manage(link_id):
    """Drive the multi-phase S3 multipart upload: ?phase=init/form starts it,
    ?phase=complete finalizes it.  Anonymous uploads are staged under
    temp_files with a 10-minute TTL."""
    # TODO: Check if link allows uploads. Set limits
    phase = request.args.get("phase", "init")
    link_data = current_app.redis_client.get("links:%s" %link_id)
    if link_data is None:
        abort(404)
    link_info = json.loads(link_data)
    if not current_user.is_authenticated():
        if link_info["expires_in"] < time.time():
            abort(404, "Link has expired")
        if not link_info["allow_uploads"]:
            abort(403)
    if phase in ["form", "init"]:
        # TODO: Add to a queue to track failed uploads later
        # TODO: If s3_key exists, then check if the owner is the same
        form = NewFileForm(MultiDict(request.json))
        if not form.validate():
            abort(400, form.errors)
        is_anonymous = not current_user.is_authenticated()
        response_data = upload.upload_init(phase, form, is_anonymous)
    elif phase == "complete":
        s3_key = request.json.get("s3_key", None)
        multipart_id = request.json.get("mp_id", None)
        if multipart_id is None or s3_key is None:
            abort(400)
        response_data = upload.upload_complete(phase, s3_key, multipart_id)
    # NOTE(review): an unknown phase value reaches the return below with
    # response_data unbound (NameError -> 500); confirm intended.
    if phase in ["form", "complete"]:
        if not current_user.is_authenticated():
            s3_key = response_data["key"]
            current_app.redis_client.expire("temp_files:%s" %(s3_key), 600)
            current_app.redis_client.sadd("link_uploads:%s" %(link_id), s3_key)
    return jsonify(response_data)


@app.route('/link/<link_id>/upload/<file_name>', methods=["PUT"])
def link_target_upload(link_id, file_name):
    """Accept a curl-style PUT upload by redirecting (307, method-preserving)
    to a signed S3 upload URL, after checking expiry, permission and size."""
    #Check if link allows anonmous uploads
    # Set size limits for file.
    # Set storage type to reduced redundancy
    link_data = current_app.redis_client.get("links:%s" %link_id)
    if link_data is None:
        abort(404)
    link_info = json.loads(link_data)
    if link_info["expires_in"] < time.time():
        abort(404, "Link has expired")
    if not link_info["allow_uploads"]:
        abort(403)
    content_length = request.headers["content-length"]
    content_type = mimetypes.guess_type(file_name)[0] or \
                    "application/octet-stream"
    if int(content_length) > link_info["max_upload_size"]:
        abort(400)
    url = upload.upload_curl(file_name, content_length, content_type)
    # curl -Lv --upload-file ~/Downloads/xxx.pdf http://celery.meer.io:5000/link/xxx/upload/
    print url
    return redirect(url, 307)
apache-2.0
907,547,245,753,685,200
34.68323
92
0.59942
false
david-mateo/marabunta
marabunta/models/PerimeterDefenseRobot.py
1
7197
from marabunta import BaseRobot
from math import *

class PerimeterDefenseRobot(BaseRobot):
    """Robot model for perimeter defense.

    By iteratively calling the update() method,
    this robot will communicate with the rest
    of the swarm and move away from the others
    as far as possible. Takes a *threshold*
    parameter to determine when it has gone
    far enough and reached consensus. Can be
    set to 0.
    Obstacle avoidance (implemented in BaseRobot)
    will take precence over consensus reaching.
    """
    def __init__(self, body, network, threshold):
        # threshold is compared against squared vector norms throughout
        # (see spread_target / move_to_target).
        BaseRobot.__init__(self, body, network)
        self.threshold = threshold
        self.rendezvous_point = None   # current (x, y) waypoint, or None
        self.path = []                 # queue of upcoming waypoints
        self.known_lights = []         # (x, y) of lights already reported
        self.num_lights = 0            # count of distinct lights found
        return

    def set_path(self, path):
        """Replace the waypoint queue with a copy of *path*; return it."""
        self.path = path[:]
        return self.path

    def spread_target(self):
        """Get the other agent's state and
        compute the direction of motion that
        will maximize distance with them.
        This is computed as a linear combination
        of the positions of each neighbor
        relative to the agent, where each
        position is weighted by the inverse
        of the distance**2 to that robot,

            t_i = sum_j (r_j - r_i)/|r_j - r_i|^2 ,

        so that higher priority is given to
        move away from the closer agents, but
        still taking all into account and
        allowing for neighbors to "cancel each
        other out."
        Returns a vector pointing to the
        mean heading. If no agents are
        detected, returns None.
        """
        neis = self.get_agents().values()
        pos = self.body.get_position()
        if neis:
            target = [0.,0.]
            for nei in neis:
                d2 = (nei[0]-pos[0])**2 + (nei[1]-pos[1])**2
                # d2 == 0 would divide by zero (co-located agent); skip it.
                if d2>0:
                    target[0] += (pos[0] - nei[0])/d2
                    target[1] += (pos[1] - nei[1])/d2
            norm2 = target[0]*target[0] + target[1]*target[1]
            # Consensus reached: repulsion is weaker than the threshold.
            if norm2 < self.threshold:
                target = None
        else:
            target = None
        return target

    def rendezvous_target(self):
        """Compute the target direction of movement
        that allows the robot to reach the
        rendezvous point (stored in self.rendezvous_point).
        When the robot is close enough to the point
        this sets self.rendezvous_point to None
        and also returns None as the target.
        """
        if self.rendezvous_point:
            pos = self.body.get_position()
            target = [ self.rendezvous_point[0]-pos[0] , self.rendezvous_point[1]-pos[1] ]
            distance = sqrt(target[0]*target[0]+target[1]*target[1])
            if distance < 0.10: # rendezvous point reached
                # Advance to the next waypoint; the bare except doubles as
                # the "path exhausted" branch (path.pop(0) on empty raises
                # IndexError) and returns the [0, 0] stop sentinel.
                try:
                    self.rendezvous_point = self.path.pop(0)
                    target = self.rendezvous_target()
                except:
                    target = [0., 0.]
                    self.rendezvous_point = None
        else:
            # No active waypoint: try to start on the next one from the path;
            # empty path -> no target.
            try:
                self.rendezvous_point = self.path.pop(0)
                target = self.rendezvous_target()
            except:
                target = None
                self.rendezvous_point = None
        return target

    def move_to_target(self, target, deltat, v, block=False):
        """If the norm2 of *target* is is larger
        than *threshold*, align the robot to
        *target* and move forward for *deltat*
        at a speed *v*. Else, stop for *deltat*.
        """
        if target[0]**2 + target[1]**2 > self.threshold*self.threshold:
            # Some robots allow for a block argument in
            # the align method.
            # NOTE(review): the fallback calls self.align, not
            # self.body.align — confirm BaseRobot provides align().
            try:
                self.body.align(target, block)
            except (TypeError,AttributeError):
                self.align(target)
            self.move_forward(deltat, v)
        else:
            self.move_forward(deltat, 0)
        return

    def light_detected(self):
        """If light is detected and is a new light,
        broadcast its positon and add it to the
        list of known light sources.
        Returns whatever the body reports (falsy when the body has no
        light sensor).
        """
        try:
            light = self.body.light_detected()
        except AttributeError:
            # Body model has no light sensor.
            light = False
        if light:
            x, y = self.body.get_position()
            self.add_light(x,y)
        return light

    def process_messages(self):
        """Drain the network queue and act on known message types:
        "stop" raises, "goto x y" sets the rendezvous point, and
        "light x y" registers a light source.  Returns the raw messages.
        """
        messages = self.network.get_messages()
        for message in messages:
            if len(message)>3:
                mesdata = message.split()
                if mesdata[0]=="stop":
                    # Emergency stop: propagate up to the caller of update().
                    raise Exception("Stop!")
                elif mesdata[0]=="goto":
                    try:
                        self.rendezvous_point = (float(mesdata[1]), float(mesdata[2]))
                    except:
                        print("#PerimenterDefenseRobot: Strange message received: ",message)
                elif mesdata[0]=="light":
                    try:
                        x, y = float(mesdata[1]), float(mesdata[2])
                    except:
                        x, y = None, None
                        print("#PerimenterDefenseRobot: Strange message received: ",message)
                    # NOTE(review): on a malformed message this still calls
                    # add_light(None, None), which will raise in the distance
                    # arithmetic — confirm intended.
                    self.add_light(x,y)
        return messages

    def add_light(self, x, y):
        """Only add light to the list of known
        lights if this new one is at least 0.8
        from any other previously known light.
        Newly registered lights are broadcast to the swarm.
        """
        if all( (x-light[0])**2 + (y-light[1])**2 > 0.8 * 0.8 for light in self.known_lights):
            self.known_lights.append( (x,y) )
            self.num_lights += 1
            self.network.send_message("light\t%.2f\t%.2f\n"%(x,y))
        return

    def update(self, deltat, v=None):
        """Perform one step of the consensus
        protocol. This is the main "behavior"
        of the robot. It consists of 4 steps:
            1. Broadcast its state.
            2. Perform swarming. In practice,
               this means computing the desired
               target direction of motion.
               (in this case, perform perimeter defense)
            3. Correct the desired target
               in order to avoid obstacles.
            4. Move in the desired target direction.
        Returns (light_detected, stop): *stop* is True when the final
        rendezvous point has been reached.
        """
        self.broadcast_state()
        self.process_messages()
        # If goto message received, go there
        target = self.rendezvous_target()
        # check if rendezvous point has been reached
        if target and target[0]==0 and target[1]==0:
            return False, True # STOP HERE!
        if not target:
            # Perform swarming
            target = self.spread_target()
        if not target:
            # Nothing to react to: keep the current heading, scaled so it
            # passes the threshold test in move_to_target.
            h= self.body.get_heading()
            target = [10.*sqrt(self.threshold)*cos(h) ,10.*sqrt(self.threshold)*sin(h)]
        # Avoid obstacles
        target = self.correct_target(target)
        obstacle = self.obstacle_near()
        # Slow down near obstacles (and pass obstacle as the align "block").
        if obstacle and v:
            v *= 0.6
        self.move_to_target(target, deltat, v, obstacle)
        light = self.light_detected()
        return light, False
gpl-3.0
-524,198,928,009,362,900
35.348485
94
0.539669
false
demisto/content
Packs/PANOSPolicyOptimizer/Integrations/PANOSPolicyOptimizer/PANOSPolicyOptimizer.py
1
18763
import hashlib from CommonServerPython import * class Client: """ Client to use in the APN-OS Policy Optimizer integration. """ def __init__(self, url: str, username: str, password: str, vsys: str, device_group: str, verify: bool, tid: int): # The TID is used to track individual commands send to the firewall/Panorama during a PHP session, and # is also used to generate the security token (Data String) that is used to validate each command. # Setting tid as a global variable with an arbitrary value of 50 self.session_metadata: Dict[str, Any] = {'panorama': url, 'base_url': url, 'username': username, 'password': password, 'tid': tid} if device_group and vsys: raise DemistoException( 'Cannot configure both vsys and Device group. Set vsys for firewall, set Device group for Panorama.') if not device_group and not vsys: raise DemistoException('Set vsys for firewall or Device group for Panorama.') self.machine = vsys if vsys else device_group self.verify = verify handle_proxy() # Use Session() in order to maintain cookies for persisting the login PHP session cookie self.session = requests.Session() def session_post(self, url: str, json_cmd: dict) -> dict: response = self.session.post(url=url, json=json_cmd, verify=self.verify) json_response = json.loads(response.text) if 'type' in json_response and json_response['type'] == 'exception': if 'message' in json_response: raise Exception(f'Operation to PAN-OS failed. with: {str(json_response["message"])}') raise Exception(f'Operation to PAN-OS failed. 
with: {str(json_response)}') return json_response def login(self) -> str: # This is the data sent to Panorama from the Login screen to complete the login and get a PHPSESSID cookie login_data = { 'prot': 'https:', 'server': self.session_metadata['panorama'], 'authType': 'init', 'challengeCookie': '', 'user': self.session_metadata['username'], 'passwd': self.session_metadata['password'], 'challengePwd': '', 'ok': 'Log In' } try: # Use a POST command to login to Panorama and create an initial session self.session.post(url=f'{self.session_metadata["base_url"]}/php/login.php?', data=login_data, verify=self.verify) # Use a GET command to the base URL to get the ServerToken which looks like this: # window.Pan.st.st.st539091 = "8PR8ML4A67PUMD3NU00L3G67M4958B996F61Q97T" response = self.session.post(url=f'{self.session_metadata["base_url"]}/', verify=self.verify) except Exception as err: raise Exception(f'Failed to login. Please double-check the credentials and the server URL. {str(err)}') # Use RegEx to parse the ServerToken string from the JavaScript variable match = re.search(r'(?:window\.Pan\.st\.st\.st[0-9]+\s=\s\")(\w+)(?:\")', response.text) # The JavaScript calls the ServerToken a "cookie" so we will use that variable name # The "data" field is the MD5 calculation of "cookie" + "TID" if not match: raise Exception('Failed to login. 
Please double-check the credentials and the server URL.') return match.group(1) def logout(self): self.session.post(url=f'{self.session_metadata["base_url"]}/php/logout.php?', verify=False) def token_generator(self) -> str: """ The PHP Security Token (Data String) is generated with the TID (counter) and a special session "cookie" :return: hash token """ data_code = f'{self.session_metadata["cookie"]}{str(self.session_metadata["tid"])}' data_hash = hashlib.md5(data_code.encode()) # Use the hashlib library function to calculate the MD5 data_string = data_hash.hexdigest() # Convert the hash to a proper hex string return data_string def get_policy_optimizer_statistics(self) -> dict: self.session_metadata['tid'] += 1 # Increment TID json_cmd = { "action": "PanDirect", "method": "run", "data": [ self.token_generator(), "PoliciesDirect.getRuleCountInRuleUsage", [{"type": "security", "position": "main", "vsysName": self.machine}] ], "type": "rpc", "tid": self.session_metadata['tid'] } return self.session_post( url=f'{self.session_metadata["base_url"]}/php/utils/router.php/PoliciesDirect.getRuleCountInRuleUsage', json_cmd=json_cmd) def policy_optimizer_no_apps(self) -> dict: self.session_metadata['tid'] += 1 # Increment TID json_cmd = { "action": "PanDirect", "method": "run", "data": [ self.token_generator(), "PoliciesDirect.getPoliciesByUsage", [ { "type": "security", "position": "main", "vsysName": self.machine, "isCmsSelected": False, "isMultiVsys": False, "showGrouped": False, "usageAttributes": { "timeframeTag": "30", "application/member": "any", "apps-seen-count": "geq \'1\'", "action": "allow" }, "pageContext": "app_usage", "field": "$.bytes", "direction": "DESC" } ] ], "type": "rpc", "tid": self.session_metadata['tid']} return self.session_post( url=f'{self.session_metadata["base_url"]}/php/utils/router.php/PoliciesDirect.getPoliciesByUsage', json_cmd=json_cmd) def policy_optimizer_get_unused_apps(self) -> dict: self.session_metadata['tid'] += 1 # Increment TID 
json_cmd = { "action": "PanDirect", "method": "run", "data": [ self.token_generator(), "PoliciesDirect.getPoliciesByUsage", [ { "type": "security", "position": "main", "vsysName": self.machine, "serialNumber": "", "isCmsSelected": False, "isMultiVsys": False, "showGrouped": False, "usageAttributes": { "timeframeTag": "30", "application/member": "unused", "action": "allow" }, "pageContext": "app_usage", "field": "$.bytes", "direction": "DESC" } ] ], "type": "rpc", "tid": self.session_metadata['tid']} return self.session_post( url=f'{self.session_metadata["base_url"]}/php/utils/router.php/PoliciesDirect.getPoliciesByUsage', json_cmd=json_cmd) def policy_optimizer_get_rules(self, timeframe: str, usage: str, exclude: bool) -> dict: self.session_metadata['tid'] += 1 # Increment TID json_cmd = { "action": "PanDirect", "method": "run", "data": [ self.token_generator(), "PoliciesDirect.getPoliciesByUsage", [ { "type": "security", "position": "main", "vsysName": self.machine, "isCmsSelected": False, "isMultiVsys": False, "showGrouped": False, "usageAttributes": { "timeframe": timeframe, "usage": usage, "exclude": exclude, "exclude-reset-text": "90" }, "pageContext": "rule_usage" } ] ], "type": "rpc", "tid": self.session_metadata['tid']} return self.session_post( url=f'{self.session_metadata["base_url"]}/php/utils/router.php/PoliciesDirect.getPoliciesByUsage', json_cmd=json_cmd) def policy_optimizer_app_and_usage(self, rule_uuid: str) -> dict: self.session_metadata['tid'] += 1 # Increment TID json_cmd = {"action": "PanDirect", "method": "run", "data": [ self.token_generator(), "PoliciesDirect.getAppDetails", [ { "type": "security", "vsysName": self.machine, "position": "main", "ruleUuidList": [rule_uuid], "summary": "no", "resultfields": "<member>apps-seen</member>" "<member>last-app-seen-since-count" "</member><member>days-no-new-app-count</member>", "appsSeenTimeframe": "any", "trafficTimeframe": 30 } ] ], "type": "rpc", "tid": self.session_metadata['tid']} return 
self.session_post( url=f'{self.session_metadata["base_url"]}/php/utils/router.php/PoliciesDirect.getAppDetails', json_cmd=json_cmd) def policy_optimizer_get_dag(self, dag: str) -> dict: self.session_metadata['tid'] += 1 # Increment TID json_cmd = { "action": "PanDirect", "method": "execute", "data": [ self.token_generator(), "AddressGroup.showDynamicAddressGroup", { "id": dag, "vsysName": self.machine } ], "type": "rpc", "tid": self.session_metadata['tid']} return self.session_post( url=f'{self.session_metadata["base_url"]}/php/utils/router.php/AddressGroup.showDynamicAddressGroup', json_cmd=json_cmd) def get_policy_optimizer_statistics_command(client: Client) -> CommandResults: """ Gets the Policy Optimizer Statistics as seen from the User Interface """ outputs_stats = {} raw_response = client.get_policy_optimizer_statistics() stats = raw_response['result'] if '@status' in stats and stats['@status'] == 'error': raise Exception(f'Operation Failed with: {str(stats)}') stats = stats['result'] # we need to spin the keys and values and put them into dict so they'll look better in the context for i in stats['entry']: outputs_stats[i['@name']] = i['text'] return CommandResults( outputs_prefix='PanOS.PolicyOptimizer.Stats', outputs=outputs_stats, readable_output=tableToMarkdown(name='Policy Optimizer Statistics:', t=stats['entry'], removeNull=True), raw_response=raw_response ) def policy_optimizer_no_apps_command(client: Client) -> CommandResults: """ Gets the Policy Optimizer Statistics as seen from the User Interface """ raw_response = client.policy_optimizer_no_apps() stats = raw_response['result'] if '@status' in stats and stats['@status'] == 'error': raise Exception(f'Operation Failed with: {str(stats)}') stats = stats['result'] if '@count' in stats and stats['@count'] == '0': return CommandResults(readable_output='No Rules without apps were found.', raw_response=raw_response) rules_no_apps = stats['entry'] if not isinstance(rules_no_apps, list): rules_no_apps = 
rules_no_apps[0] headers = ['@name', '@uuid', 'action', 'description', 'source', 'destination'] return CommandResults( outputs_prefix='PanOS.PolicyOptimizer.NoApps', outputs_key_field='@uuid', outputs=rules_no_apps, readable_output=tableToMarkdown(name='Policy Optimizer No App Specified:', t=rules_no_apps, headers=headers, removeNull=True), raw_response=raw_response ) def policy_optimizer_get_unused_apps_command(client: Client) -> CommandResults: """ Gets the Policy Optimizer Statistics as seen from the User Interface """ raw_response = client.policy_optimizer_get_unused_apps() stats = raw_response['result'] if '@status' in stats and stats['@status'] == 'error': raise Exception(f'Operation Failed with: {str(stats)}') stats = stats['result'] if '@count' in stats and stats['@count'] == '0': return CommandResults(readable_output='No Rules with unused apps were found.', raw_response=raw_response) return CommandResults( outputs_prefix='PanOS.PolicyOptimizer.UnusedApps', outputs_key_field='Stats', outputs=stats, readable_output=tableToMarkdown(name='Policy Optimizer Unused Apps:', t=stats['entry'], removeNull=True), raw_response=raw_response ) def policy_optimizer_get_rules_command(client: Client, args: dict) -> CommandResults: """ Gets the unused rules Statistics as seen from the User Interface """ timeframe = str(args.get('timeframe')) usage = str(args.get('usage')) exclude = argToBoolean(args.get('exclude')) raw_response = client.policy_optimizer_get_rules(timeframe, usage, exclude) stats = raw_response['result'] if '@status' in stats and stats['@status'] == 'error': raise Exception(f'Operation Failed with: {str(stats)}') stats = stats['result'] if '@count' in stats and stats['@count'] == '0': return CommandResults(readable_output=f'No {usage} rules where found.', raw_response=raw_response) rules = stats['entry'] if not isinstance(rules, list): rules = rules[0] headers = ['@name', '@uuid', 'action', 'description', 'source', 'destination'] return CommandResults( 
outputs_prefix=f'PanOS.PolicyOptimizer.{usage}Rules', outputs_key_field='@uuid', outputs=rules, readable_output=tableToMarkdown(name=f'PolicyOptimizer {usage}Rules:', t=rules, headers=headers, removeNull=True), raw_response=raw_response ) def policy_optimizer_app_and_usage_command(client: Client, args: dict) -> CommandResults: """ Gets the Policy Optimizer Statistics as seen from the User Interface """ rule_uuid = str(args.get('rule_uuid')) raw_response = client.policy_optimizer_app_and_usage(rule_uuid) stats = raw_response['result'] if '@status' in stats and stats['@status'] == 'error': raise Exception(f'Operation Failed with: {str(stats)}') stats = stats['result'] if '@count' in stats and stats['@count'] == '0': return CommandResults(readable_output=f'Rule with UUID:{rule_uuid} does not use apps.', raw_response=raw_response) rule_stats = stats['rules']['entry'][0] return CommandResults( outputs_prefix='PanOS.PolicyOptimizer.AppsAndUsage', outputs_key_field='@uuid', outputs=rule_stats, readable_output=tableToMarkdown(name='Policy Optimizer Apps and Usage:', t=rule_stats, removeNull=True), raw_response=raw_response ) def policy_optimizer_get_dag_command(client: Client, args: dict) -> CommandResults: """ Gets the DAG """ dag = str(args.get('dag')) raw_response = client.policy_optimizer_get_dag(dag) result = raw_response['result'] if '@status' in result and result['@status'] == 'error': raise Exception(f'Operation Failed with: {str(result)}') try: result = result['result']['dyn-addr-grp']['entry'][0]['member-list']['entry'] except KeyError: raise Exception(f'Dynamic Address Group: {dag} was not found.') return CommandResults( outputs_prefix='PanOS.PolicyOptimizer.DAG', outputs_key_field='Stats', outputs=result, readable_output=tableToMarkdown(name='Policy Optimizer Dynamic Address Group:', t=result, removeNull=True), raw_response=raw_response ) def main(): command = demisto.command() params = demisto.params() args = demisto.args() demisto.debug(f'Command being called 
is: {command}') client: Client = None # type: ignore try: client = Client(url=params.get('server_url'), username=params['credentials']['identifier'], password=params['credentials']['password'], vsys=params.get('vsys'), device_group=params.get('device_group'), verify=not params.get('insecure'), tid=50) client.session_metadata['cookie'] = client.login() # Login to PAN-OS and return the GUI cookie value if command == 'test-module': return_results('ok') # if login was successful, instance configuration is ok. elif command == 'pan-os-po-get-stats': return_results(get_policy_optimizer_statistics_command(client)) elif command == 'pan-os-po-no-apps': return_results(policy_optimizer_no_apps_command(client)) elif command == 'pan-os-po-unused-apps': return_results(policy_optimizer_get_unused_apps_command(client)) elif command == 'pan-os-po-get-rules': return_results(policy_optimizer_get_rules_command(client, args)) elif command == 'pan-os-po-app-and-usage': return_results(policy_optimizer_app_and_usage_command(client, args)) elif command == 'pan-os-get-dag': return_results(policy_optimizer_get_dag_command(client, args)) else: raise NotImplementedError(f'Command {command} was not implemented.') except Exception as err: return_error(f'{str(err)}.\n Trace:{traceback.format_exc()}') finally: try: client.logout() # Logout of PAN-OS except Exception as err: return_error(f'{str(err)}.\n Trace:{traceback.format_exc()}') if __name__ in ("__builtin__", "builtins", '__main__'): main()
mit
-3,208,916,145,890,129,000
41.450226
122
0.557054
false
Hernanarce/pelisalacarta
python/main-classic/platformcode/platformtools.py
1
48873
# -*- coding: utf-8 -*- # ------------------------------------------------------------ # pelisalacarta 4 # Copyright 2015 tvalacarta@gmail.com # http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/ # # Distributed under the terms of GNU General Public License v3 (GPLv3) # http://www.gnu.org/licenses/gpl-3.0.html # ------------------------------------------------------------ # This file is part of pelisalacarta 4. # # pelisalacarta 4 is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # pelisalacarta 4 is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with pelisalacarta 4. If not, see <http://www.gnu.org/licenses/>. # ------------------------------------------------------------ # platformtools # ------------------------------------------------------------ # Herramientas responsables de adaptar los diferentes # cuadros de dialogo a una plataforma en concreto, # en este caso Kodi. 
# version 2.0 # ------------------------------------------------------------ import os import sys import urllib import xbmc import xbmcgui import xbmcplugin from core import config from core import logger from core.item import Item from core.tmdb import Tmdb def dialog_ok(heading, line1, line2="", line3=""): dialog = xbmcgui.Dialog() return dialog.ok(heading, line1, line2, line3) def dialog_notification(heading, message, icon=0, time=5000, sound=True): dialog = xbmcgui.Dialog() try: l_icono = xbmcgui.NOTIFICATION_INFO, xbmcgui.NOTIFICATION_WARNING, xbmcgui.NOTIFICATION_ERROR dialog.notification(heading, message, l_icono[icon], time, sound) except: dialog_ok(heading, message) def dialog_yesno(heading, line1, line2="", line3="", nolabel="No", yeslabel="Si", autoclose=""): dialog = xbmcgui.Dialog() if autoclose: return dialog.yesno(heading, line1, line2, line3, nolabel, yeslabel, autoclose) else: return dialog.yesno(heading, line1, line2, line3, nolabel, yeslabel) def dialog_select(heading, _list): return xbmcgui.Dialog().select(heading, _list) def dialog_progress(heading, line1, line2=" ", line3=" "): dialog = xbmcgui.DialogProgress() dialog.create(heading, line1, line2, line3) return dialog def dialog_progress_bg(heading, message=""): try: dialog = xbmcgui.DialogProgressBG() dialog.create(heading, message) return dialog except: return dialog_progress(heading, message) def dialog_input(default="", heading="", hidden=False): keyboard = xbmc.Keyboard(default, heading, hidden) keyboard.doModal() if keyboard.isConfirmed(): return keyboard.getText() else: return None def dialog_numeric(_type, heading, default=""): dialog = xbmcgui.Dialog() d = dialog.numeric(_type, heading, default) return d def itemlist_refresh(): xbmc.executebuiltin("Container.Refresh") def itemlist_update(item): xbmc.executebuiltin("Container.Update(" + sys.argv[0] + "?" 
+ item.tourl() + ")") def render_items(itemlist, parent_item): """ Función encargada de mostrar el itemlist en kodi, se pasa como parametros el itemlist y el item del que procede @type itemlist: list @param itemlist: lista de elementos a mostrar @type parent_item: item @param parent_item: elemento padre """ # Si el itemlist no es un list salimos if not type(itemlist) == list: if config.get_platform() == "boxee": xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True) return # Si no hay ningun item, mostramos un aviso if not len(itemlist): itemlist.append(Item(title="No hay elementos que mostrar")) # Recorremos el itemlist for item in itemlist: #logger.debug(item) # Si el item no contiene categoria, le ponemos la del item padre if item.category == "": item.category = parent_item.category # Si el item no contiene fanart, le ponemos el del item padre if item.fanart == "": item.fanart = parent_item.fanart # Formatear titulo if item.text_color: item.title = '[COLOR %s]%s[/COLOR]' % (item.text_color, item.title) if item.text_blod: item.title = '[B]%s[/B]' % item.title if item.text_italic: item.title = '[I]%s[/I]' % item.title #Añade headers a las imagenes si estan en un servidor con cloudflare from core import httptools item.thumbnail = httptools.get_url_headers(item.thumbnail) item.fanart = httptools.get_url_headers(item.fanart) # IconImage para folder y video if item.folder: icon_image = "DefaultFolder.png" else: icon_image = "DefaultVideo.png" # Creamos el listitem listitem = xbmcgui.ListItem(item.title, iconImage=icon_image, thumbnailImage=item.thumbnail) # Ponemos el fanart if item.fanart: listitem.setProperty('fanart_image', item.fanart) else: listitem.setProperty('fanart_image', os.path.join(config.get_runtime_path(), "fanart.jpg")) # TODO: ¿Se puede eliminar esta linea? yo no he visto que haga ningun efecto. 
xbmcplugin.setPluginFanart(int(sys.argv[1]), os.path.join(config.get_runtime_path(), "fanart.jpg")) # Añadimos los infoLabels set_infolabels(listitem, item) # Montamos el menu contextual context_commands = set_context_commands(item, parent_item) # Añadimos el item if config.get_platform() == "boxee": xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url='%s?%s' % (sys.argv[0], item.tourl()), listitem=listitem, isFolder=item.folder) else: listitem.addContextMenuItems(context_commands, replaceItems=True) if not item.totalItems: item.totalItems = 0 xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url='%s?%s' % (sys.argv[0], item.tourl()), listitem=listitem, isFolder=item.folder, totalItems=item.totalItems) # Fijar los tipos de vistas... if config.get_setting("forceview") == "true": # ...forzamos segun el viewcontent xbmcplugin.setContent(int(sys.argv[1]), parent_item.viewcontent) #logger.debug(parent_item) elif parent_item.channel not in ["channelselector", ""]: # ... o segun el canal xbmcplugin.setContent(int(sys.argv[1]), "movies") # Fijamos el "breadcrumb" xbmcplugin.setPluginCategory(handle=int(sys.argv[1]), category=parent_item.category.capitalize()) # No ordenar items xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_NONE) # Cerramos el directorio xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True) # Fijar la vista if config.get_setting("forceview") == "true": viewmode_id = get_viewmode_id(parent_item) xbmc.executebuiltin("Container.SetViewMode(%s)" % viewmode_id) def get_viewmode_id(parent_item): # viewmode_json habria q guardarlo en un archivo y crear un metodo para q el user fije sus preferencias en: # user_files, user_movies, user_tvshows, user_season y user_episodes. 
viewmode_json = {'skin.confluence': {'default_files': 50, 'default_movies': 515, 'default_tvshows': 508, 'default_seasons': 503, 'default_episodes': 504, 'view_list': 50, 'view_thumbnails': 500, 'view_movie_with_plot': 503}, 'skin.estuary': {'default_files': 50, 'default_movies': 54, 'default_tvshows': 502, 'default_seasons': 500, 'default_episodes': 53, 'view_list': 50, 'view_thumbnails': 500, 'view_movie_with_plot': 54}} # Si el parent_item tenia fijado un viewmode usamos esa vista... if parent_item.viewmode == 'movie': # Remplazamos el antiguo viewmode 'movie' por 'thumbnails' parent_item.viewmode = 'thumbnails' if parent_item.viewmode in ["list", "movie_with_plot", "thumbnails"]: viewName = "view_" + parent_item.viewmode '''elif isinstance(parent_item.viewmode, int): # only for debug viewName = parent_item.viewmode''' #...sino ponemos la vista por defecto en funcion del viewcontent else: viewName = "default_" + parent_item.viewcontent skinName = xbmc.getSkinDir() if skinName not in viewmode_json: skinName = 'skin.confluence' view_skin = viewmode_json[skinName] return view_skin.get(viewName, 50) def set_infolabels(listitem, item, player=False): """ Metodo para pasar la informacion al listitem (ver tmdb.set_InfoLabels() ) item.infoLabels es un dicionario con los pares de clave/valor descritos en: http://mirrors.xbmc.org/docs/python-docs/14.x-helix/xbmcgui.html#ListItem-setInfo @param listitem: objeto xbmcgui.ListItem @type listitem: xbmcgui.ListItem @param item: objeto Item que representa a una pelicula, serie o capitulo @type item: item """ if item.infoLabels: if 'mediatype' not in item.infoLabels: item.infoLabels['mediatype'] = item.contentType listitem.setInfo("video", item.infoLabels) if player and not item.contentTitle: if item.fulltitle: listitem.setInfo("video", {"Title": item.fulltitle}) else: listitem.setInfo("video", {"Title": item.title}) elif not player: listitem.setInfo("video", {"Title": item.title}) # Añadido para Kodi Krypton (v17) if 
config.get_platform(True)['num_version'] >= 17.0: listitem.setArt({"poster": item.thumbnail}) def set_context_commands(item, parent_item): """ Función para generar los menus contextuales. 1. Partiendo de los datos de item.context a. Metodo antiguo item.context tipo str separando las opciones por "|" (ejemplo: item.context = "1|2|3") (solo predefinidos) b. Metodo list: item.context es un list con las diferentes opciones del menu: - Predefinidos: Se cargara una opcion predefinida con un nombre. item.context = ["1","2","3"] - dict(): Se cargara el item actual modificando los campos que se incluyan en el dict() en caso de modificar los campos channel y action estos serán guardados en from_channel y from_action. item.context = [{"title":"Nombre del menu", "action": "action del menu", "channel":"channel del menu"}, {...}] 2. Añadiendo opciones segun criterios Se pueden añadir opciones al menu contextual a items que cumplan ciertas condiciones. 3. Añadiendo opciones a todos los items Se pueden añadir opciones al menu contextual para todos los items 4. Se pueden deshabilitar las opciones del menu contextual añadiendo un comando 'no_context' al item.context. Las opciones que Kodi, el skin u otro añadido añada al menu contextual no se pueden deshabilitar. 
@param item: elemento que contiene los menu contextuales @type item: item @param parent_item: @type parent_item: item """ context_commands = [] num_version_xbmc = config.get_platform(True)['num_version'] # Creamos un list con las diferentes opciones incluidas en item.context if type(item.context) == str: context = item.context.split("|") elif type(item.context) == list: context = item.context else: context = [] # Opciones segun item.context for command in context: # Predefinidos if type(command) == str: if command == "no_context": return [] if command == "buscar_trailer" or item.action == "findvideos": context_commands.append(("Buscar Trailer", "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone( channel="trailertools", action="buscartrailer", contextual=True ).tourl()))) # Formato dict if type(command) == dict: # Los parametros del dict, se sobreescriben al nuevo context_item en caso de sobreescribir "action" y # "channel", los datos originales se guardan en "from_action" y "from_channel" if "action" in command: command["from_action"] = item.action if "channel" in command: command["from_channel"] = item.channel if "goto" in command: context_commands.append((command["title"], "XBMC.Container.Refresh (%s?%s)" % (sys.argv[0], item.clone(**command).tourl()))) else: context_commands.append( (command["title"], "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone(**command).tourl()))) # Opciones segun criterios, solo si el item no es un tag (etiqueta), ni es "Añadir a la biblioteca", etc... 
if item.action and item.action not in ["add_pelicula_to_library", "add_serie_to_library", "buscartrailer"]: # Mostrar informacion: si el item tiene plot suponemos q es una serie, temporada, capitulo o pelicula if item.infoLabels['plot'] and (num_version_xbmc < 17.0 or item.contentType == 'season'): context_commands.append(("Información", "XBMC.Action(Info)")) # ExtendedInfo: Si esta instalado el addon y se cumplen una serie de condiciones if xbmc.getCondVisibility('System.HasAddon(script.extendedinfo)') \ and config.get_setting("extended_info") == "true": if item.contentType == "episode" and item.contentEpisodeNumber and item.contentSeason \ and (item.infoLabels['tmdb_id'] or item.contentSerieName): param = "tvshow_id =%s, tvshow=%s, season=%s, episode=%s" \ % (item.infoLabels['tmdb_id'], item.contentSerieName, item.contentSeason, item.contentEpisodeNumber) context_commands.append(("ExtendedInfo", "XBMC.RunScript(script.extendedinfo,info=extendedepisodeinfo,%s)" % param)) elif item.contentType == "season" and item.contentSeason \ and (item.infoLabels['tmdb_id'] or item.contentSerieName): param = "tvshow_id =%s,tvshow=%s, season=%s" \ % (item.infoLabels['tmdb_id'], item.contentSerieName, item.contentSeason) context_commands.append(("ExtendedInfo", "XBMC.RunScript(script.extendedinfo,info=seasoninfo,%s)" % param)) elif item.contentType == "tvshow" and (item.infoLabels['tmdb_id'] or item.infoLabels['tvdb_id'] or item.infoLabels['imdb_id'] or item.contentSerieName): param = "id =%s,tvdb_id=%s,imdb_id=%s,name=%s" \ % (item.infoLabels['tmdb_id'], item.infoLabels['tvdb_id'], item.infoLabels['imdb_id'], item.contentSerieName) context_commands.append(("ExtendedInfo", "XBMC.RunScript(script.extendedinfo,info=extendedtvinfo,%s)" % param)) elif item.contentType == "movie" and (item.infoLabels['tmdb_id'] or item.infoLabels['imdb_id'] or item.contentTitle): param = "id =%s,imdb_id=%s,name=%s" \ % (item.infoLabels['tmdb_id'], item.infoLabels['imdb_id'], item.contentTitle) 
context_commands.append(("ExtendedInfo", "XBMC.RunScript(script.extendedinfo,info=extendedinfo,%s)" % param)) # InfoPlus if config.get_setting("infoplus") == "true": if item.infoLabels['tmdb_id'] or item.infoLabels['imdb_id'] or item.infoLabels['tvdb_id'] or \ (item.contentTitle and item.infoLabels["year"]) or item.contentSerieName: context_commands.append(("InfoPlus","XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone( channel="infoplus", action="start", from_channel=item.channel).tourl()))) # Ir al Menu Principal (channel.mainlist) if parent_item.channel not in ["novedades", "channelselector"] and item.action != "mainlist" \ and parent_item.action != "mainlist": context_commands.append(("Ir al Menu Principal", "XBMC.Container.Refresh (%s?%s)" % (sys.argv[0], Item(channel=item.channel, action="mainlist").tourl()))) # Añadir a Favoritos if num_version_xbmc < 17.0 and ((item.channel not in ["favoritos", "biblioteca", "ayuda", ""] or item.action in ["update_biblio"]) and not parent_item.channel == "favoritos"): context_commands.append((config.get_localized_string(30155), "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone(channel="favoritos", action="addFavourite", from_channel=item.channel, from_action=item.action).tourl()))) if item.channel != "biblioteca": # Añadir Serie a la biblioteca if item.action in ["episodios", "get_episodios"] and item.contentSerieName: context_commands.append(("Añadir Serie a Biblioteca", "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone(action="add_serie_to_library", from_action=item.action).tourl()))) # Añadir Pelicula a Biblioteca elif item.action in ["detail", "findvideos"] and item.contentType == 'movie' and item.contentTitle: context_commands.append(("Añadir Pelicula a Biblioteca", "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone(action="add_pelicula_to_library", from_action=item.action).tourl()))) if item.channel != "descargas": # Descargar pelicula if item.contentType == "movie" and item.contentTitle: 
context_commands.append(("Descargar Pelicula", "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone(channel="descargas", action="save_download", from_channel=item.channel, from_action=item.action) .tourl()))) elif item.contentSerieName: # Descargar serie if item.contentType == "tvshow": context_commands.append(("Descargar Serie", "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone(channel="descargas", action="save_download", from_channel=item.channel, from_action=item.action).tourl()))) # Descargar episodio if item.contentType == "episode": context_commands.append(("Descargar Episodio", "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone(channel="descargas", action="save_download", from_channel=item.channel, from_action=item.action).tourl()))) # Descargar temporada if item.contentType == "season": context_commands.append(("Descargar Temporada", "XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone(channel="descargas", action="save_download", from_channel=item.channel, from_action=item.action).tourl()))) # Abrir configuración if parent_item.channel not in ["configuracion", "novedades", "buscador"]: context_commands.append(("Abrir Configuración", "XBMC.Container.Update(%s?%s)" % (sys.argv[0], Item(channel="configuracion", action="mainlist").tourl()))) # Añadir SuperFavourites al menu contextual (1.0.53 o superior necesario) sf_file_path = xbmc.translatePath("special://home/addons/plugin.program.super.favourites/LaunchSFMenu.py") check_sf = os.path.exists(sf_file_path) if check_sf and xbmc.getCondVisibility('System.HasAddon("plugin.program.super.favourites")'): context_commands.append(("Super Favourites Menu", "XBMC.RunScript(special://home/addons/plugin.program.super.favourites/LaunchSFMenu.py)")) return sorted(context_commands, key=lambda comand: comand[0]) def is_playing(): return xbmc.Player().isPlaying() def play_video(item, strm=False): logger.info("pelisalacarta.platformcode.platformtools play_video") # logger.debug(item.tostring('\n')) if item.channel == 'descargas': 
logger.info("Reproducir video local: %s [%s]" % (item.title, item.url)) xlistitem = xbmcgui.ListItem(path=item.url, thumbnailImage=item.thumbnail) set_infolabels(xlistitem, item, True) xbmc.Player().play(item.url, xlistitem) return default_action = config.get_setting("default_action") logger.info("default_action=" + default_action) # Abre el diálogo de selección para ver las opciones disponibles opciones, video_urls, seleccion, salir = get_dialogo_opciones(item, default_action, strm) if salir: return # se obtienen la opción predeterminada de la configuración del addon seleccion = get_seleccion(default_action, opciones, seleccion, video_urls) if seleccion < 0: # Cuadro cancelado return logger.info("seleccion=%d" % seleccion) logger.info("seleccion=%s" % opciones[seleccion]) # se ejecuta la opcion disponible, jdwonloader, descarga, favoritos, añadir a la biblioteca... SI NO ES PLAY salir = set_opcion(item, seleccion, opciones, video_urls) if salir: return # obtenemos el video seleccionado mediaurl, view, mpd = get_video_seleccionado(item, seleccion, video_urls) if mediaurl == "": return # se obtiene la información del video. 
xlistitem = xbmcgui.ListItem(path=mediaurl, thumbnailImage=item.thumbnail) set_infolabels(xlistitem, item, True) # si se trata de un vídeo en formato mpd, se configura el listitem para reproducirlo # con el addon inpustreamaddon implementado en Kodi 17 if mpd: xlistitem.setProperty('inputstreamaddon', 'inputstream.adaptive') xlistitem.setProperty('inputstream.adaptive.manifest_type', 'mpd') # se lanza el reproductor set_player(item, xlistitem, mediaurl, view, strm) # si es un archivo de la biblioteca enviar a marcar como visto if strm or item.strm_path: from platformcode import xbmc_library xbmc_library.mark_auto_as_watched(item) def get_seleccion(default_action, opciones, seleccion, video_urls): # preguntar if default_action == "0": # "Elige una opción" seleccion = dialog_select(config.get_localized_string(30163), opciones) # Ver en calidad baja elif default_action == "1": seleccion = 0 # Ver en alta calidad elif default_action == "2": seleccion = len(video_urls) - 1 # jdownloader elif default_action == "3": seleccion = seleccion else: seleccion = 0 return seleccion def show_channel_settings(list_controls=None, dict_values=None, caption="", callback=None, item=None, custom_button=None, channelpath=None): """ Muestra un cuadro de configuracion personalizado para cada canal y guarda los datos al cerrarlo. Parametros: ver descripcion en xbmc_config_menu.SettingsWindow @param list_controls: lista de elementos a mostrar en la ventana. @type list_controls: list @param dict_values: valores que tienen la lista de elementos. @type dict_values: dict @param caption: titulo de la ventana @type caption: str @param callback: función que se llama tras cerrarse la ventana. @type callback: str @param item: item para el que se muestra la ventana de configuración. @type item: Item @param custom_button: botón personalizado, que se muestra junto a "OK" y "Cancelar". 
@type custom_button: dict @return: devuelve la ventana con los elementos @rtype: SettingsWindow """ from xbmc_config_menu import SettingsWindow return SettingsWindow("ChannelSettings.xml", config.get_runtime_path()) \ .start(list_controls=list_controls, dict_values=dict_values, title=caption, callback=callback, item=item, custom_button=custom_button, channelpath=channelpath) def show_video_info(data, caption="", item=None, scraper=Tmdb): """ Muestra una ventana con la info del vídeo. Opcionalmente se puede indicar el titulo de la ventana mendiante el argumento 'caption'. Si se pasa un item como argumento 'data' usa el scrapper Tmdb para buscar la info del vídeo En caso de peliculas: Coge el titulo de los siguientes campos (en este orden) 1. contentTitle (este tiene prioridad 1) 2. fulltitle (este tiene prioridad 2) 3. title (este tiene prioridad 3) El primero que contenga "algo" lo interpreta como el titulo (es importante asegurarse que el titulo este en su sitio) En caso de series: 1. Busca la temporada y episodio en los campos contentSeason y contentEpisodeNumber 2. Intenta Sacarlo del titulo del video (formato: 1x01) Aqui hay dos opciones posibles: 1. Tenemos Temporada y episodio Muestra la información del capitulo concreto 2. 
NO Tenemos Temporada y episodio En este caso muestra la informacion generica de la serie Si se pasa como argumento 'data' un objeto InfoLabels(ver item.py) muestra en la ventana directamente la información pasada (sin usar el scrapper) Formato: En caso de peliculas: infoLabels({ "type" : "movie", "title" : "Titulo de la pelicula", "original_title" : "Titulo original de la pelicula", "date" : "Fecha de lanzamiento", "language" : "Idioma original de la pelicula", "rating" : "Puntuacion de la pelicula", "votes" : "Numero de votos", "genres" : "Generos de la pelicula", "thumbnail" : "Ruta para el thumbnail", "fanart" : "Ruta para el fanart", "plot" : "Sinopsis de la pelicula" } En caso de series: infoLabels({ "type" : "tv", "title" : "Titulo de la serie", "episode_title" : "Titulo del episodio", "date" : "Fecha de emision", "language" : "Idioma original de la serie", "rating" : "Puntuacion de la serie", "votes" : "Numero de votos", "genres" : "Generos de la serie", "thumbnail" : "Ruta para el thumbnail", "fanart" : "Ruta para el fanart", "plot" : "Sinopsis de la del episodio o de la serie", "seasons" : "Numero de Temporadas", "season" : "Temporada", "episodes" : "Numero de episodios de la temporada", "episode" : "Episodio" } Si se pasa como argumento 'data' un listado de InfoLabels() con la estructura anterior, muestra los botones 'Anterior' y 'Siguiente' para ir recorriendo la lista. Ademas muestra los botones 'Aceptar' y 'Cancelar' que llamaran a la funcion 'callback' del canal desde donde se realiza la llamada pasandole como parametros el elemento actual (InfoLabels()) o None respectivamente. @param data: información para obtener datos del scraper. @type data: item, InfoLabels, list(InfoLabels) @param caption: titulo de la ventana. @type caption: str @param item: elemento del que se va a mostrar la ventana de información @type item: Item @param scraper: scraper que tiene los datos de las peliculas o series a mostrar en la ventana. 
@type scraper: Scraper """ from xbmc_info_window import InfoWindow return InfoWindow("InfoWindow.xml", config.get_runtime_path()).Start(data, caption=caption, item=item, scraper=scraper) def show_recaptcha(key, referer): from recaptcha import Recaptcha return Recaptcha("Recaptcha.xml", config.get_runtime_path()).Start(key, referer) def alert_no_disponible_server(server): # 'El vídeo ya no está en %s' , 'Prueba en otro servidor o en otro canal' dialog_ok(config.get_localized_string(30055), (config.get_localized_string(30057) % server), config.get_localized_string(30058)) def alert_unsopported_server(): # 'Servidor no soportado o desconocido' , 'Prueba en otro servidor o en otro canal' dialog_ok(config.get_localized_string(30065), config.get_localized_string(30058)) def handle_wait(time_to_wait, title, text): logger.info("handle_wait(time_to_wait=%d)" % time_to_wait) espera = dialog_progress(' ' + title, "") secs = 0 increment = int(100 / time_to_wait) cancelled = False while secs < time_to_wait: secs += 1 percent = increment * secs secs_left = str((time_to_wait - secs)) remaining_display = ' Espera ' + secs_left + ' segundos para que comience el vídeo...' 
espera.update(percent, ' ' + text, remaining_display) xbmc.sleep(1000) if espera.iscanceled(): cancelled = True break if cancelled: logger.info('Espera cancelada') return False else: logger.info('Espera finalizada') return True def get_dialogo_opciones(item, default_action, strm): logger.info("platformtools get_dialogo_opciones") #logger.debug(item.tostring('\n')) from core import servertools opciones = [] error = False try: item.server = item.server.lower() except AttributeError: item.server = "" if item.server == "": item.server = "directo" # Si no es el modo normal, no muestra el diálogo porque cuelga XBMC muestra_dialogo = (config.get_setting("player_mode") == "0" and not strm) # Extrae las URL de los vídeos, y si no puedes verlo te dice el motivo #Permitir varias calidades para server "directo" if item.video_urls: video_urls, puedes, motivo = item.video_urls, True, "" else: video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing( item.server, item.url, item.password, muestra_dialogo) seleccion = 0 # Si puedes ver el vídeo, presenta las opciones if puedes: for video_url in video_urls: opciones.append(config.get_localized_string(30151) + " " + video_url[0]) if item.server == "local": opciones.append(config.get_localized_string(30164)) else: # "Descargar" opcion = config.get_localized_string(30153) opciones.append(opcion) if item.isFavourite: # "Quitar de favoritos" opciones.append(config.get_localized_string(30154)) else: # "Añadir a favoritos" opciones.append(config.get_localized_string(30155)) if not strm and item.contentType == 'movie': # "Añadir a Biblioteca" opciones.append(config.get_localized_string(30161)) if config.get_setting("jdownloader_enabled") == "true": # "Enviar a JDownloader" opciones.append(config.get_localized_string(30158)) if default_action == "3": seleccion = len(opciones) - 1 # Busqueda de trailers en youtube if item.channel not in ["Trailer", "ecarteleratrailers"]: # "Buscar Trailer" 
opciones.append(config.get_localized_string(30162)) # Si no puedes ver el vídeo te informa else: if item.server != "": if "<br/>" in motivo: dialog_ok("No puedes ver ese vídeo porque...", motivo.split("<br/>")[0], motivo.split("<br/>")[1], item.url) else: dialog_ok("No puedes ver ese vídeo porque...", motivo, item.url) else: dialog_ok("No puedes ver ese vídeo porque...", "El servidor donde está alojado no está", "soportado en pelisalacarta todavía", item.url) if item.channel == "favoritos": # "Quitar de favoritos" opciones.append(config.get_localized_string(30154)) if len(opciones) == 0: error = True return opciones, video_urls, seleccion, error def set_opcion(item, seleccion, opciones, video_urls): logger.info("platformtools set_opcion") # logger.debug(item.tostring('\n')) salir = False # No ha elegido nada, lo más probable porque haya dado al ESC # TODO revisar if seleccion == -1: # Para evitar el error "Uno o más elementos fallaron" al cancelar la selección desde fichero strm listitem = xbmcgui.ListItem(item.title, iconImage="DefaultVideo.png", thumbnailImage=item.thumbnail) xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, listitem) # "Enviar a JDownloader" if opciones[seleccion] == config.get_localized_string(30158): from core import scrapertools # TODO comprobar que devuelve 'data' if item.subtitle != "": data = scrapertools.cachePage(config.get_setting("jdownloader") + "/action/add/links/grabber0/start1/web=" + item.url + " " + item.thumbnail + " " + item.subtitle) else: data = scrapertools.cachePage(config.get_setting("jdownloader") + "/action/add/links/grabber0/start1/web=" + item.url + " " + item.thumbnail) salir = True # "Descargar" elif opciones[seleccion] == config.get_localized_string(30153): from channels import descargas if item.contentType == "list" or item.contentType == "tvshow": item.contentType = "video" item.play_menu = True descargas.save_download(item) salir = True # "Quitar de favoritos" elif opciones[seleccion] == 
config.get_localized_string(30154): from channels import favoritos favoritos.delFavourite(item) salir = True # "Añadir a favoritos": elif opciones[seleccion] == config.get_localized_string(30155): from channels import favoritos item.from_channel = "favoritos" favoritos.addFavourite(item) salir = True # "Añadir a Biblioteca": # Library elif opciones[seleccion] == config.get_localized_string(30161): titulo = item.fulltitle if titulo == "": titulo = item.title new_item = item.clone(title=titulo, action="play_from_library", category="Cine", fulltitle=item.fulltitle, channel=item.channel) from core import library library.add_pelicula_to_library(new_item) salir = True # "Buscar Trailer": elif opciones[seleccion] == config.get_localized_string(30162): config.set_setting("subtitulo", "false") xbmc.executebuiltin("XBMC.RunPlugin(%s?%s)" % (sys.argv[0], item.clone(channel="trailertools", action="buscartrailer", contextual=True).tourl())) salir = True return salir def get_video_seleccionado(item, seleccion, video_urls): logger.info("platformtools get_video_seleccionado") mediaurl = "" view = False wait_time = 0 mpd = False # Ha elegido uno de los vídeos if seleccion < len(video_urls): mediaurl = video_urls[seleccion][1] if len(video_urls[seleccion]) > 4: wait_time = video_urls[seleccion][2] item.subtitle = video_urls[seleccion][3] mpd = True elif len(video_urls[seleccion]) > 3: wait_time = video_urls[seleccion][2] item.subtitle = video_urls[seleccion][3] elif len(video_urls[seleccion]) > 2: wait_time = video_urls[seleccion][2] view = True # Si no hay mediaurl es porque el vídeo no está :) logger.info("pelisalacarta.platformcode.platformstools mediaurl=" + mediaurl) if mediaurl == "": if item.server == "unknown": alert_unsopported_server() else: alert_no_disponible_server(item.server) # Si hay un tiempo de espera (como en megaupload), lo impone ahora if wait_time > 0: continuar = handle_wait(wait_time, item.server, "Cargando vídeo...") if not continuar: mediaurl = "" return 
mediaurl, view, mpd def set_player(item, xlistitem, mediaurl, view, strm): logger.info("platformtools set_player") logger.debug("item:\n" + item.tostring('\n')) # Movido del conector "torrent" aqui if item.server == "torrent": play_torrent(item, xlistitem, mediaurl) return # Si es un fichero strm no hace falta el play elif strm: xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xlistitem) if item.subtitle != "": xbmc.sleep(2000) xbmc.Player().setSubtitles(item.subtitle) else: logger.info("player_mode=" + config.get_setting("player_mode")) logger.info("mediaurl=" + mediaurl) if config.get_setting("player_mode") == "3" or "megacrypter.com" in mediaurl: import download_and_play download_and_play.download_and_play(mediaurl, "download_and_play.tmp", config.get_setting("downloadpath")) return elif config.get_setting("player_mode") == "0" or \ (config.get_setting("player_mode") == "3" and mediaurl.startswith("rtmp")): # Añadimos el listitem a una lista de reproducción (playlist) playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO) playlist.clear() playlist.add(mediaurl, xlistitem) # Reproduce playersettings = config.get_setting('player_type') logger.info("pelisalacarta.platformcode.platformstools playersettings=" + playersettings) if config.get_system_platform() == "xbox": player_type = xbmc.PLAYER_CORE_AUTO if playersettings == "0": player_type = xbmc.PLAYER_CORE_AUTO logger.info("pelisalacarta.platformcode.platformstools PLAYER_CORE_AUTO") elif playersettings == "1": player_type = xbmc.PLAYER_CORE_MPLAYER logger.info("pelisalacarta.platformcode.platformstools PLAYER_CORE_MPLAYER") elif playersettings == "2": player_type = xbmc.PLAYER_CORE_DVDPLAYER logger.info("pelisalacarta.platformcode.platformstools PLAYER_CORE_DVDPLAYER") xbmc_player = xbmc.Player(player_type) else: xbmc_player = xbmc.Player() xbmc_player.play(playlist, xlistitem) elif config.get_setting("player_mode") == "1": logger.info("mediaurl :" + mediaurl) logger.info("Tras setResolvedUrl") 
xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xbmcgui.ListItem(path=mediaurl)) elif config.get_setting("player_mode") == "2": xbmc.executebuiltin("PlayMedia(" + mediaurl + ")") # TODO MIRAR DE QUITAR VIEW if item.subtitle != "" and view: logger.info("Subtítulos externos: " + item.subtitle) xbmc.sleep(2000) xbmc.Player().setSubtitles(item.subtitle) def play_torrent(item, xlistitem, mediaurl): logger.info("platformtools play_torrent") # Opciones disponibles para Reproducir torrents torrent_options = list() torrent_options.append(["Cliente interno (necesario libtorrent)"]) torrent_options.append(["Cliente interno MCT (necesario libtorrent)"]) # Plugins externos se pueden añadir otros if xbmc.getCondVisibility('System.HasAddon("plugin.video.xbmctorrent")'): torrent_options.append(["Plugin externo: xbmctorrent", "plugin://plugin.video.xbmctorrent/play/%s"]) if xbmc.getCondVisibility('System.HasAddon("plugin.video.pulsar")'): torrent_options.append(["Plugin externo: pulsar", "plugin://plugin.video.pulsar/play?uri=%s"]) if xbmc.getCondVisibility('System.HasAddon("plugin.video.quasar")'): torrent_options.append(["Plugin externo: quasar", "plugin://plugin.video.quasar/play?uri=%s"]) if xbmc.getCondVisibility('System.HasAddon("plugin.video.stream")'): torrent_options.append(["Plugin externo: stream", "plugin://plugin.video.stream/play/%s"]) if xbmc.getCondVisibility('System.HasAddon("plugin.video.torrenter")'): torrent_options.append(["Plugin externo: torrenter", "plugin://plugin.video.torrenter/?action=playSTRM&url=%s"]) if xbmc.getCondVisibility('System.HasAddon("plugin.video.torrentin")'): torrent_options.append(["Plugin externo: torrentin", "plugin://plugin.video.torrentin/?uri=%s&image="]) if len(torrent_options) > 1: seleccion = dialog_select("Abrir torrent con...", [opcion[0] for opcion in torrent_options]) else: seleccion = 0 # Plugins externos if seleccion > 1: mediaurl = urllib.quote_plus(item.url) xbmc.executebuiltin("PlayMedia(" + 
torrent_options[seleccion][1] % mediaurl + ")") if seleccion == 1: from platformcode import mct mct.play(mediaurl, xlistitem, subtitle=item.subtitle) # Reproductor propio (libtorrent) if seleccion == 0: import time played = False debug = (config.get_setting("debug") == "true") # Importamos el cliente from btserver import Client clientTmpPath = config.get_setting("downloadpath") if not clientTmpPath: clientTmpPath = config.get_data_path() # Iniciamos el cliente: c = Client(url=mediaurl, is_playing_fnc=xbmc.Player().isPlaying, wait_time=None, timeout=10, temp_path=os.path.join(clientTmpPath, "pelisalacarta-torrent"), print_status=debug) # Mostramos el progreso progreso = dialog_progress("Pelisalacarta - Torrent", "Iniciando...") # Mientras el progreso no sea cancelado ni el cliente cerrado while not c.closed: try: # Obtenemos el estado del torrent s = c.status if debug: # Montamos las tres lineas con la info del torrent txt = '%.2f%% de %.1fMB %s | %.1f kB/s' % \ (s.progress_file, s.file_size, s.str_state, s._download_rate) txt2 = 'S: %d(%d) P: %d(%d) | DHT:%s (%d) | Trakers: %d' % \ (s.num_seeds, s.num_complete, s.num_peers, s.num_incomplete, s.dht_state, s.dht_nodes, s.trackers) txt3 = 'Origen Peers TRK: %d DHT: %d PEX: %d LSD %d ' % \ (s.trk_peers, s.dht_peers, s.pex_peers, s.lsd_peers) else: txt = '%.2f%% de %.1fMB %s | %.1f kB/s' % \ (s.progress_file, s.file_size, s.str_state, s._download_rate) txt2 = 'S: %d(%d) P: %d(%d)' % (s.num_seeds, s.num_complete, s.num_peers, s.num_incomplete) try: txt3 = 'Deteniendo automaticamente en: %ss' % (int(s.timeout)) except: txt3 = '' progreso.update(s.buffer, txt, txt2, txt3) time.sleep(0.5) if progreso.iscanceled(): progreso.close() if s.buffer == 100: if dialog_yesno("Pelisalacarta - Torrent", "¿Deseas iniciar la reproduccion?"): played = False progreso = dialog_progress("Pelisalacarta - Torrent", "") progreso.update(s.buffer, txt, txt2, txt3) else: progreso = dialog_progress("Pelisalacarta - Torrent", "") break else: if 
dialog_yesno("Pelisalacarta - Torrent", "¿Deseas cancelar el proceso?"): progreso = dialog_progress("Pelisalacarta - Torrent", "") break else: progreso = dialog_progress("Pelisalacarta - Torrent", "") progreso.update(s.buffer, txt, txt2, txt3) # Si el buffer se ha llenado y la reproduccion no ha sido iniciada, se inicia if s.buffer == 100 and not played: # Cerramos el progreso progreso.close() # Obtenemos el playlist del torrent videourl = c.get_play_list() # Iniciamos el reproductor playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO) playlist.clear() playlist.add(videourl, xlistitem) xbmc_player = xbmc.Player() xbmc_player.play(playlist) # Marcamos como reproducido para que no se vuelva a iniciar played = True # Y esperamos a que el reproductor se cierre while xbmc.Player().isPlaying(): time.sleep(1) # Cuando este cerrado, Volvemos a mostrar el dialogo progreso = dialog_progress("Pelisalacarta - Torrent", "") progreso.update(s.buffer, txt, txt2, txt3) except: import traceback logger.info(traceback.format_exc()) break progreso.update(100, "Terminando y eliminando datos", " ", " ") # Detenemos el cliente if not c.closed: c.stop() # Y cerramos el progreso progreso.close()
gpl-3.0
2,443,567,850,198,299,000
42.925293
122
0.578759
false
CLVsol/odoo_addons
clv_person_mng/history/__init__.py
1
1434
# -*- encoding: utf-8 -*- ################################################################################ # # # Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU Affero General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU Affero General Public License for more details. # # # # You should have received a copy of the GNU Affero General Public License # # along with this program. If not, see <http://www.gnu.org/licenses/>. # ################################################################################ import clv_person_mng_history
agpl-3.0
-1,074,229,779,788,836,500
70.7
80
0.410042
false
PyBossa/pybossa
test/factories/taskrun_factory.py
1
1933
# -*- coding: utf8 -*- # This file is part of PYBOSSA. # # Copyright (C) 2015 Scifabric LTD. # # PYBOSSA is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # PYBOSSA is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>. from yacryptopan import CryptoPAn from pybossa.model.task_run import TaskRun from . import BaseFactory, factory, task_repo import settings_test cp = CryptoPAn(settings_test.CRYPTOPAN_KEY) class TaskRunFactory(BaseFactory): class Meta: model = TaskRun @classmethod def _create(cls, model_class, *args, **kwargs): taskrun = model_class(*args, **kwargs) task_repo.save(taskrun) return taskrun id = factory.Sequence(lambda n: n) task = factory.SubFactory('factories.TaskFactory') task_id = factory.LazyAttribute(lambda task_run: task_run.task.id) project = factory.SelfAttribute('task.project') project_id = factory.LazyAttribute(lambda task_run: task_run.project.id) user = factory.SubFactory('factories.UserFactory') user_id = factory.LazyAttribute(lambda task_run: task_run.user.id) info = dict(answer='yes') class AnonymousTaskRunFactory(TaskRunFactory): user = None user_id = None user_ip = cp.anonymize('127.0.0.1') info = 'yes' class ExternalUidTaskRunFactory(TaskRunFactory): user = None user_id = None user_ip = cp.anonymize('127.0.0.1') external_uid = '1xa' info = 'yes'
agpl-3.0
9,141,680,664,959,028,000
32.327586
77
0.713399
false
oldstylejoe/pychess-timed
lib/pychess/compat.py
1
1109
""" Some Python2/Python3 compatibility support helpers """ import sys PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 if PY3: basestring = str cmp = lambda x, y: (x > y) - (x < y) memoryview = memoryview open = open unichr = chr unicode = lambda x: x raw_input = input import builtins from html.entities import entitydefs from io import StringIO from urllib.parse import urlparse from configparser import SafeConfigParser from queue import Queue, Empty, Full from urllib.request import urlopen, url2pathname, pathname2url from urllib.parse import urlencode else: basestring = basestring cmp = cmp memoryview = buffer unicode = unicode unichr = unichr raw_input = raw_input import __builtin__ as builtins from htmlentitydefs import entitydefs from StringIO import StringIO from urlparse import urlparse from ConfigParser import SafeConfigParser from Queue import Queue, Empty, Full from urllib import urlopen, urlencode, url2pathname, pathname2url from io import open
gpl-3.0
4,246,670,998,720,175,600
24.790698
69
0.696123
false
SurveyMan/SMPy
example_survey.py
1
1248
from survey.objects import * import json """ This module provides an example of how to construct a questionnaire in Python. Questionnaires can be saved by calling jsonize and dumping their contents. Jsonized surveys can be reused, manipulated, and sent via RPC to another service. """ q1 = Question("What is your age?" , ["< 18", "18-34", "35-64", "> 65"] , qtypes["radio"]) q2 = Question("What is your political affiliation?" , ["Democrat", "Republican", "Indepedent"] , qtypes["radio"] , shuffle=True) q3 = Question("Which issues do you care about the most?" , ["Gun control", "Reproductive Rights", "The Economy", "Foreign Relations"] , qtypes["check"] ,shuffle=True) q4 = Question("What is your year of birth?" , [x+1910 for x in range(90)] , qtypes["dropdown"]) survey = Survey([q1, q2, q3, q4]) filename = 'jsonized_survey.txt' f = open(filename, 'w') f.write(json.dumps(survey.jsonize, sort_keys = True, indent = 4)) f.close() if __name__ == '__main__': import sys if len(sys.argv) > 1: filename = sys.argv[1] print "See "+filename+" for a jsonzied survey."
apache-2.0
-5,831,616,676,094,554,000
30.2
90
0.591346
false
chfritz/atom-sane-indentation
spec/fixtures/sample.py
1
4084
x = { "a": 1, 'b': 2 } x = [ 3, 4 ] x = [ (sname, pname) for (sno, sname) in suppliers for (pno, pname) in parts for (sp_sno, sp_pno) in suppart if sno == sp_sno and pno == sp_pno ] def testing_indentation_in_199(some_var): first_line = "auto complete enabled by default?" second_line = func_indentation(bracket_matched, but_indentation, still_broken, ) third_line = alt_indentation( long_param_list, still_not_working, ) fourth_line = existing_indentation('auto ident paste not turned on', 'so this is expected') this_is_not = 'indent error!' fourth_line = existing_indentation('retry last pasting', 'this works fine') raise IndentationError('ARGH!') enter_does_not_clear = 'still bad' even_with_blank_line = 'tab puts here' if test: more() foo() if this_works: no_surprise = True test() else: same_here = 'yep' if check_nested_indent is not None: basic_indent = True elif can_you_hear_me_now == False: also_works = True else: more() another_test = 'split_line' + \ 'should be indented' + \ 'at least once' return check_this def still_indented(other_var): '''Yay.''' indentation = 'hopelessly broken' if test: doit stopit while something(): x = 1 g = 2 while 0: x = 1 g = 2 else: x = 2 g = 4 def set_password(args): password = args.password while not password : password1 = getpass("" if args.quiet else "Provide password: ") password_repeat = getpass("" if args.quiet else "Repeat password: ") if password1 != password_repeat: print("Passwords do not match, try again") elif len(password1) < 4: print("Please provide at least 4 characters") else: password = password1 password_hash = passwd(password) cfg = BaseJSONConfigManager(config_dir=jupyter_config_dir()) cfg.update('jupyter_notebook_config', { 'NotebookApp': { 'password': password_hash, } }) if not args.quiet: print("password stored in config dir: %s" % jupyter_config_dir()) def main(argv): parser = argparse.ArgumentParser(argv[0]) subparsers = parser.add_subparsers() parser_password = subparsers.add_parser('password', help='sets a 
password for your notebook server') parser_password.add_argument("password", help="password to set, if not given, a password will be queried for (NOTE: this may not be safe)", nargs="?") parser_password.add_argument("--quiet", help="suppress messages", action="store_true") parser_password.set_defaults(function=set_password) args = parser.parse_args(argv[1:]) args.function(args) ## Comments # sfg # class TokenTests(unittest.TestCase): def testBackslash(self): # Backslash means line continuation: x = 1 \ + 1 self.assertEquals(x, 2, 'backslash for line continuation') # Backslash does not means continuation in comments :\ x = 0 self.assertEquals(x, 0, 'backslash ending comment') for s in '9223372036854775808', '0o2000000000000000000000', \ '0x10000000000000000', \ '0b100000000000000000000000000000000000000000000000000000000000000': try: x = eval(s) except OverflowError: self.fail("OverflowError on huge integer literal %r" % s) try: 1/0 except ZeroDivisionError: pass else: pass try: 1/0 except EOFError: pass except TypeError as msg: pass except RuntimeError as msg: pass except: pass else: pass try: 1/0 except (EOFError, TypeError, ZeroDivisionError): pass try: 1/0 except (EOFError, TypeError, ZeroDivisionError) as msg: pass try: pass finally: pass def foo(): while True: if test: more() more() elif test2: more2() else: bar() # --------------------------------------------- # TODO
mit
-4,141,579,375,323,475,000
21.31694
143
0.600392
false
FEniCS/ufl
ufl/finiteelement/mixedelement.py
1
19622
# -*- coding: utf-8 -*- "This module defines the UFL finite element classes." # Copyright (C) 2008-2016 Martin Sandve Alnæs # # This file is part of UFL (https://www.fenicsproject.org) # # SPDX-License-Identifier: LGPL-3.0-or-later # # Modified by Kristian B. Oelgaard # Modified by Marie E. Rognes 2010, 2012 # Modified by Anders Logg 2014 # Modified by Massimiliano Leoni, 2016 from ufl.log import error from ufl.permutation import compute_indices from ufl.utils.sequences import product, max_degree from ufl.utils.dicts import EmptyDict from ufl.utils.indexflattening import flatten_multiindex, unflatten_index, shape_to_strides from ufl.cell import as_cell from ufl.finiteelement.finiteelementbase import FiniteElementBase from ufl.finiteelement.finiteelement import FiniteElement class MixedElement(FiniteElementBase): """A finite element composed of a nested hierarchy of mixed or simple elements.""" __slots__ = ("_sub_elements", "_cells") def __init__(self, *elements, **kwargs): "Create mixed finite element from given list of elements" if type(self) is MixedElement: if kwargs: error("Not expecting keyword arguments to MixedElement constructor.") # Un-nest arguments if we get a single argument with a list of elements if len(elements) == 1 and isinstance(elements[0], (tuple, list)): elements = elements[0] # Interpret nested tuples as sub-mixedelements recursively elements = [MixedElement(e) if isinstance(e, (tuple, list)) else e for e in elements] self._sub_elements = elements # Pick the first cell, for now all should be equal cells = tuple(sorted(set(element.cell() for element in elements) - set([None]))) self._cells = cells if cells: cell = cells[0] # Require that all elements are defined on the same cell if not all(c == cell for c in cells[1:]): error("Sub elements must live on the same cell.") else: cell = None # Check that all elements use the same quadrature scheme TODO: # We can allow the scheme not to be defined. 
if len(elements) == 0: quad_scheme = None else: quad_scheme = elements[0].quadrature_scheme() if not all(e.quadrature_scheme() == quad_scheme for e in elements): error("Quadrature scheme mismatch for sub elements of mixed element.") # Compute value sizes in global and reference configurations value_size_sum = sum(product(s.value_shape()) for s in self._sub_elements) reference_value_size_sum = sum(product(s.reference_value_shape()) for s in self._sub_elements) # Default value shape: Treated simply as all subelement values # unpacked in a vector. value_shape = kwargs.get('value_shape', (value_size_sum,)) # Default reference value shape: Treated simply as all # subelement reference values unpacked in a vector. reference_value_shape = kwargs.get('reference_value_shape', (reference_value_size_sum,)) # Validate value_shape (deliberately not for subclasses # VectorElement and TensorElement) if type(self) is MixedElement: # This is not valid for tensor elements with symmetries, # assume subclasses deal with their own validation if product(value_shape) != value_size_sum: error("Provided value_shape doesn't match the " "total value size of all subelements.") # Initialize element data degrees = {e.degree() for e in self._sub_elements} - {None} degree = max_degree(degrees) if degrees else None FiniteElementBase.__init__(self, "Mixed", cell, degree, quad_scheme, value_shape, reference_value_shape) # Cache repr string if type(self) is MixedElement: self._repr = "MixedElement(%s)" % ( ", ".join(repr(e) for e in self._sub_elements),) def reconstruct_from_elements(self, *elements): "Reconstruct a mixed element from new subelements." if all(a == b for (a, b) in zip(elements, self._sub_elements)): return self return MixedElement(*elements) def symmetry(self): """Return the symmetry dict, which is a mapping :math:`c_0 \\to c_1` meaning that component :math:`c_0` is represented by component :math:`c_1`. 
A component is a tuple of one or more ints.""" # Build symmetry map from symmetries of subelements sm = {} # Base index of the current subelement into mixed value j = 0 for e in self._sub_elements: sh = e.value_shape() st = shape_to_strides(sh) # Map symmetries of subelement into index space of this # element for c0, c1 in e.symmetry().items(): j0 = flatten_multiindex(c0, st) + j j1 = flatten_multiindex(c1, st) + j sm[(j0,)] = (j1,) # Update base index for next element j += product(sh) if j != product(self.value_shape()): error("Size mismatch in symmetry algorithm.") return sm or EmptyDict def mapping(self): if all(e.mapping() == "identity" for e in self._sub_elements): return "identity" else: return "undefined" def num_sub_elements(self): "Return number of sub elements." return len(self._sub_elements) def sub_elements(self): "Return list of sub elements." return self._sub_elements def extract_subelement_component(self, i): """Extract direct subelement index and subelement relative component index for a given component index.""" if isinstance(i, int): i = (i,) self._check_component(i) # Select between indexing modes if len(self.value_shape()) == 1: # Indexing into a long vector of flattened subelement # shapes j, = i # Find subelement for this index for sub_element_index, e in enumerate(self._sub_elements): sh = e.value_shape() si = product(sh) if j < si: break j -= si if j < 0: error("Moved past last value component!") # Convert index into a shape tuple st = shape_to_strides(sh) component = unflatten_index(j, st) else: # Indexing into a multidimensional tensor where subelement # index is first axis sub_element_index = i[0] if sub_element_index >= len(self._sub_elements): error("Illegal component index (dimension %d)." 
% sub_element_index) component = i[1:] return (sub_element_index, component) def extract_component(self, i): """Recursively extract component index relative to a (simple) element and that element for given value component index.""" sub_element_index, component = self.extract_subelement_component(i) return self._sub_elements[sub_element_index].extract_component(component) def extract_subelement_reference_component(self, i): """Extract direct subelement index and subelement relative reference_component index for a given reference_component index.""" if isinstance(i, int): i = (i,) self._check_reference_component(i) # Select between indexing modes assert len(self.reference_value_shape()) == 1 # Indexing into a long vector of flattened subelement shapes j, = i # Find subelement for this index for sub_element_index, e in enumerate(self._sub_elements): sh = e.reference_value_shape() si = product(sh) if j < si: break j -= si if j < 0: error("Moved past last value reference_component!") # Convert index into a shape tuple st = shape_to_strides(sh) reference_component = unflatten_index(j, st) return (sub_element_index, reference_component) def extract_reference_component(self, i): """Recursively extract reference_component index relative to a (simple) element and that element for given value reference_component index.""" sub_element_index, reference_component = self.extract_subelement_reference_component(i) return self._sub_elements[sub_element_index].extract_reference_component(reference_component) def is_cellwise_constant(self, component=None): """Return whether the basis functions of this element is spatially constant over each cell.""" if component is None: return all(e.is_cellwise_constant() for e in self.sub_elements()) else: i, e = self.extract_component(component) return e.is_cellwise_constant() def degree(self, component=None): "Return polynomial degree of finite element." 
if component is None: return self._degree # from FiniteElementBase, computed as max of subelements in __init__ else: i, e = self.extract_component(component) return e.degree() def reconstruct(self, **kwargs): return MixedElement(*[e.reconstruct(**kwargs) for e in self.sub_elements()]) def __str__(self): "Format as string for pretty printing." tmp = ", ".join(str(element) for element in self._sub_elements) return "<Mixed element: (" + tmp + ")>" def shortstr(self): "Format as string for pretty printing." tmp = ", ".join(element.shortstr() for element in self._sub_elements) return "Mixed<" + tmp + ">" class VectorElement(MixedElement): "A special case of a mixed finite element where all elements are equal." def __init__(self, family, cell=None, degree=None, dim=None, form_degree=None, quad_scheme=None): """ Create vector element (repeated mixed element) *Arguments* family (string) The finite element family (or an existing FiniteElement) cell The geometric cell, ignored if family is a FiniteElement degree (int) The polynomial degree, ignored if family is a FiniteElement dim (int) The value dimension of the element (optional) form_degree (int) The form degree (FEEC notation, used when field is viewed as k-form), ignored if family is a FiniteElement quad_scheme The quadrature scheme (optional), ignored if family is a FiniteElement """ if isinstance(family, FiniteElementBase): sub_element = family cell = sub_element.cell() else: if cell is not None: cell = as_cell(cell) # Create sub element sub_element = FiniteElement(family, cell, degree, form_degree=form_degree, quad_scheme=quad_scheme) # Set default size if not specified if dim is None: if cell is None: error("Cannot infer vector dimension without a cell.") dim = cell.geometric_dimension() # Create list of sub elements for mixed element constructor sub_elements = [sub_element] * dim # Compute value shapes value_shape = (dim,) + sub_element.value_shape() reference_value_shape = (dim,) + 
sub_element.reference_value_shape() # Initialize element data MixedElement.__init__(self, sub_elements, value_shape=value_shape, reference_value_shape=reference_value_shape) FiniteElementBase.__init__(self, sub_element.family(), cell, sub_element.degree(), quad_scheme, value_shape, reference_value_shape) self._sub_element = sub_element # Cache repr string self._repr = "VectorElement(%s, dim=%d)" % ( repr(sub_element), len(self._sub_elements)) def reconstruct(self, **kwargs): sub_element = self._sub_element.reconstruct(**kwargs) return VectorElement(sub_element, dim=len(self.sub_elements())) def __str__(self): "Format as string for pretty printing." return ("<vector element with %d components of %s>" % (len(self._sub_elements), self._sub_element)) def shortstr(self): "Format as string for pretty printing." return "Vector<%d x %s>" % (len(self._sub_elements), self._sub_element.shortstr()) class TensorElement(MixedElement): """A special case of a mixed finite element where all elements are equal. """ __slots__ = ("_sub_element", "_shape", "_symmetry", "_sub_element_mapping", "_flattened_sub_element_mapping", "_mapping") def __init__(self, family, cell=None, degree=None, shape=None, symmetry=None, quad_scheme=None): """Create tensor element (repeated mixed element with optional symmetries). :arg family: The family string, or an existing FiniteElement. :arg cell: The geometric cell (ignored if family is a FiniteElement). :arg degree: The polynomial degree (ignored if family is a FiniteElement). :arg shape: The shape of the element (defaults to a square tensor given by the geometric dimension of the cell). :arg symmetry: Optional symmetries. 
:arg quad_scheme: Optional quadrature scheme (ignored if family is a FiniteElement).""" if isinstance(family, FiniteElementBase): sub_element = family cell = sub_element.cell() else: if cell is not None: cell = as_cell(cell) # Create scalar sub element sub_element = FiniteElement(family, cell, degree, quad_scheme=quad_scheme) # Set default shape if not specified if shape is None: if cell is None: error("Cannot infer tensor shape without a cell.") dim = cell.geometric_dimension() shape = (dim, dim) if symmetry is None: symmetry = EmptyDict elif symmetry is True: # Construct default symmetry dict for matrix elements if not (len(shape) == 2 and shape[0] == shape[1]): error("Cannot set automatic symmetry for non-square tensor.") symmetry = dict(((i, j), (j, i)) for i in range(shape[0]) for j in range(shape[1]) if i > j) else: if not isinstance(symmetry, dict): error("Expecting symmetry to be None (unset), True, or dict.") # Validate indices in symmetry dict for i, j in symmetry.items(): if len(i) != len(j): error("Non-matching length of symmetry index tuples.") for k in range(len(i)): if not (i[k] >= 0 and j[k] >= 0 and i[k] < shape[k] and j[k] < shape[k]): error("Symmetry dimensions out of bounds.") # Compute all index combinations for given shape indices = compute_indices(shape) # Compute mapping from indices to sub element number, # accounting for symmetry sub_elements = [] sub_element_mapping = {} for index in indices: if index in symmetry: continue sub_element_mapping[index] = len(sub_elements) sub_elements += [sub_element] # Update mapping for symmetry for index in indices: if index in symmetry: sub_element_mapping[index] = sub_element_mapping[symmetry[index]] flattened_sub_element_mapping = [sub_element_mapping[index] for i, index in enumerate(indices)] # Compute value shape value_shape = shape # Compute reference value shape based on symmetries if symmetry: # Flatten and subtract symmetries reference_value_shape = (product(shape) - len(symmetry),) self._mapping 
= "symmetries" else: # Do not flatten if there are no symmetries reference_value_shape = shape self._mapping = "identity" value_shape = value_shape + sub_element.value_shape() reference_value_shape = reference_value_shape + sub_element.reference_value_shape() # Initialize element data MixedElement.__init__(self, sub_elements, value_shape=value_shape, reference_value_shape=reference_value_shape) self._family = sub_element.family() self._degree = sub_element.degree() self._sub_element = sub_element self._shape = shape self._symmetry = symmetry self._sub_element_mapping = sub_element_mapping self._flattened_sub_element_mapping = flattened_sub_element_mapping # Cache repr string self._repr = "TensorElement(%s, shape=%s, symmetry=%s)" % ( repr(sub_element), repr(self._shape), repr(self._symmetry)) def mapping(self): if self._symmetry: return "symmetries" else: return "identity" def flattened_sub_element_mapping(self): return self._flattened_sub_element_mapping def extract_subelement_component(self, i): """Extract direct subelement index and subelement relative component index for a given component index.""" if isinstance(i, int): i = (i,) self._check_component(i) i = self.symmetry().get(i, i) l = len(self._shape) # noqa: E741 ii = i[:l] jj = i[l:] if ii not in self._sub_element_mapping: error("Illegal component index %s." % (i,)) k = self._sub_element_mapping[ii] return (k, jj) def symmetry(self): """Return the symmetry dict, which is a mapping :math:`c_0 \\to c_1` meaning that component :math:`c_0` is represented by component :math:`c_1`. A component is a tuple of one or more ints.""" return self._symmetry def reconstruct(self, **kwargs): sub_element = self._sub_element.reconstruct(**kwargs) return TensorElement(sub_element, shape=self._shape, symmetry=self._symmetry) def __str__(self): "Format as string for pretty printing." 
if self._symmetry: tmp = ", ".join("%s -> %s" % (a, b) for (a, b) in self._symmetry.items()) sym = " with symmetries (%s)" % tmp else: sym = "" return ("<tensor element with shape %s of %s%s>" % (self.value_shape(), self._sub_element, sym)) def shortstr(self): "Format as string for pretty printing." if self._symmetry: tmp = ", ".join("%s -> %s" % (a, b) for (a, b) in self._symmetry.items()) sym = " with symmetries (%s)" % tmp else: sym = "" return "Tensor<%s x %s%s>" % (self.value_shape(), self._sub_element.shortstr(), sym)
lgpl-3.0
-8,729,777,980,419,253,000
39.539256
103
0.588859
false
oudalab/phyllo
phyllo/extractors/johannesDB.py
1
2507
import sqlite3 import urllib import re from urllib.request import urlopen from bs4 import BeautifulSoup, NavigableString import nltk nltk.download('punkt') from nltk import sent_tokenize def parseRes2(soup, title, url, cur, author, date, collectiontitle): chapter = 0 sen = "" num = 1 [e.extract() for e in soup.find_all('br')] [e.extract() for e in soup.find_all('table')] [e.extract() for e in soup.find_all('span')] [e.extract() for e in soup.find_all('a')] for x in soup.find_all(): if len(x.text) == 0: x.extract() getp = soup.find_all('p') #print(getp) i = 0 for p in getp: # make sure it's not a paragraph without the main text try: if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin', 'internal_navigation']: # these are not part of the main t continue except: pass if p.b: chapter = p.b.text chapter = chapter.strip() else: sen = p.text sen = sen.strip() if sen != '': num = 0 for s in sent_tokenize(sen): sentn = s.strip() num += 1 cur.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)", (None, collectiontitle, title, 'Latin', author, date, chapter, num, sentn, url, 'prose')) def main(): # get proper URLs siteURL = 'http://www.thelatinlibrary.com' biggsURL = 'http://www.thelatinlibrary.com/johannes.html' biggsOPEN = urllib.request.urlopen(biggsURL) biggsSOUP = BeautifulSoup(biggsOPEN, 'html5lib') textsURL = [] title = 'Johannes de Plano Carpini' author = title collectiontitle = 'JOHANNES DE PLANO CARPINI LIBELLUS HISTORICUS IOANNIS DE PLANO CARPINI' date = '1246 A.D.' with sqlite3.connect('texts.db') as db: c = db.cursor() c.execute( 'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,' ' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,' ' link TEXT, documentType TEXT)') c.execute("DELETE FROM texts WHERE author = 'Johannes de Plano Carpini'") parseRes2(biggsSOUP, title, biggsURL, c, author, date, collectiontitle) if __name__ == '__main__': main()
apache-2.0
-3,149,571,058,116,537,300
31.986842
103
0.55724
false
mike-perdide/gfbi_core
tests/test1.py
1
7715
from subprocess import Popen, PIPE from gfbi_core.git_model import GitModel from gfbi_core.editable_git_model import EditableGitModel from gfbi_core.util import Index, Timezone from git.objects.util import altz_to_utctz_str from datetime import datetime import os import time REPOSITORY_NAME = "/tmp/tests_git" AVAILABLE_CHOICES = ['hexsha', 'authored_date', 'committed_date', 'author_name', 'committer_name', 'author_email', 'committer_email', 'message'] def run_command(command): # print "Running: %s" % command process = Popen(command, shell=True, stdout=PIPE) process.wait() def create_repository(): run_command('rm -rf ' + REPOSITORY_NAME) run_command('mkdir ' + REPOSITORY_NAME) os.chdir(REPOSITORY_NAME) run_command('git init') run_command('echo init > init_file') run_command('git add init_file') command = commit( "Initial commit", author_name="Wallace Henry", author_email="wh@jp.com", author_date="Sun Mar 11 12:00:00 2012 +0100", committer_name="Wallace Henry", committer_email="wh@jp.com", committer_date="Sun Mar 11 12:00:00 2012 +0100" ) run_command('git branch wallace_branch') def populate_repository(): for value in xrange(20, 25): command = 'echo "%d" > %d' % (value, value) run_command(command) run_command('git add %d' % value) commit( str(value), author_name="Wallace Henry", author_email="wh@jp.com", author_date="Sun Mar 11 12:10:%d 2012 +0100" % value, committer_name="Wallace Henry", committer_email="wh@jp.com", committer_date="Sun Mar 11 12:10:%d 2012 +0100" % value ) run_command('git checkout wallace_branch') for value in xrange(20, 25): command = 'echo "branch_%d" > branch_%d' % (value, value) run_command(command) run_command('git add branch_%d' % value) commit( "branch_" + str(value), author_name="Wallace Henry", author_email="wh@jp.com", author_date="Sun Mar 11 12:20:%d 2012 +0100" % value, committer_name="Wallace Henry", committer_email="wh@jp.com", committer_date="Sun Mar 11 12:20:%d 2012 +0100" % value ) def commit(message, author_name=None, 
author_email=None, author_date=None, committer_name=None, committer_email=None, committer_date=None): command = '' if author_name: command += 'GIT_AUTHOR_NAME="%s" ' % author_name if author_email: command += 'GIT_AUTHOR_EMAIL="%s" ' % author_email if author_date: command += 'GIT_AUTHOR_DATE="%s" ' % author_date if committer_name: command += 'GIT_COMMITTER_NAME="%s" ' % committer_name if committer_email: command += 'GIT_COMMITTER_EMAIL="%s" ' % committer_email if committer_date: command += 'GIT_COMMITTER_DATE="%s" ' % committer_date command += 'git commit -m "%s"' % message run_command(command) def write_and_wait(model): model.write() total_wait = 0 time.sleep(1) while not model.is_finished_writing(): if total_wait > 15: raise Exception("We waited too long for the writing process") time.sleep(1) total_wait += 1 def pretty_print_from_row(model, row): line = "" for col in xrange(len(AVAILABLE_CHOICES)): value = model.data(Index(row, col)) if col == 0: value = value[:7] elif col in (1, 2): tmstp, tz = value _dt = datetime.fromtimestamp(float(tmstp)).replace(tzinfo=tz) date_format = "%d/%m/%Y %H:%M:%S" value = _dt.strftime(date_format) value = tmstp line += "[" + str(value) + "] " return line def test_field_has_changed(test_row, test_column, test_value): our_model = EditableGitModel(REPOSITORY_NAME) our_model.populate() # print "====================================== Before the write" # for row in xrange(our_model.row_count()): # print pretty_print_from_row(our_model, row) # print "=======================================================" index = Index(test_row, test_column) our_model.start_history_event() our_model.set_data(index, test_value) write_and_wait(our_model) new_model = GitModel(REPOSITORY_NAME) new_model.populate() new_model_value = new_model.data(index) # print "======================================= After the write" # for row in xrange(our_model.row_count()): # print pretty_print_from_row(new_model, row) # print 
"=======================================================" if test_column in (1, 2): assert new_model_value[0] == test_value[0] and \ new_model_value[1].tzname("") == test_value[1].tzname(""), \ "The %s field wasn't changed correctly" % \ AVAILABLE_CHOICES[test_column] else: assert new_model_value == test_value, \ "The %s field wasn't changed correctly" % \ AVAILABLE_CHOICES[test_column] for row in xrange(our_model.row_count()): for column in xrange(1, our_model.column_count()): if (row == test_row and column == test_column): continue index = Index(row, column) our_value = our_model.data(index) new_value = new_model.data(index) if column in (1, 2): our_value, tz = our_value # print our_value, tz.tzname(None) new_value, tz = new_value # print new_value, tz.tzname(None) assert our_value == new_value, \ "Something else has change: (%d, %d)\ncolumn:%s\n" % \ (row, column, AVAILABLE_CHOICES[column]) + \ "%s\n%s\n%s\n" % \ (AVAILABLE_CHOICES, pretty_print_from_row(our_model, row), pretty_print_from_row(new_model, row)) + \ "%s // %s" % (our_value, new_value) def test_cant_apply_changed_repo(): a_model = EditableGitModel(REPOSITORY_NAME) a_model.populate() os.chdir(REPOSITORY_NAME) run_command("echo new > new_file") run_command("git add new_file") command = commit("new input") msg_col = a_model.get_column("message") index = Index(0, msg_col) a_model.start_history_event() orig_msg = a_model.data(index) a_model.set_data(index, "whatever change") try: write_and_wait(a_model) write_faled = False except: write_failed = True a_model = EditableGitModel(REPOSITORY_NAME) a_model.populate() new_msg = a_model.data(index) prev_msg= a_model.data(Index(1, msg_col)) error = "The write didn't fail on a modified repository" assert (write_failed and new_msg == "new input\n" and prev_msg == orig_msg), error create_repository() populate_repository() a_model = EditableGitModel(REPOSITORY_NAME) a_model.populate() columns = a_model.get_columns() print "Test authored date" authored_date_col = 
columns.index("authored_date") test_field_has_changed(2, authored_date_col, (1331465000, Timezone('+0100')) ) print "Test name" author_name_col = columns.index("author_name") test_field_has_changed(4, author_name_col, "JeanJean") print "Test message" message_col = columns.index("message") test_field_has_changed(3, message_col, "Boing boing boing") print "Test can't apply changed" test_cant_apply_changed_repo()
gpl-3.0
-2,993,049,151,893,016,600
32.689956
78
0.572391
false
sketchyfish/ypp-price-calculator
strip_automation.py
1
5368
#!/usr/bin/python """ This code uses .strip formatting once to remove the \n and another time to remove the \t from the below lists. For readability, the script uses a print("\n") to add a new line between the two lists """ island_list = ['Armstrong Island', 'Atchafalaya Island', 'Immokalee Island', 'Moultrie Island', 'Sho-ke Island', 'Sirius Island', 'Tumult Island', 'The Beaufort Islands', "Messier's Crown", 'Nunataq Island', 'Paollu Island', 'Qaniit Island', 'Ancoraggio Island', 'Fluke Island', 'Kakraphoon Island', 'Eagle Archipelago', 'Cambium Island', "Hubble's Eye", 'Ilha da Aguia', 'Ix Chel', 'Manu Island', 'Admiral Island', 'Basset Island', 'Bryher Island', 'Cromwell Island', 'Hook Shelf', 'Isle of Kent', 'Lincoln Island', 'Wensleydale', 'Anegada Island', 'Barnard Island', 'The Lowland Hundred', 'Lyonesse Island', 'Myvatn Island', 'Arakoua Island', 'Aten Island', 'Barbary Island', 'Caravanserai Island', 'Kasidim Island', 'Kiwara Island', 'Terjit Island', 'Tichka Plateau', 'Aimuari Island', 'Chachapoya Island', 'Matariki Island', 'Pukru Island', 'Quetzal Island', 'Saiph Island', 'Toba Island', 'Albatross Island', 'Ambush Island', 'Deadlight Dunes', 'Gauntlet Island', "Jack's Last Gift", 'Mirage Island', 'Scurvy Reef', 'Blackthorpe Island', 'Cook Island', 'Descartes Isle', 'Fowler Island', 'Greenwich Island', 'Halley Island', 'Spaniel Island', 'Starfish Island', 'Ventress Island', 'Accompong Island', 'Gallows Island', 'Iocane Island', 'Maia Island', 'Morgana Island', 'Paihia Island', 'Umbarten Island', 'Auk Island', 'Cryo Island', 'Hoarfrost Island', 'Amity Island', 'Bowditch Island', 'Hinga Island', 'Penobscot Island', 'Rowes Island', 'Scrimshaw Island', 'Squibnocket Island', 'Wissahickon Island', 'Ashkelon Arch', 'Kashgar Island', 'Morannon Island', 'Alkaid Island', 'Doyle Island', "Edgar's Choice", 'Isle of Keris', 'Marlowe Island', "McGuffin's Isle", 'Sayers Rock'] commodity_list = [['Hemp', 'Hemp oil', 'Iron', "Kraken's ink", 'Lacquer', 'Stone', 'Sugar 
cane', 'Varnish', 'Wood', '', 'Broom flower', 'Butterfly weed', 'Cowslip', 'Elderberries', 'Indigo', 'Iris root', 'Lily of the valley', 'Lobelia', 'Madder', 'Nettle', "Old man's beard", 'Pokeweed berries', 'Sassafras', 'Weld', 'Yarrow', '', 'Chalcocite', 'Cubanite', 'Gold nugget', 'Lorandite', 'Leushite', 'Masuyite', 'Papagoite', 'Serandite', 'Sincosite', 'Tellurium', 'Thorianite', '', 'Bananas', 'Carambolas', 'Coconuts', 'Durians', 'Limes', 'Mangos', 'Passion fruit', 'Pineapples', 'Pomegranates', 'Rambutan', 'Amber gems', 'Amethyst gems', 'Beryl gems', 'Coral gems', 'Diamonds', 'Emeralds', 'Jade gems', 'Jasper gems', 'Jet gems', 'Lapis lazuli gems', ' ', 'Moonstones', 'Opals', 'Pearls', 'Quartz gems', 'Rubies', 'Sapphires', 'Tigereye gems', 'Topaz gems', 'Gold nuggets (mineral)', '', 'Swill', 'Grog', 'Fine rum', 'Small, medium, and large cannon balls', 'Lifeboats', '', 'Aqua cloth', 'Black cloth', 'Blue cloth', 'Brown cloth', 'Gold cloth', 'Green cloth', 'Grey cloth', 'Lavender cloth', 'Light green cloth', 'Lime cloth', 'Magenta cloth', 'Maroon cloth', 'Mint cloth', 'Navy cloth', 'Orange cloth', 'Pink cloth', 'Purple cloth', 'Red cloth', 'Rose cloth', 'Tan cloth', 'Violet cloth', 'White cloth', 'Yellow cloth', 'Fine aqua cloth', 'Fine black cloth', 'Fine blue cloth', 'Fine brown cloth', 'Fine gold cloth', 'Fine green cloth', 'Fine grey cloth', 'Fine lavender cloth', 'Fine light green cloth', 'Fine lime cloth', 'Fine magenta cloth', 'Fine maroon cloth', 'Fine mint cloth', ' ', 'Fine navy cloth', 'Fine orange cloth', 'Fine pink cloth', 'Fine purple cloth', 'Fine red cloth', 'Fine rose cloth', 'Fine tan cloth', 'Fine violet cloth', 'Fine white cloth', 'Fine yellow cloth', 'Sail cloth', '', 'Blue dye', 'Green dye', "Kraken's blood", 'Red dye', 'Yellow dye', '', 'Aqua enamel', 'Black enamel', 'Blue enamel', 'Brown enamel', 'Gold enamel', 'Green enamel', 'Grey enamel', 'Lavender enamel', 'Light green enamel', 'Lime enamel', 'Magenta enamel', 'Maroon enamel', 'Mint 
enamel', 'Navy enamel', 'Orange enamel', 'Pink enamel', 'Purple enamel', 'Red enamel', 'Rose enamel', 'Tan enamel', 'Violet enamel', 'White enamel', 'Yellow enamel', '', 'Aqua paint', 'Black paint', 'Blue paint', 'Brown paint', 'Gold paint', 'Green paint', 'Grey paint', 'Lavender paint', 'Light green paint', 'Lime paint', 'Magenta paint', 'Maroon paint', 'Mint paint', 'Navy paint', 'Orange paint', 'Pink paint', 'Purple paint', 'Red paint', 'Rose paint', 'Tan paint', 'Violet paint', 'White paint', 'Yellow paint']] newi_list = [] newc_list = [] for each_item in island_list: b = each_item.strip("\n") c = b.strip("\t") newi_list.append(c) for each_item in commodity_list: b = each_item.strip("\n") c = b.strip("\t") newc_list.append(c) print(newi_list) print("\n") print(newc_list)
gpl-2.0
-3,883,570,465,021,505,500
91.551724
162
0.607116
false
markgw/pimlico
src/python/pimlico/cli/testemail.py
1
1311
# This file is part of Pimlico # Copyright (C) 2020 Mark Granroth-Wilding # Licensed under the GNU LGPL v3.0 - https://www.gnu.org/licenses/lgpl-3.0.en.html from __future__ import print_function from pimlico.cli.subcommands import PimlicoCLISubcommand from pimlico.utils.email import send_pimlico_email class EmailCmd(PimlicoCLISubcommand): command_name = "email" command_help = "Test email settings and try sending an email using them" def run_command(self, pipeline, opts): content = """\ This email is a test sent from Pimlico pipeline '%s'. If you issued the email test command, your email settings are now working and you can use Pimlico's email notification features. If you were not expecting this email, someone has perhaps typed your email address into their settings by accident. Please ignore it. The sender no doubt apologizes for their mistake. """ % pipeline.name # Send a dummy email to see if email sending works data = send_pimlico_email("Test email from Pimlico", content, pipeline.local_config, pipeline.log) if data["success"]: print("Email sending worked: check your email (%s) to see if the test message has arrived" % \ ", ".join(data["recipients"])) else: print("Email sending failed")
gpl-3.0
2,851,914,663,193,059,300
39.96875
106
0.710145
false
ellmetha/django-machina
tests/unit/apps/forum_conversation/test_forms.py
1
16391
import pytest from django import forms from django.contrib.auth.models import AnonymousUser from faker import Faker from machina.apps.forum_conversation.forms import PostForm, TopicForm from machina.conf import settings as machina_settings from machina.core.db.models import get_model from machina.core.loading import get_class from machina.test.factories import ( PostFactory, TopicPollFactory, UserFactory, create_forum, create_topic ) faker = Faker() ForumReadTrack = get_model('forum_tracking', 'ForumReadTrack') Post = get_model('forum_conversation', 'Post') Topic = get_model('forum_conversation', 'Topic') TopicReadTrack = get_model('forum_tracking', 'TopicReadTrack') PermissionHandler = get_class('forum_permission.handler', 'PermissionHandler') assign_perm = get_class('forum_permission.shortcuts', 'assign_perm') @pytest.mark.django_db class TestPostForm(object): @pytest.fixture(autouse=True) def setup(self): # Permission handler self.perm_handler = PermissionHandler() # Create a basic user self.user = UserFactory.create() # Set up a top-level forum self.top_level_forum = create_forum() # Set up a topic and some posts self.topic = create_topic(forum=self.top_level_forum, poster=self.user) self.post = PostFactory.create(topic=self.topic, poster=self.user) # Assign some permissions assign_perm('can_read_forum', self.user, self.top_level_forum) assign_perm('can_start_new_topics', self.user, self.top_level_forum) def test_can_valid_a_basic_post(self): # Setup form_data = { 'subject': 'Re: {}'.format(faker.text(max_nb_chars=200)), 'content': '[b]{}[/b]'.format(faker.text()), } # Run form = PostForm( data=form_data, user=self.user, forum=self.top_level_forum, topic=self.topic) valid = form.is_valid() # Check assert valid def test_can_affect_a_default_subject_to_the_post(self): # Setup form_data = { 'content': '[b]{}[/b]'.format(faker.text()), } # Run form = PostForm( data=form_data, user=self.user, forum=self.top_level_forum, topic=self.topic) # Check default_subject = 
'{} {}'.format( machina_settings.TOPIC_ANSWER_SUBJECT_PREFIX, self.topic.subject) assert form.fields['subject'].initial == default_subject def test_increments_the_post_updates_counter_in_case_of_post_edition(self): # Setup form_data = { 'subject': 'Re: {}'.format(faker.text(max_nb_chars=200)), 'content': '[b]{}[/b]'.format(faker.text()), } initial_updates_count = self.post.updates_count # Run form = PostForm( data=form_data, user=self.user, forum=self.top_level_forum, topic=self.topic, instance=self.post) # Check assert form.is_valid() form.save() self.post.refresh_from_db() assert self.post.updates_count == initial_updates_count + 1 def test_set_the_topic_as_unapproved_if_the_user_has_not_the_required_permission(self): # Setup form_data = { 'subject': faker.text(max_nb_chars=200), 'content': '[b]{}[/b]'.format(faker.text()), } # Run form_kwargs = { 'data': form_data, 'user': self.user, 'forum': self.top_level_forum, 'topic': self.topic, } form = PostForm(**form_kwargs) # Check assert form.is_valid() post = form.save() assert not post.approved assign_perm('can_post_without_approval', self.user, self.top_level_forum) form = PostForm(**form_kwargs) assert form.is_valid() post = form.save() assert post.approved def test_adds_the_username_field_if_the_user_is_anonymous(self): # Setup form_data = { 'subject': faker.text(max_nb_chars=200), 'content': '[b]{}[/b]'.format(faker.text()), 'username': 'testname', } user = AnonymousUser() user.forum_key = '1234' # Run form = PostForm( data=form_data, user=user, forum=self.top_level_forum, topic=self.topic) # Check assert 'username' in form.fields assert form.is_valid() post = form.save() assert post.username == 'testname' def test_adds_the_update_reason_field_if_the_post_is_updated(self): # Setup form_data = { 'subject': faker.text(max_nb_chars=200), 'content': '[b]{}[/b]'.format(faker.text()), 'update_reason': 'X', } # Run form = PostForm( data=form_data, user=self.user, forum=self.top_level_forum, topic=self.topic, 
instance=self.post) # Check assert 'update_reason' in form.fields assert form.is_valid() post = form.save() assert post.update_reason == 'X' def test_can_allow_a_user_to_lock_a_topic_if_he_has_the_permission_to_lock_topics(self): # Setup assign_perm('can_lock_topics', self.user, self.top_level_forum) form_data = { 'subject': faker.text(max_nb_chars=200), 'content': faker.text(), 'lock_topic': True, } # Run form = PostForm( data=form_data, user=self.user, forum=self.top_level_forum, topic=self.topic, instance=self.post) # Check assert 'lock_topic' in form.fields assert form.is_valid() post = form.save() post.refresh_from_db() assert post.topic.is_locked def test_cannot_allow_a_user_to_lock_a_topic_if_he_has_not_the_permission_to_lock_topics(self): # Setup form_data = { 'subject': faker.text(max_nb_chars=200), 'content': faker.text(), 'lock_topic': True, } # Run form = PostForm( data=form_data, user=self.user, forum=self.top_level_forum, topic=self.topic, instance=self.post) # Check assert 'lock_topic' not in form.fields assert form.is_valid() post = form.save() post.refresh_from_db() assert not post.topic.is_locked def test_cannot_overwrite_the_original_poster_when_a_post_is_edited_by_another_user(self): # Setup user = UserFactory.create() assign_perm('can_read_forum', user, self.top_level_forum) assign_perm('can_start_new_topics', user, self.top_level_forum) form_data = { 'subject': faker.text(max_nb_chars=200), 'content': faker.text(), } # Run form = PostForm( data=form_data, user=user, forum=self.top_level_forum, topic=self.topic, instance=self.post) # Check assert form.is_valid() post = form.save() assert post.poster == self.user @pytest.mark.django_db class TestTopicForm(object): @pytest.fixture(autouse=True) def setup(self): # Permission handler self.perm_handler = PermissionHandler() # Create a basic user self.user = UserFactory.create() # Set up a top-level forum self.top_level_forum = create_forum() # Set up a topic and some posts self.topic = 
create_topic(forum=self.top_level_forum, poster=self.user) self.post = PostFactory.create(topic=self.topic, poster=self.user) # Assign some permissions assign_perm('can_read_forum', self.user, self.top_level_forum) assign_perm('can_start_new_topics', self.user, self.top_level_forum) def test_can_valid_a_basic_topic(self): # Setup form_data = { 'subject': faker.text(max_nb_chars=200), 'content': '[b]{}[/b]'.format(faker.text()), 'topic_type': Topic.TOPIC_POST, } # Run form = TopicForm( data=form_data, user=self.user, forum=self.top_level_forum) valid = form.is_valid() # Check assert valid def test_can_valid_a_basic_sticky_post(self): # Setup form_data = { 'subject': faker.text(max_nb_chars=200), 'content': '[b]{}[/b]'.format(faker.text()), 'topic_type': Topic.TOPIC_STICKY, } assign_perm('can_post_stickies', self.user, self.top_level_forum) # Run form = TopicForm( data=form_data, user=self.user, forum=self.top_level_forum) valid = form.is_valid() # Check assert valid def test_can_valid_a_basic_announce(self): # Setup form_data = { 'subject': faker.text(max_nb_chars=200), 'content': '[b]{}[/b]'.format(faker.text()), 'topic_type': Topic.TOPIC_ANNOUNCE, } assign_perm('can_post_announcements', self.user, self.top_level_forum) # Run form = TopicForm( data=form_data, user=self.user, forum=self.top_level_forum) valid = form.is_valid() # Check assert valid def test_creates_a_post_topic_if_no_topic_type_is_provided(self): # Setup form_data = { 'subject': '{}'.format(faker.text(max_nb_chars=200)), 'content': '[b]{}[/b]'.format(faker.text()), } # Run form = TopicForm( data=form_data, user=self.user, forum=self.top_level_forum) valid = form.is_valid() # Check assert valid post = form.save() assert post.topic.type == Topic.TOPIC_POST def test_allows_the_creation_of_stickies_if_the_user_has_required_permission(self): # Setup form_data = { 'subject': faker.text(max_nb_chars=200), 'content': '[b]{}[/b]'.format(faker.text()), 'topic_type': Topic.TOPIC_STICKY, } form_kwargs = { 
'data': form_data, 'user': self.user, 'forum': self.top_level_forum, } # Run & check form = TopicForm(**form_kwargs) assert not form.is_valid() choices = [ch[0] for ch in form.fields['topic_type'].choices] assert Topic.TOPIC_STICKY not in choices assign_perm('can_post_stickies', self.user, self.top_level_forum) form = TopicForm(**form_kwargs) assert form.is_valid() choices = [ch[0] for ch in form.fields['topic_type'].choices] assert Topic.TOPIC_STICKY in choices def test_allows_the_creation_of_announces_if_the_user_has_required_permission(self): # Setup form_data = { 'subject': faker.text(max_nb_chars=200), 'content': '[b]{}[/b]'.format(faker.text()), 'topic_type': Topic.TOPIC_ANNOUNCE, } form_kwargs = { 'data': form_data, 'user': self.user, 'forum': self.top_level_forum, } # Run & check form = TopicForm(**form_kwargs) assert not form.is_valid() choices = [ch[0] for ch in form.fields['topic_type'].choices] assert Topic.TOPIC_ANNOUNCE not in choices assign_perm('can_post_announcements', self.user, self.top_level_forum) form = TopicForm(**form_kwargs) assert form.is_valid() choices = [ch[0] for ch in form.fields['topic_type'].choices] assert Topic.TOPIC_ANNOUNCE in choices def test_can_be_used_to_update_the_topic_type(self): # Setup form_data = { 'subject': 'Re: {}'.format(faker.text(max_nb_chars=200)), 'content': '[b]{}[/b]'.format(faker.text()), 'topic_type': Topic.TOPIC_STICKY, } assign_perm('can_post_stickies', self.user, self.top_level_forum) # Run form = TopicForm( data=form_data, user=self.user, forum=self.top_level_forum, topic=self.topic, instance=self.post) # Check assert form.is_valid() form.save() self.topic.refresh_from_db() assert self.topic.type == Topic.TOPIC_STICKY def test_can_append_poll_fields_if_the_user_is_allowed_to_create_polls(self): # Setup form_data = { 'subject': 'Re: {}'.format(faker.text(max_nb_chars=200)), 'content': '[b]{}[/b]'.format(faker.text()), 'topic_type': Topic.TOPIC_STICKY, } assign_perm('can_create_polls', self.user, 
self.top_level_forum) # Run form = TopicForm( data=form_data, user=self.user, forum=self.top_level_forum, topic=self.topic, instance=self.post) # Check assert 'poll_question' in form.fields assert 'poll_max_options' in form.fields assert 'poll_duration' in form.fields assert 'poll_user_changes' in form.fields assert isinstance(form.fields['poll_question'], forms.CharField) assert isinstance(form.fields['poll_max_options'], forms.IntegerField) assert isinstance(form.fields['poll_duration'], forms.IntegerField) assert isinstance(form.fields['poll_user_changes'], forms.BooleanField) def test_cannot_append_poll_fields_if_the_user_is_not_allowed_to_create_polls(self): # Setup form_data = { 'subject': 'Re: {}'.format(faker.text(max_nb_chars=200)), 'content': '[b]{}[/b]'.format(faker.text()), 'topic_type': Topic.TOPIC_STICKY, } # Run form = TopicForm( data=form_data, user=self.user, forum=self.top_level_forum, topic=self.topic, instance=self.post) # Check assert 'poll_question' not in form.fields assert 'poll_max_options' not in form.fields assert 'poll_duration' not in form.fields assert 'poll_user_changes' not in form.fields def test_can_initialize_poll_fields_from_topic_related_poll_object(self): # Setup form_data = { 'subject': 'Re: {}'.format(faker.text(max_nb_chars=200)), 'content': '[b]{}[/b]'.format(faker.text()), 'topic_type': Topic.TOPIC_STICKY, } assign_perm('can_create_polls', self.user, self.top_level_forum) poll = TopicPollFactory.create(topic=self.post.topic) # Run form = TopicForm( data=form_data, user=self.user, forum=self.top_level_forum, topic=self.topic, instance=self.post) # Check assert 'poll_question' in form.fields assert 'poll_max_options' in form.fields assert 'poll_duration' in form.fields assert 'poll_user_changes' in form.fields assert form.fields['poll_question'].initial == poll.question assert form.fields['poll_max_options'].initial == poll.max_options assert form.fields['poll_duration'].initial == poll.duration assert 
form.fields['poll_user_changes'].initial == poll.user_changes def test_cannot_allow_users_to_create_polls_without_settings_the_maximum_options_number(self): # Setup form_data = { 'subject': faker.text(max_nb_chars=200), 'content': '[b]{}[/b]'.format(faker.text()), 'topic_type': Topic.TOPIC_STICKY, 'poll_question': faker.text(max_nb_chars=100), } assign_perm('can_create_polls', self.user, self.top_level_forum) # Run form = TopicForm( data=form_data, user=self.user, forum=self.top_level_forum, topic=self.topic, instance=self.post) # Check valid = form.is_valid() assert not valid assert 'poll_max_options' in form.errors
bsd-3-clause
4,596,627,068,081,951,000
33.507368
99
0.558233
false
aangert/PiParty
color_tests/color_combo_test.py
2
1075
from enum import Enum class Colors(Enum): Pink = (255,96,96) Magenta = (255,0,192) Orange = (255,64,0) Yellow = (255,255,0) Green = (0,255,0) Turquoise = (0,255,255) Blue = (0,0,255) Purple = (96,0,255) color_list = [x for x in Colors] quad_teams_banned = { Colors.Pink : [Colors.Magenta,Colors.Purple], Colors.Magenta : [Colors.Pink,Colors.Purple], Colors.Orange : [Colors.Yellow], Colors.Yellow : [Colors.Orange], Colors.Green : [Colors.Turquoise], Colors.Turquoise : [Colors.Green,Colors.Blue], Colors.Blue : [Colors.Turquoise], Colors.Purple : [Colors.Magenta,Colors.Pink] } for a,b,c,d in [(a,b,c,d) for a in range(8) for b in range(a+1,8) for c in range(b+1,8) for d in range(c+1,8)]: quad = [color_list[x] for x in (a,b,c,d)] quad_banned = [quad_teams_banned[i] for i in quad] quad_banned = list(set([i for sublist in quad_banned for i in sublist])) bad = False for color in quad: if color in quad_banned: bad = True if not bad: print(quad)
mit
-6,680,612,122,141,936,000
28.054054
111
0.606512
false
DrYerzinia/Cat-Finder
src/KittyTracker/kittyTracker.py
1
1581
from netaddr import * from datetime import datetime import blescan import time import sys import bluetooth._bluetooth as bluez from Kitty import Kitty from CheckKittys import CheckKittys from BLESerialScanner import BLESerialScanner import SendMail import config def process(mac, rssi): found = False for k in config.kittys: if mac == k.mac: k.lastHeard = datetime.now() print 'Heard ' , k.name , ' at ' + str(rssi) + 'dBm!' if k.ttw != 180: SendMail.sendMail(k.name + ' reacquired') k.ttw = 180 found = True break if not found: print 'Unkown mac: ' , mac sys.stdout.flush() def main(): running = True kittyChecker = CheckKittys() scanner = BLESerialScanner(process) # dev_id = 0 # try: # sock = bluez.hci_open_dev(dev_id) # print "ble thread started" # except: # print "error accessing bluetooth device..." # sys.exit(1) # blescan.hci_le_set_scan_parameters(sock) # blescan.hci_enable_le_scan(sock) kittyChecker.daemon = True kittyChecker.kittys = config.kittys kittyChecker.running = True kittyChecker.start() scanner.start() message = "Kitty Tracker Active! Now tracking " + ", ".join(str(k.name) for k in config.kittys) print message SendMail.sendMail(message) try: while running: time.sleep(1) except KeyboardInterrupt: running = False kittyChecker.running = False scanner.running = False print "Terminating..." # returnedList = blescan.parse_events(sock, 1) # for beacon in returnedList: # mac, a, b, c, d, rssi = beacon.split(',') # mac = EUI(mac) if __name__ == '__main__': main()
unlicense
-1,341,239,824,988,486,700
19.532468
96
0.688805
false
alaski/nova
nova/tests/unit/virt/libvirt/test_driver.py
1
828402
# Copyright 2010 OpenStack Foundation # Copyright 2012 University Of Minho # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from collections import deque from collections import OrderedDict import contextlib import copy import datetime import errno import glob import os import random import re import shutil import signal import threading import time import uuid import eventlet from eventlet import greenthread import fixtures from lxml import etree import mock from mox3 import mox from os_brick.initiator import connector import os_vif from oslo_concurrency import lockutils from oslo_concurrency import processutils from oslo_config import cfg from oslo_serialization import jsonutils from oslo_service import loopingcall from oslo_utils import encodeutils from oslo_utils import fileutils from oslo_utils import fixture as utils_fixture from oslo_utils import importutils from oslo_utils import units from oslo_utils import uuidutils from oslo_utils import versionutils import six from six.moves import builtins from six.moves import range from nova.api.metadata import base as instance_metadata from nova.compute import arch from nova.compute import cpumodel from nova.compute import manager from nova.compute import power_state from nova.compute import task_states from nova.compute import vm_mode from nova.compute import vm_states import nova.conf from nova import context from nova import db from nova import exception from nova.network import model as network_model from nova import objects 
from nova.objects import block_device as block_device_obj from nova.objects import fields from nova.objects import migrate_data as migrate_data_obj from nova.objects import virtual_interface as obj_vif from nova.pci import manager as pci_manager from nova.pci import utils as pci_utils from nova import test from nova.tests.unit import fake_block_device from nova.tests.unit import fake_instance from nova.tests.unit import fake_network import nova.tests.unit.image.fake from nova.tests.unit import matchers from nova.tests.unit.objects import test_pci_device from nova.tests.unit.objects import test_vcpu_model from nova.tests.unit.virt.libvirt import fake_imagebackend from nova.tests.unit.virt.libvirt import fake_libvirt_utils from nova.tests.unit.virt.libvirt import fakelibvirt from nova.tests import uuidsentinel as uuids from nova import utils from nova import version from nova.virt import block_device as driver_block_device from nova.virt.disk import api as disk_api from nova.virt import driver from nova.virt import fake from nova.virt import firewall as base_firewall from nova.virt import hardware from nova.virt.image import model as imgmodel from nova.virt import images from nova.virt.libvirt import blockinfo from nova.virt.libvirt import config as vconfig from nova.virt.libvirt import driver as libvirt_driver from nova.virt.libvirt import firewall from nova.virt.libvirt import guest as libvirt_guest from nova.virt.libvirt import host from nova.virt.libvirt import imagebackend from nova.virt.libvirt import imagecache from nova.virt.libvirt import migration as libvirt_migrate from nova.virt.libvirt.storage import dmcrypt from nova.virt.libvirt.storage import lvm from nova.virt.libvirt.storage import rbd_utils from nova.virt.libvirt import utils as libvirt_utils from nova.virt.libvirt.volume import volume as volume_drivers libvirt_driver.libvirt = fakelibvirt host.libvirt = fakelibvirt libvirt_guest.libvirt = fakelibvirt libvirt_migrate.libvirt = fakelibvirt CONF = 
nova.conf.CONF _fake_network_info = fake_network.fake_get_instance_nw_info _fake_NodeDevXml = \ {"pci_0000_04_00_3": """ <device> <name>pci_0000_04_00_3</name> <parent>pci_0000_00_01_1</parent> <driver> <name>igb</name> </driver> <capability type='pci'> <domain>0</domain> <bus>4</bus> <slot>0</slot> <function>3</function> <product id='0x1521'>I350 Gigabit Network Connection</product> <vendor id='0x8086'>Intel Corporation</vendor> <capability type='virt_functions'> <address domain='0x0000' bus='0x04' slot='0x10' function='0x3'/> <address domain='0x0000' bus='0x04' slot='0x10' function='0x7'/> <address domain='0x0000' bus='0x04' slot='0x11' function='0x3'/> <address domain='0x0000' bus='0x04' slot='0x11' function='0x7'/> </capability> </capability> </device>""", "pci_0000_04_10_7": """ <device> <name>pci_0000_04_10_7</name> <parent>pci_0000_00_01_1</parent> <driver> <name>igbvf</name> </driver> <capability type='pci'> <domain>0</domain> <bus>4</bus> <slot>16</slot> <function>7</function> <product id='0x1520'>I350 Ethernet Controller Virtual Function </product> <vendor id='0x8086'>Intel Corporation</vendor> <capability type='phys_function'> <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/> </capability> <capability type='virt_functions'> </capability> </capability> </device>""", "pci_0000_04_11_7": """ <device> <name>pci_0000_04_11_7</name> <parent>pci_0000_00_01_1</parent> <driver> <name>igbvf</name> </driver> <capability type='pci'> <domain>0</domain> <bus>4</bus> <slot>17</slot> <function>7</function> <product id='0x1520'>I350 Ethernet Controller Virtual Function </product> <vendor id='0x8086'>Intel Corporation</vendor> <numa node='0'/> <capability type='phys_function'> <address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/> </capability> <capability type='virt_functions'> </capability> </capability> </device>""", "pci_0000_04_00_1": """ <device> <name>pci_0000_04_00_1</name> <path>/sys/devices/pci0000:00/0000:00:02.0/0000:04:00.1</path> 
<parent>pci_0000_00_02_0</parent> <driver> <name>mlx5_core</name> </driver> <capability type='pci'> <domain>0</domain> <bus>4</bus> <slot>0</slot> <function>1</function> <product id='0x1013'>MT27700 Family [ConnectX-4]</product> <vendor id='0x15b3'>Mellanox Technologies</vendor> <iommuGroup number='15'> <address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/> <address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/> </iommuGroup> <numa node='0'/> <pci-express> <link validity='cap' port='0' speed='8' width='16'/> <link validity='sta' speed='8' width='16'/> </pci-express> </capability> </device>""", # libvirt >= 1.3.0 nodedev-dumpxml "pci_0000_03_00_0": """ <device> <name>pci_0000_03_00_0</name> <path>/sys/devices/pci0000:00/0000:00:02.0/0000:03:00.0</path> <parent>pci_0000_00_02_0</parent> <driver> <name>mlx5_core</name> </driver> <capability type='pci'> <domain>0</domain> <bus>3</bus> <slot>0</slot> <function>0</function> <product id='0x1013'>MT27700 Family [ConnectX-4]</product> <vendor id='0x15b3'>Mellanox Technologies</vendor> <capability type='virt_functions' maxCount='16'> <address domain='0x0000' bus='0x03' slot='0x00' function='0x2'/> <address domain='0x0000' bus='0x03' slot='0x00' function='0x3'/> <address domain='0x0000' bus='0x03' slot='0x00' function='0x4'/> <address domain='0x0000' bus='0x03' slot='0x00' function='0x5'/> </capability> <iommuGroup number='15'> <address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/> <address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/> </iommuGroup> <numa node='0'/> <pci-express> <link validity='cap' port='0' speed='8' width='16'/> <link validity='sta' speed='8' width='16'/> </pci-express> </capability> </device>""", "pci_0000_03_00_1": """ <device> <name>pci_0000_03_00_1</name> <path>/sys/devices/pci0000:00/0000:00:02.0/0000:03:00.1</path> <parent>pci_0000_00_02_0</parent> <driver> <name>mlx5_core</name> </driver> <capability type='pci'> <domain>0</domain> <bus>3</bus> <slot>0</slot> 
<function>1</function> <product id='0x1013'>MT27700 Family [ConnectX-4]</product> <vendor id='0x15b3'>Mellanox Technologies</vendor> <capability type='virt_functions' maxCount='16'/> <iommuGroup number='15'> <address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/> <address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/> </iommuGroup> <numa node='0'/> <pci-express> <link validity='cap' port='0' speed='8' width='16'/> <link validity='sta' speed='8' width='16'/> </pci-express> </capability> </device>""", } _fake_cpu_info = { "arch": "test_arch", "model": "test_model", "vendor": "test_vendor", "topology": { "sockets": 1, "cores": 8, "threads": 16 }, "features": ["feature1", "feature2"] } eph_default_ext = utils.get_hash_str(disk_api._DEFAULT_FILE_SYSTEM)[:7] def eph_name(size): return ('ephemeral_%(size)s_%(ext)s' % {'size': size, 'ext': eph_default_ext}) def fake_disk_info_byname(instance, type='qcow2'): """Return instance_disk_info corresponding accurately to the properties of the given Instance object. The info is returned as an OrderedDict of name->disk_info for each disk. :param instance: The instance we're generating fake disk_info for. :param type: libvirt's disk type. 
:return: disk_info :rtype: OrderedDict """ instance_dir = os.path.join(CONF.instances_path, instance.uuid) def instance_path(name): return os.path.join(instance_dir, name) disk_info = OrderedDict() # root disk if instance.image_ref is not None: cache_name = imagecache.get_cache_fname(instance.image_ref) disk_info['disk'] = { 'type': type, 'path': instance_path('disk'), 'virt_disk_size': instance.flavor.root_gb * units.Gi, 'backing_file': cache_name, 'disk_size': instance.flavor.root_gb * units.Gi, 'over_committed_disk_size': 0} swap_mb = instance.flavor.swap if swap_mb > 0: disk_info['disk.swap'] = { 'type': type, 'path': instance_path('disk.swap'), 'virt_disk_size': swap_mb * units.Mi, 'backing_file': 'swap_%s' % swap_mb, 'disk_size': swap_mb * units.Mi, 'over_committed_disk_size': 0} eph_gb = instance.flavor.ephemeral_gb if eph_gb > 0: disk_info['disk.local'] = { 'type': type, 'path': instance_path('disk.local'), 'virt_disk_size': eph_gb * units.Gi, 'backing_file': eph_name(eph_gb), 'disk_size': eph_gb * units.Gi, 'over_committed_disk_size': 0} if instance.config_drive: disk_info['disk.config'] = { 'type': 'raw', 'path': instance_path('disk.config'), 'virt_disk_size': 1024, 'backing_file': '', 'disk_size': 1024, 'over_committed_disk_size': 0} return disk_info def fake_disk_info_json(instance, type='qcow2'): """Return fake instance_disk_info corresponding accurately to the properties of the given Instance object. :param instance: The instance we're generating fake disk_info for. :param type: libvirt's disk type. :return: JSON representation of instance_disk_info for all disks. 
:rtype: str """ disk_info = fake_disk_info_byname(instance, type) return jsonutils.dumps(disk_info.values()) def _concurrency(signal, wait, done, target, is_block_dev=False): signal.send() wait.wait() done.send() class FakeVirtDomain(object): def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None): if uuidstr is None: uuidstr = str(uuid.uuid4()) self.uuidstr = uuidstr self.id = id self.domname = name self._info = [power_state.RUNNING, 2048 * units.Mi, 1234 * units.Mi, None, None] if fake_xml: self._fake_dom_xml = fake_xml else: self._fake_dom_xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='filename'/> </disk> </devices> </domain> """ def name(self): if self.domname is None: return "fake-domain %s" % self else: return self.domname def ID(self): return self.id def info(self): return self._info def create(self): pass def managedSave(self, *args): pass def createWithFlags(self, launch_flags): pass def XMLDesc(self, flags): return self._fake_dom_xml def UUIDString(self): return self.uuidstr def attachDeviceFlags(self, xml, flags): pass def attachDevice(self, xml): pass def detachDeviceFlags(self, xml, flags): pass def snapshotCreateXML(self, xml, flags): pass def blockCommit(self, disk, base, top, bandwidth=0, flags=0): pass def blockRebase(self, disk, base, bandwidth=0, flags=0): pass def blockJobInfo(self, path, flags): pass def resume(self): pass def destroy(self): pass def fsFreeze(self, disks=None, flags=0): pass def fsThaw(self, disks=None, flags=0): pass def isActive(self): return True class CacheConcurrencyTestCase(test.NoDBTestCase): def setUp(self): super(CacheConcurrencyTestCase, self).setUp() self.flags(instances_path=self.useFixture(fixtures.TempDir()).path) # utils.synchronized() will create the lock_path for us if it # doesn't already exist. It will also delete it when it's done, # which can cause race conditions with the multiple threads we # use for tests. 
So, create the path here so utils.synchronized() # won't delete it out from under one of the threads. self.lock_path = os.path.join(CONF.instances_path, 'locks') fileutils.ensure_tree(self.lock_path) def fake_exists(fname): basedir = os.path.join(CONF.instances_path, CONF.image_cache_subdirectory_name) if fname == basedir or fname == self.lock_path: return True return False def fake_execute(*args, **kwargs): pass def fake_extend(image, size, use_cow=False): pass self.stub_out('os.path.exists', fake_exists) self.stubs.Set(utils, 'execute', fake_execute) self.stubs.Set(imagebackend.disk, 'extend', fake_extend) self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.imagebackend.libvirt_utils', fake_libvirt_utils)) def _fake_instance(self, uuid): return objects.Instance(id=1, uuid=uuid) def test_same_fname_concurrency(self): # Ensures that the same fname cache runs at a sequentially. uuid = uuidutils.generate_uuid() backend = imagebackend.Backend(False) wait1 = eventlet.event.Event() done1 = eventlet.event.Event() sig1 = eventlet.event.Event() thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid), 'name').cache, _concurrency, 'fname', None, signal=sig1, wait=wait1, done=done1) eventlet.sleep(0) # Thread 1 should run before thread 2. sig1.wait() wait2 = eventlet.event.Event() done2 = eventlet.event.Event() sig2 = eventlet.event.Event() thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid), 'name').cache, _concurrency, 'fname', None, signal=sig2, wait=wait2, done=done2) wait2.send() eventlet.sleep(0) try: self.assertFalse(done2.ready()) finally: wait1.send() done1.wait() eventlet.sleep(0) self.assertTrue(done2.ready()) # Wait on greenthreads to assert they didn't raise exceptions # during execution thr1.wait() thr2.wait() def test_different_fname_concurrency(self): # Ensures that two different fname caches are concurrent. 
uuid = uuidutils.generate_uuid() backend = imagebackend.Backend(False) wait1 = eventlet.event.Event() done1 = eventlet.event.Event() sig1 = eventlet.event.Event() thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid), 'name').cache, _concurrency, 'fname2', None, signal=sig1, wait=wait1, done=done1) eventlet.sleep(0) # Thread 1 should run before thread 2. sig1.wait() wait2 = eventlet.event.Event() done2 = eventlet.event.Event() sig2 = eventlet.event.Event() thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid), 'name').cache, _concurrency, 'fname1', None, signal=sig2, wait=wait2, done=done2) eventlet.sleep(0) # Wait for thread 2 to start. sig2.wait() wait2.send() tries = 0 while not done2.ready() and tries < 10: eventlet.sleep(0) tries += 1 try: self.assertTrue(done2.ready()) finally: wait1.send() eventlet.sleep(0) # Wait on greenthreads to assert they didn't raise exceptions # during execution thr1.wait() thr2.wait() class FakeVolumeDriver(object): def __init__(self, *args, **kwargs): pass def attach_volume(self, *args): pass def detach_volume(self, *args): pass def get_xml(self, *args): return "" def get_config(self, *args): """Connect the volume to a fake device.""" conf = vconfig.LibvirtConfigGuestDisk() conf.source_type = "network" conf.source_protocol = "fake" conf.source_name = "fake" conf.target_dev = "fake" conf.target_bus = "fake" return conf def connect_volume(self, *args): """Connect the volume to a fake device.""" pass class FakeConfigGuestDisk(object): def __init__(self, *args, **kwargs): self.source_type = None self.driver_cache = None class FakeConfigGuest(object): def __init__(self, *args, **kwargs): self.driver_cache = None class FakeNodeDevice(object): def __init__(self, fakexml): self.xml = fakexml def XMLDesc(self, flags): return self.xml def _create_test_instance(): flavor = objects.Flavor(memory_mb=2048, swap=0, vcpu_weight=None, root_gb=10, id=2, name=u'm1.small', ephemeral_gb=20, rxtx_factor=1.0, flavorid=u'1', vcpus=2, 
extra_specs={}) return { 'id': 1, 'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310', 'memory_kb': '1024000', 'basepath': '/some/path', 'bridge_name': 'br100', 'display_name': "Acme webserver", 'vcpus': 2, 'project_id': 'fake', 'bridge': 'br101', 'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6', 'root_gb': 10, 'ephemeral_gb': 20, 'instance_type_id': '5', # m1.small 'extra_specs': {}, 'system_metadata': { 'image_disk_format': 'raw', }, 'flavor': flavor, 'new_flavor': None, 'old_flavor': None, 'pci_devices': objects.PciDeviceList(), 'numa_topology': None, 'config_drive': None, 'vm_mode': None, 'kernel_id': None, 'ramdisk_id': None, 'os_type': 'linux', 'user_id': '838a72b0-0d54-4827-8fd6-fb1227633ceb', 'ephemeral_key_uuid': None, 'vcpu_model': None, 'host': 'fake-host', 'task_state': None, } class LibvirtConnTestCase(test.NoDBTestCase): REQUIRES_LOCKING = True _EPHEMERAL_20_DEFAULT = eph_name(20) def setUp(self): super(LibvirtConnTestCase, self).setUp() self.user_id = 'fake' self.project_id = 'fake' self.context = context.get_admin_context() temp_dir = self.useFixture(fixtures.TempDir()).path self.flags(instances_path=temp_dir) self.flags(snapshots_directory=temp_dir, group='libvirt') self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.driver.libvirt_utils', fake_libvirt_utils)) self.flags(sysinfo_serial="hardware", group="libvirt") # normally loaded during nova-compute startup os_vif.initialize() self.useFixture(fixtures.MonkeyPatch( 'nova.virt.libvirt.imagebackend.libvirt_utils', fake_libvirt_utils)) def fake_extend(image, size, use_cow=False): pass self.stubs.Set(libvirt_driver.disk_api, 'extend', fake_extend) self.stubs.Set(imagebackend.Image, 'resolve_driver_format', imagebackend.Image._get_driver_format) self.useFixture(fakelibvirt.FakeLibvirtFixture()) self.test_instance = _create_test_instance() self.test_image_meta = { "disk_format": "raw", } self.image_service = nova.tests.unit.image.fake.stub_out_image_service( self) self.device_xml_tmpl = """ <domain 
type='kvm'> <devices> <disk type='block' device='disk'> <driver name='qemu' type='raw' cache='none'/> <source dev='{device_path}'/> <target bus='virtio' dev='vdb'/> <serial>58a84f6d-3f0c-4e19-a0af-eb657b790657</serial> <address type='pci' domain='0x0' bus='0x0' slot='0x04' \ function='0x0'/> </disk> </devices> </domain> """ def relpath(self, path): return os.path.relpath(path, CONF.instances_path) def tearDown(self): nova.tests.unit.image.fake.FakeImageService_reset() super(LibvirtConnTestCase, self).tearDown() def test_driver_capabilities(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertTrue(drvr.capabilities['has_imagecache'], 'Driver capabilities for \'has_imagecache\' ' 'is invalid') self.assertTrue(drvr.capabilities['supports_recreate'], 'Driver capabilities for \'supports_recreate\' ' 'is invalid') self.assertFalse(drvr.capabilities['supports_migrate_to_same_host'], 'Driver capabilities for ' '\'supports_migrate_to_same_host\' is invalid') self.assertTrue(drvr.capabilities['supports_attach_interface'], 'Driver capabilities for ' '\'supports_attach_interface\' ' 'is invalid') def create_fake_libvirt_mock(self, **kwargs): """Defining mocks for LibvirtDriver(libvirt is not used).""" # A fake libvirt.virConnect class FakeLibvirtDriver(object): def defineXML(self, xml): return FakeVirtDomain() # Creating mocks volume_driver = ['iscsi=nova.tests.unit.virt.libvirt.test_driver' '.FakeVolumeDriver'] fake = FakeLibvirtDriver() # Customizing above fake if necessary for key, val in kwargs.items(): fake.__setattr__(key, val) self.stubs.Set(libvirt_driver.LibvirtDriver, '_conn', fake) self.stubs.Set(libvirt_driver.LibvirtDriver, '_get_volume_drivers', lambda x: volume_driver) self.stubs.Set(host.Host, 'get_connection', lambda x: fake) def fake_lookup(self, instance_name): return FakeVirtDomain() def fake_execute(self, *args, **kwargs): open(args[-1], "a").close() def _create_service(self, **kwargs): service_ref = {'host': kwargs.get('host', 
'dummy'), 'disabled': kwargs.get('disabled', False), 'binary': 'nova-compute', 'topic': 'compute', 'report_count': 0} return objects.Service(**service_ref) def _get_pause_flag(self, drvr, network_info, power_on=True, vifs_already_plugged=False): timeout = CONF.vif_plugging_timeout events = [] if (drvr._conn_supports_start_paused and utils.is_neutron() and not vifs_already_plugged and power_on and timeout): events = drvr._get_neutron_events(network_info) return bool(events) def test_public_api_signatures(self): baseinst = driver.ComputeDriver(None) inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertPublicAPISignatures(baseinst, inst) def test_legacy_block_device_info(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertFalse(drvr.need_legacy_block_device_info) @mock.patch.object(host.Host, "has_min_version") def test_min_version_start_ok(self, mock_version): mock_version.return_value = True drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.init_host("dummyhost") @mock.patch.object(host.Host, "has_min_version") def test_min_version_start_abort(self, mock_version): mock_version.return_value = False drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertRaises(exception.NovaException, drvr.init_host, "dummyhost") @mock.patch.object(fakelibvirt.Connection, 'getLibVersion', return_value=versionutils.convert_version_to_int( libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) - 1) @mock.patch.object(libvirt_driver.LOG, 'warning') def test_next_min_version_deprecation_warning(self, mock_warning, mock_get_libversion): # Skip test if there's no currently planned new min version if (versionutils.convert_version_to_int( libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) == versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_VERSION)): self.skipTest("NEXT_MIN_LIBVIRT_VERSION == MIN_LIBVIRT_VERSION") # Test that a warning is logged if the libvirt version is less than # the next required minimum version. 
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")

        # assert that the next min version is in a warning message
        expected_arg = {'version': versionutils.convert_version_to_str(
            versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))}
        # Scan every LOG.warning() call made during init_host() looking for
        # one whose second positional argument is the expected version dict.
        version_arg_found = False
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertTrue(version_arg_found)

    # getVersion (the QEMU/hypervisor version) is mocked to one less than
    # NEXT_MIN_QEMU_VERSION so the deprecation path is exercised.
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.NEXT_MIN_QEMU_VERSION) - 1)
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_next_min_qemu_version_deprecation_warning(self, mock_warning,
                                                       mock_get_libversion):
        """init_host() must log a deprecation warning when the QEMU version
        is older than the planned future minimum (NEXT_MIN_QEMU_VERSION).
        """
        # Skip test if there's no currently planned new min version
        if (versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_QEMU_VERSION) ==
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_VERSION)):
            self.skipTest("NEXT_MIN_QEMU_VERSION == MIN_QEMU_VERSION")

        # Test that a warning is logged if the QEMU version is less than
        # the next required minimum version.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")

        # assert that the next min version is in a warning message
        expected_arg = {'version': versionutils.convert_version_to_str(
            versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_QEMU_VERSION))}
        # Scan every LOG.warning() call made during init_host() looking for
        # one whose second positional argument is the expected version dict.
        version_arg_found = False
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertTrue(version_arg_found)

    # getLibVersion is mocked to exactly NEXT_MIN_LIBVIRT_VERSION, i.e. the
    # boundary case where no deprecation warning should be emitted.
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_next_min_version_ok(self, mock_warning, mock_get_libversion):
        """init_host() must NOT warn when the libvirt version already meets
        the planned future minimum (NEXT_MIN_LIBVIRT_VERSION).
        """
        # Skip test if there's no currently planned new min version
        if (versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) ==
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_VERSION)):
            self.skipTest("NEXT_MIN_LIBVIRT_VERSION == MIN_LIBVIRT_VERSION")

        # Test that a warning is not logged if the libvirt version is greater
        # than or equal to NEXT_MIN_LIBVIRT_VERSION.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")

        # assert that the next min version is NOT in any warning message
        # (the mocked libvirt version already satisfies the future minimum)
        expected_arg = {'version': versionutils.convert_version_to_str(
            versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))}
        version_arg_found = False
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertFalse(version_arg_found)

    # getVersion (the QEMU/hypervisor version) is mocked to exactly
    # NEXT_MIN_QEMU_VERSION, the boundary case where no warning is expected.
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.NEXT_MIN_QEMU_VERSION))
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_next_min_qemu_version_ok(self, mock_warning, mock_get_libversion):
        """init_host() must NOT warn when the QEMU version already meets the
        planned future minimum (NEXT_MIN_QEMU_VERSION).
        """
        # Skip test if there's no currently planned new min version
        if (versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_QEMU_VERSION) ==
            versionutils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_VERSION)):
            self.skipTest("NEXT_MIN_QEMU_VERSION == MIN_QEMU_VERSION")

        # Test that a warning is not logged if the QEMU version is greater
        # than or equal to NEXT_MIN_QEMU_VERSION.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")

        # assert that the next min version is NOT in any warning message
        # (the mocked QEMU version already satisfies the future minimum)
        expected_arg = {'version': versionutils.convert_version_to_str(
            versionutils.convert_version_to_int(
                libvirt_driver.NEXT_MIN_QEMU_VERSION))}
        version_arg_found = False
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertFalse(version_arg_found)

    # PPC64 hosts have their own (stricter) minimums, taken from the
    # MIN_LIBVIRT_OTHER_ARCH / MIN_QEMU_OTHER_ARCH maps.  Here libvirt is
    # one below its arch-specific minimum while QEMU is exactly at its own.
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.PPC64)) - 1)
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.PPC64)))
    @mock.patch.object(arch, "from_host", return_value=arch.PPC64)
    def test_min_version_ppc_old_libvirt(self, mock_libv, mock_qemu,
                                         mock_arch):
        # Too-old libvirt on PPC64 must abort compute startup.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host, "dummyhost")

    # Converse case: libvirt at its PPC64 minimum, QEMU one below its own.
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.PPC64)))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.PPC64)) - 1)
    @mock.patch.object(arch, "from_host", return_value=arch.PPC64)
    def test_min_version_ppc_old_qemu(self, mock_libv, mock_qemu, mock_arch):
        # Too-old QEMU on PPC64 must abort compute startup.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host, "dummyhost")

    # Happy path: both versions exactly at the PPC64 minimums.
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.PPC64)))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.PPC64)))
    @mock.patch.object(arch, "from_host", return_value=arch.PPC64)
    def test_min_version_ppc_ok(self, mock_libv, mock_qemu, mock_arch):
        # Versions at the PPC64 minimums: init_host() must succeed.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")

    # Same three scenarios as the PPC64 tests above, for s390x hosts.
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.S390X)) - 1)
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.S390X)))
    @mock.patch.object(arch, "from_host", return_value=arch.S390X)
    def test_min_version_s390_old_libvirt(self, mock_libv, mock_qemu,
                                          mock_arch):
        # Too-old libvirt on s390x must abort compute startup.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host, "dummyhost")

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.S390X)))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.S390X)) - 1)
    @mock.patch.object(arch, "from_host", return_value=arch.S390X)
    def test_min_version_s390_old_qemu(self, mock_libv, mock_qemu, mock_arch):
        # Too-old QEMU on s390x must abort compute startup.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host, "dummyhost")

    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_LIBVIRT_OTHER_ARCH.get(
                               arch.S390X)))
    @mock.patch.object(fakelibvirt.Connection, 'getVersion',
                       return_value=versionutils.convert_version_to_int(
                           libvirt_driver.MIN_QEMU_OTHER_ARCH.get(
                               arch.S390X)))
    @mock.patch.object(arch, "from_host", return_value=arch.S390X)
    def test_min_version_s390_ok(self, mock_libv, mock_qemu, mock_arch):
        # Versions at the s390x minimums: init_host() must succeed.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")

    def _do_test_parse_migration_flags(self, lm_expected=None,
                                       bm_expected=None):
        """Recompute the driver's migration flags from the current config
        and compare them against the expected bitmasks.

        :param lm_expected: expected live-migration flags, or None to skip
        :param bm_expected: expected block-migration flags, or None to skip
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr._parse_migration_flags()

        if lm_expected is not None:
            self.assertEqual(lm_expected, drvr._live_migration_flags)
        if bm_expected is not None:
            self.assertEqual(bm_expected, drvr._block_migration_flags)

    def test_parse_live_migration_flags_default(self):
        # Default live-migration flags: undefine source, p2p, live.
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE))

    def test_parse_live_migration_flags(self):
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE))

    def test_parse_block_migration_flags_default(self):
        # Block migration adds NON_SHARED_INC (incremental disk copy) to the
        # default live-migration flag set.
        self._do_test_parse_migration_flags(
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))

    def test_parse_block_migration_flags(self):
        self._do_test_parse_migration_flags(
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))

    def test_parse_migration_flags_p2p_xen(self):
        # With the xen virt type the PEER2PEER flag must be dropped from
        # both live- and block-migration flag sets.
        self.flags(virt_type='xen', group='libvirt')

        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))

    def test_live_migration_tunnelled_none(self):
        # live_migration_tunnelled=None (unset) still results in the
        # TUNNELLED flag being added to both flag sets.
        self.flags(live_migration_tunnelled=None, group='libvirt')
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED))

    def test_live_migration_tunnelled_true(self):
        # Explicitly enabling tunnelling adds TUNNELLED to both flag sets.
        self.flags(live_migration_tunnelled=True, group='libvirt')
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
                         libvirt_driver.libvirt.VIR_MIGRATE_TUNNELLED))

    # NOTE: the mock parameter is named 'host', shadowing the module-level
    # 'host' import within the test body.
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_live_migration_permit_postcopy_true(self, host):
        # With a new-enough libvirt/QEMU, enabling post-copy adds the
        # POSTCOPY flag to both flag sets.
        self.flags(live_migration_permit_post_copy=True, group='libvirt')
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
                         libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY))

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_live_migration_permit_auto_converge_true(self, host):
        # Enabling auto-converge adds the AUTO_CONVERGE flag to both sets.
        self.flags(live_migration_permit_auto_converge=True, group='libvirt')
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
                         libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE))

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_live_migration_permit_auto_converge_and_post_copy_true(self,
                                                                    host):
        # When both are enabled and supported, post-copy wins and
        # AUTO_CONVERGE is NOT set.
        self.flags(live_migration_permit_auto_converge=True, group='libvirt')
        self.flags(live_migration_permit_post_copy=True, group='libvirt')
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
                         libvirt_driver.libvirt.VIR_MIGRATE_POSTCOPY))

    @mock.patch.object(host.Host, 'has_min_version')
    def test_live_migration_auto_converge_and_post_copy_true_old_libvirt(
            self, mock_host):
        # Both enabled, but libvirt/QEMU too old for post-copy: the driver
        # must fall back to AUTO_CONVERGE only.
        self.flags(live_migration_permit_auto_converge=True, group='libvirt')
        self.flags(live_migration_permit_post_copy=True, group='libvirt')

        def fake_has_min_version(lv_ver=None, hv_ver=None, hv_type=None):
            # Report "too old" only for the post-copy version probe.
            if (lv_ver == libvirt_driver.MIN_LIBVIRT_POSTCOPY_VERSION and
                    hv_ver == libvirt_driver.MIN_QEMU_POSTCOPY_VERSION):
                return False
            return True
        mock_host.side_effect = fake_has_min_version

        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC |
                         libvirt_driver.libvirt.VIR_MIGRATE_AUTO_CONVERGE))

    @mock.patch.object(host.Host, 'has_min_version', return_value=False)
    def test_live_migration_permit_postcopy_true_old_libvirt(self, host):
        # Post-copy requested but unsupported: flag sets stay at defaults.
        self.flags(live_migration_permit_post_copy=True, group='libvirt')

        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))

    @mock.patch.object(host.Host, 'has_min_version', return_value=False)
    def test_live_migration_permit_auto_converge_true_old_libvirt(self, host):
        # Auto-converge requested but unsupported: flag sets stay at
        # defaults.
        self.flags(live_migration_permit_auto_converge=True, group='libvirt')

        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))

    def test_live_migration_permit_postcopy_false(self):
        # Post-copy disabled (default): POSTCOPY must not appear.
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))

    def test_live_migration_permit_autoconverge_false(self):
        # Auto-converge disabled (default): AUTO_CONVERGE must not appear.
        self._do_test_parse_migration_flags(
            lm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE),
            bm_expected=(libvirt_driver.libvirt.VIR_MIGRATE_UNDEFINE_SOURCE |
                         libvirt_driver.libvirt.VIR_MIGRATE_PEER2PEER |
                         libvirt_driver.libvirt.VIR_MIGRATE_LIVE |
                         libvirt_driver.libvirt.VIR_MIGRATE_NON_SHARED_INC))

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password(self, mock_get_guest, ver, mock_image):
        """set_admin_password() on a Linux/KVM guest with the QEMU guest
        agent enabled sets the password for the default 'root' account.
        """
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")

        mock_guest.set_user_password.assert_called_once_with("root", "123")

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_parallels(self, mock_get_guest, ver):
        # Parallels does not require the guest-agent image property.
        self.flags(virt_type='parallels', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")

        mock_guest.set_user_password.assert_called_once_with("root", "123")

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_windows(self, mock_get_guest, ver, mock_image):
        # Windows guests get the 'Administrator' account instead of 'root'.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        instance.os_type = "windows"
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")

        mock_guest.set_user_password.assert_called_once_with(
            "Administrator", "123")

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_image(self, mock_get_guest, ver, mock_image):
        # The image's os_admin_user property overrides the default account.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes",
            "os_admin_user": "foo"
        }}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.set_admin_password(instance, "123")

        mock_guest.set_user_password.assert_called_once_with("foo", "123")

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host, 'has_min_version', return_value=False)
    def test_set_admin_password_bad_version(self, mock_svc, mock_image):
        # Too-old libvirt: SetAdminPasswdNotSupported for kvm and parallels.
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        for hyp in ('kvm', 'parallels'):
            self.flags(virt_type=hyp, group='libvirt')
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            self.assertRaises(exception.SetAdminPasswdNotSupported,
                              drvr.set_admin_password, instance, "123")

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_set_admin_password_bad_hyp(self, mock_svc, mock_image):
        # Unsupported virt type (lxc): SetAdminPasswdNotSupported.
        self.flags(virt_type='lxc', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.SetAdminPasswdNotSupported,
                          drvr.set_admin_password, instance, "123")

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_set_admin_password_guest_agent_not_running(self, mock_svc):
        # No hw_qemu_guest_agent image property: QemuGuestAgentNotEnabled.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.QemuGuestAgentNotEnabled,
                          drvr.set_admin_password, instance, "123")

    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def test_set_admin_password_error(self, mock_get_guest, ver, mock_image):
        # libvirtError from the guest agent is wrapped in a NovaException.
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        mock_image.return_value = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        mock_guest = mock.Mock(spec=libvirt_guest.Guest)
        mock_guest.set_user_password.side_effect = (
            fakelibvirt.libvirtError("error"))
        mock_get_guest.return_value = mock_guest

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.set_admin_password, instance, "123")

    @mock.patch.object(objects.Service, 'save')
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_disable(self, mock_svc, mock_save):
        # Tests disabling an enabled host: the service record is marked
        # disabled and saved exactly once.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(False)
        self.assertTrue(svc.disabled)
        mock_save.assert_called_once_with()

    @mock.patch.object(objects.Service, 'save')
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable(self, mock_svc, mock_save):
        # Tests enabling a disabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        # since disabled_reason is not set and not prefixed with "AUTO:",
        # service should not be enabled.
        mock_save.assert_not_called()
        self.assertTrue(svc.disabled)

    @mock.patch.object(objects.Service, 'save')
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable_state_enabled(self, mock_svc,
                                                        mock_save):
        # Tests enabling an enabled host: no state change, nothing saved.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=False, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        self.assertFalse(svc.disabled)
        mock_save.assert_not_called()

    @mock.patch.object(objects.Service, 'save')
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_disable_state_disabled(self, mock_svc,
                                                          mock_save):
        # Tests disabling a disabled host: no state change, nothing saved.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(False)
        mock_save.assert_not_called()
        self.assertTrue(svc.disabled)

    def test_set_host_enabled_swallows_exceptions(self):
        # Tests that set_host_enabled will swallow exceptions coming from the
        # db_api code so they don't break anything calling it, e.g. the
        # _get_new_connection method.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        with mock.patch.object(db, 'service_get_by_compute_host') as db_mock:
            # Make db.service_get_by_compute_host raise NovaException; this
            # is more robust than just raising ComputeHostNotFound.
db_mock.side_effect = exception.NovaException drvr._set_host_enabled(False) @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName") def test_prepare_pci_device(self, mock_lookup): pci_devices = [dict(hypervisor_name='xxx')] self.flags(virt_type='xen', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conn = drvr._host.get_connection() mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn) drvr._prepare_pci_devices_for_use(pci_devices) @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName") @mock.patch.object(fakelibvirt.virNodeDevice, "dettach") def test_prepare_pci_device_exception(self, mock_detach, mock_lookup): pci_devices = [dict(hypervisor_name='xxx', id='id1', instance_uuid='uuid')] self.flags(virt_type='xen', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conn = drvr._host.get_connection() mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn) mock_detach.side_effect = fakelibvirt.libvirtError("xxxx") self.assertRaises(exception.PciDevicePrepareFailed, drvr._prepare_pci_devices_for_use, pci_devices) @mock.patch.object(host.Host, "has_min_version", return_value=False) def test_device_metadata(self, mock_version): xml = """ <domain> <name>dummy</name> <uuid>32dfcb37-5af1-552b-357c-be8c3aa38310</uuid> <memory>1048576</memory> <vcpu>1</vcpu> <os> <type arch='x86_64' machine='pc-i440fx-2.4'>hvm</type> </os> <devices> <disk type='block' device='disk'> <driver name='qemu' type='qcow2'/> <source dev='/dev/mapper/generic'/> <target dev='sda' bus='scsi'/> <address type='drive' controller='0' bus='0' target='0' unit='0'/> </disk> <disk type='block' device='disk'> <driver name='qemu' type='qcow2'/> <source dev='/dev/mapper/generic-1'/> <target dev='hda' bus='ide'/> <address type='drive' controller='0' bus='1' target='0' unit='0'/> </disk> <disk type='block' device='disk'> <driver name='qemu' type='qcow2'/> <source dev='/dev/mapper/generic-2'/> <target dev='hdb' 
bus='ide'/> <address type='drive' controller='0' bus='1' target='1' unit='1'/> </disk> <disk type='block' device='disk'> <driver name='qemu' type='qcow2'/> <source dev='/dev/mapper/aa1'/> <target dev='sdb' bus='usb'/> </disk> <disk type='block' device='disk'> <driver name='qemu' type='qcow2'/> <source dev='/var/lib/libvirt/images/centos'/> <backingStore/> <target dev='vda' bus='virtio'/> <boot order='1'/> <alias name='virtio-disk0'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x09' function='0x0'/> </disk> <disk type='file' device='disk'> <driver name='qemu' type='qcow2' cache='none'/> <source file='/var/lib/libvirt/images/generic.qcow2'/> <target dev='vdb' bus='virtio'/> <address type='virtio-mmio'/> </disk> <disk type='file' device='disk'> <driver name='qemu' type='qcow2'/> <source file='/var/lib/libvirt/images/test.qcow2'/> <backingStore/> <target dev='vdc' bus='virtio'/> <alias name='virtio-disk1'/> <address type='ccw' cssid='0xfe' ssid='0x0' devno='0x0000'/> </disk> <interface type='network'> <mac address='52:54:00:f6:35:8f'/> <source network='default'/> <model type='virtio'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/> </interface> <interface type='network'> <mac address='51:5a:2c:a4:5e:1b'/> <source network='default'/> <model type='virtio'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x1'/> </interface> <interface type='network'> <mac address='fa:16:3e:d1:28:e4'/> <source network='default'/> <model type='virtio'/> <address type='virtio-mmio'/> </interface> <interface type='network'> <mac address='52:54:00:14:6f:50'/> <source network='default' bridge='virbr0'/> <target dev='vnet0'/> <model type='virtio'/> <alias name='net0'/> <address type='ccw' cssid='0xfe' ssid='0x0' devno='0x0001'/> </interface> </devices> </domain>""" drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) dom = fakelibvirt.Domain(drvr._get_connection(), xml, False) guest = libvirt_guest.Guest(dom) instance_ref = 
objects.Instance(**self.test_instance) bdms = block_device_obj.block_device_make_list_from_dicts( self.context, [ fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/sda', 'tag': "db"}), fake_block_device.FakeDbBlockDeviceDict( {'id': 2, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/hda', 'tag': "nfvfunc1"}), fake_block_device.FakeDbBlockDeviceDict( {'id': 3, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/sdb', 'tag': "nfvfunc2"}), fake_block_device.FakeDbBlockDeviceDict( {'id': 4, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/hdb'}), fake_block_device.FakeDbBlockDeviceDict( {'id': 5, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/vda', 'tag': "nfvfunc3"}), fake_block_device.FakeDbBlockDeviceDict( {'id': 6, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/vdb', 'tag': "nfvfunc4"}), fake_block_device.FakeDbBlockDeviceDict( {'id': 7, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/vdc', 'tag': "nfvfunc5"}), ] ) vif = obj_vif.VirtualInterface(context=self.context) vif.address = '52:54:00:f6:35:8f' vif.network_id = 123 vif.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310' vif.uuid = '12ec4b21-ef22-6c21-534b-ba3e3ab3a311' vif.tag = 'mytag1' vif1 = obj_vif.VirtualInterface(context=self.context) vif1.address = '51:5a:2c:a4:5e:1b' vif1.network_id = 123 vif1.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310' vif1.uuid = 'abec4b21-ef22-6c21-534b-ba3e3ab3a312' vif1.tag = None vif2 = obj_vif.VirtualInterface(context=self.context) vif2.address = 'fa:16:3e:d1:28:e4' vif2.network_id = 123 vif2.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310' vif2.uuid = '645686e4-7086-4eab-8c2f-c41f017a1b16' vif2.tag = 'mytag2' vif3 = obj_vif.VirtualInterface(context=self.context) vif3.address = '52:54:00:14:6f:50' vif3.network_id = 123 
vif3.instance_uuid = '32dfcb37-5af1-552b-357c-be8c3aa38310' vif3.uuid = '99cc3604-782d-4a32-a27c-bc33ac56ce86' vif3.tag = 'mytag3' vifs = [vif, vif1, vif2, vif3] with test.nested( mock.patch('nova.objects.VirtualInterfaceList' '.get_by_instance_uuid', return_value=vifs), mock.patch('nova.objects.BlockDeviceMappingList' '.get_by_instance_uuid', return_value=bdms), mock.patch('nova.virt.libvirt.host.Host.get_guest', return_value=guest), mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc', return_value=xml)): metadata_obj = drvr._build_device_metadata(self.context, instance_ref) metadata = metadata_obj.devices self.assertEqual(9, len(metadata)) self.assertIsInstance(metadata[0], objects.DiskMetadata) self.assertIsInstance(metadata[0].bus, objects.SCSIDeviceBus) self.assertEqual(['db'], metadata[0].tags) self.assertFalse(metadata[0].bus.obj_attr_is_set('address')) self.assertEqual(['nfvfunc1'], metadata[1].tags) self.assertIsInstance(metadata[1], objects.DiskMetadata) self.assertIsInstance(metadata[1].bus, objects.IDEDeviceBus) self.assertEqual(['nfvfunc1'], metadata[1].tags) self.assertFalse(metadata[1].bus.obj_attr_is_set('address')) self.assertIsInstance(metadata[2], objects.DiskMetadata) self.assertIsInstance(metadata[2].bus, objects.USBDeviceBus) self.assertEqual(['nfvfunc2'], metadata[2].tags) self.assertFalse(metadata[2].bus.obj_attr_is_set('address')) self.assertIsInstance(metadata[3], objects.DiskMetadata) self.assertIsInstance(metadata[3].bus, objects.PCIDeviceBus) self.assertEqual(['nfvfunc3'], metadata[3].tags) self.assertEqual('0000:00:09.0', metadata[3].bus.address) self.assertIsInstance(metadata[4], objects.DiskMetadata) self.assertEqual(['nfvfunc4'], metadata[4].tags) self.assertIsInstance(metadata[5], objects.DiskMetadata) self.assertEqual(['nfvfunc5'], metadata[5].tags) self.assertIsInstance(metadata[6], objects.NetworkInterfaceMetadata) self.assertIsInstance(metadata[6].bus, objects.PCIDeviceBus) self.assertEqual(['mytag1'], 
metadata[6].tags) self.assertEqual('0000:00:03.0', metadata[6].bus.address) self.assertIsInstance(metadata[7], objects.NetworkInterfaceMetadata) self.assertEqual(['mytag2'], metadata[7].tags) self.assertIsInstance(metadata[8], objects.NetworkInterfaceMetadata) self.assertEqual(['mytag3'], metadata[8].tags) @mock.patch.object(host.Host, 'get_connection') @mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc') def test_detach_pci_devices(self, mocked_get_xml_desc, mock_conn): fake_domXML1_with_pci = ( """<domain> <devices> <disk type='file' device='disk'> <driver name='qemu' type='qcow2' cache='none'/> <source file='xxx'/> <target dev='vda' bus='virtio'/> <alias name='virtio-disk0'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/> </disk> <hostdev mode="subsystem" type="pci" managed="yes"> <source> <address function="0x1" slot="0x10" domain="0x0001" bus="0x04"/> </source> </hostdev></devices></domain>""") fake_domXML1_without_pci = ( """<domain> <devices> <disk type='file' device='disk'> <driver name='qemu' type='qcow2' cache='none'/> <source file='xxx'/> <target dev='vda' bus='virtio'/> <alias name='virtio-disk0'/> <address type='pci' domain='0x0001' bus='0x00' slot='0x04' function='0x0'/> </disk></devices></domain>""") pci_device_info = {'compute_node_id': 1, 'instance_uuid': 'uuid', 'address': '0001:04:10.1'} pci_device = objects.PciDevice(**pci_device_info) pci_devices = [pci_device] mocked_get_xml_desc.return_value = fake_domXML1_without_pci drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) dom = fakelibvirt.Domain( drvr._get_connection(), fake_domXML1_with_pci, False) guest = libvirt_guest.Guest(dom) drvr._detach_pci_devices(guest, pci_devices) @mock.patch.object(host.Host, 'get_connection') @mock.patch.object(nova.virt.libvirt.guest.Guest, 'get_xml_desc') def test_detach_pci_devices_timeout(self, mocked_get_xml_desc, mock_conn): fake_domXML1_with_pci = ( """<domain> <devices> <disk type='file' device='disk'> 
<driver name='qemu' type='qcow2' cache='none'/> <source file='xxx'/> <target dev='vda' bus='virtio'/> <alias name='virtio-disk0'/> <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/> </disk> <hostdev mode="subsystem" type="pci" managed="yes"> <source> <address function="0x1" slot="0x10" domain="0x0001" bus="0x04"/> </source> </hostdev></devices></domain>""") pci_device_info = {'compute_node_id': 1, 'instance_uuid': 'uuid', 'address': '0001:04:10.1'} pci_device = objects.PciDevice(**pci_device_info) pci_devices = [pci_device] mocked_get_xml_desc.return_value = fake_domXML1_with_pci drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) dom = fakelibvirt.Domain( drvr._get_connection(), fake_domXML1_with_pci, False) guest = libvirt_guest.Guest(dom) self.assertRaises(exception.PciDeviceDetachFailed, drvr._detach_pci_devices, guest, pci_devices) @mock.patch.object(connector, 'get_connector_properties') def test_get_connector(self, fake_get_connector): initiator = 'fake.initiator.iqn' ip = 'fakeip' host = 'fakehost' wwpns = ['100010604b019419'] wwnns = ['200010604b019419'] self.flags(my_ip=ip) self.flags(host=host) expected = { 'ip': ip, 'initiator': initiator, 'host': host, 'wwpns': wwpns, 'wwnns': wwnns } volume = { 'id': 'fake' } # TODO(walter-boring) add the fake in os-brick fake_get_connector.return_value = expected drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) result = drvr.get_volume_connector(volume) self.assertThat(expected, matchers.DictMatches(result)) @mock.patch.object(connector, 'get_connector_properties') def test_get_connector_storage_ip(self, fake_get_connector): ip = '100.100.100.100' storage_ip = '101.101.101.101' self.flags(my_block_storage_ip=storage_ip, my_ip=ip) volume = { 'id': 'fake' } expected = { 'ip': storage_ip } # TODO(walter-boring) add the fake in os-brick fake_get_connector.return_value = expected drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) result = 
drvr.get_volume_connector(volume) self.assertEqual(storage_ip, result['ip']) def test_lifecycle_event_registration(self): calls = [] def fake_registerErrorHandler(*args, **kwargs): calls.append('fake_registerErrorHandler') def fake_get_host_capabilities(**args): cpu = vconfig.LibvirtConfigGuestCPU() cpu.arch = arch.ARMV7 caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu calls.append('fake_get_host_capabilities') return caps @mock.patch.object(fakelibvirt, 'registerErrorHandler', side_effect=fake_registerErrorHandler) @mock.patch.object(host.Host, "get_capabilities", side_effect=fake_get_host_capabilities) def test_init_host(get_host_capabilities, register_error_handler): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.init_host("test_host") test_init_host() # NOTE(dkliban): Will fail if get_host_capabilities is called before # registerErrorHandler self.assertEqual(['fake_registerErrorHandler', 'fake_get_host_capabilities'], calls) def test_sanitize_log_to_xml(self): # setup fake data data = {'auth_password': 'scrubme'} bdm = [{'connection_info': {'data': data}}] bdi = {'block_device_mapping': bdm} # Tests that the parameters to the _get_guest_xml method # are sanitized for passwords when logged. 
def fake_debug(*args, **kwargs): if 'auth_password' in args[0]: self.assertNotIn('scrubme', args[0]) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conf = mock.Mock() with test.nested( mock.patch.object(libvirt_driver.LOG, 'debug', side_effect=fake_debug), mock.patch.object(drvr, '_get_guest_config', return_value=conf) ) as ( debug_mock, conf_mock ): drvr._get_guest_xml(self.context, self.test_instance, network_info={}, disk_info={}, image_meta={}, block_device_info=bdi) # we don't care what the log message is, we just want to make sure # our stub method is called which asserts the password is scrubbed self.assertTrue(debug_mock.called) @mock.patch.object(time, "time") def test_get_guest_config(self, time_mock): time_mock.return_value = 1234567.89 drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) test_instance = copy.deepcopy(self.test_instance) test_instance["display_name"] = "purple tomatoes" ctxt = context.RequestContext(project_id=123, project_name="aubergine", user_id=456, user_name="pie") flavor = objects.Flavor(name='m1.small', memory_mb=6, vcpus=28, root_gb=496, ephemeral_gb=8128, swap=33550336, extra_specs={}) instance_ref = objects.Instance(**test_instance) instance_ref.flavor = flavor image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info, context=ctxt) self.assertEqual(cfg.uuid, instance_ref["uuid"]) self.assertEqual(2, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeatureACPI) self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureAPIC) self.assertEqual(cfg.memory, 6 * units.Ki) self.assertEqual(cfg.vcpus, 28) self.assertEqual(cfg.os_type, vm_mode.HVM) self.assertEqual(cfg.os_boot_dev, ["hd"]) self.assertIsNone(cfg.os_root) self.assertEqual(len(cfg.devices), 10) 
self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[9], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(len(cfg.metadata), 1) self.assertIsInstance(cfg.metadata[0], vconfig.LibvirtConfigGuestMetaNovaInstance) self.assertEqual(version.version_string_with_package(), cfg.metadata[0].package) self.assertEqual("purple tomatoes", cfg.metadata[0].name) self.assertEqual(1234567.89, cfg.metadata[0].creationTime) self.assertEqual("image", cfg.metadata[0].roottype) self.assertEqual(str(instance_ref["image_ref"]), cfg.metadata[0].rootid) self.assertIsInstance(cfg.metadata[0].owner, vconfig.LibvirtConfigGuestMetaNovaOwner) self.assertEqual(456, cfg.metadata[0].owner.userid) self.assertEqual("pie", cfg.metadata[0].owner.username) self.assertEqual(123, cfg.metadata[0].owner.projectid) self.assertEqual("aubergine", cfg.metadata[0].owner.projectname) self.assertIsInstance(cfg.metadata[0].flavor, vconfig.LibvirtConfigGuestMetaNovaFlavor) self.assertEqual("m1.small", cfg.metadata[0].flavor.name) self.assertEqual(6, cfg.metadata[0].flavor.memory) self.assertEqual(28, cfg.metadata[0].flavor.vcpus) self.assertEqual(496, cfg.metadata[0].flavor.disk) self.assertEqual(8128, cfg.metadata[0].flavor.ephemeral) self.assertEqual(33550336, cfg.metadata[0].flavor.swap) def test_get_guest_config_lxc(self): self.flags(virt_type='lxc', group='libvirt') drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, {'mapping': {}}) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory) self.assertEqual(instance_ref.flavor.vcpus, cfg.vcpus) self.assertEqual(vm_mode.EXE, cfg.os_type) self.assertEqual("/sbin/init", cfg.os_init_path) self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline) self.assertIsNone(cfg.os_root) self.assertEqual(3, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestFilesys) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestConsole) def test_get_guest_config_lxc_with_id_maps(self): self.flags(virt_type='lxc', group='libvirt') self.flags(uid_maps=['0:1000:100'], group='libvirt') self.flags(gid_maps=['0:1000:100'], group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, {'mapping': {}}) self.assertEqual(instance_ref["uuid"], cfg.uuid) self.assertEqual(instance_ref.flavor.memory_mb * units.Ki, cfg.memory) self.assertEqual(instance_ref.vcpus, cfg.vcpus) self.assertEqual(vm_mode.EXE, cfg.os_type) self.assertEqual("/sbin/init", cfg.os_init_path) self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline) self.assertIsNone(cfg.os_root) self.assertEqual(3, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestFilesys) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestConsole) 
self.assertEqual(len(cfg.idmaps), 2) self.assertIsInstance(cfg.idmaps[0], vconfig.LibvirtConfigGuestUIDMap) self.assertIsInstance(cfg.idmaps[1], vconfig.LibvirtConfigGuestGIDMap) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_fits(self, is_able): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps)): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_no_fit(self, is_able): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = 
blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([3])), mock.patch.object(random, 'choice') ) as (get_host_cap_mock, get_vcpu_pin_set_mock, choice_mock): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertFalse(choice_mock.called) self.assertEqual(set([3]), cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) def _test_get_guest_memory_backing_config( self, host_topology, inst_topology, numatune): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) with mock.patch.object( drvr, "_get_host_numa_topology", return_value=host_topology): return drvr._get_guest_memory_backing_config( inst_topology, numatune, {}) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_get_guest_memory_backing_config_large_success(self, mock_version): host_topology = objects.NUMATopology( cells=[ objects.NUMACell( id=3, cpuset=set([1]), memory=1024, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=2000, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0), objects.NUMAPagesTopology(size_kb=1048576, total=0, used=0), ])]) inst_topology = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell( id=3, cpuset=set([0, 1]), memory=1024, pagesize=2048)]) numa_tune = vconfig.LibvirtConfigGuestNUMATune() numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()] numa_tune.memnodes[0].cellid = 0 numa_tune.memnodes[0].nodeset = [3] result = self._test_get_guest_memory_backing_config( host_topology, inst_topology, numa_tune) self.assertEqual(1, len(result.hugepages)) self.assertEqual(2048, result.hugepages[0].size_kb) self.assertEqual([0], result.hugepages[0].nodeset) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_get_guest_memory_backing_config_smallest(self, 
mock_version): host_topology = objects.NUMATopology( cells=[ objects.NUMACell( id=3, cpuset=set([1]), memory=1024, mempages=[ objects.NUMAPagesTopology(size_kb=4, total=2000, used=0), objects.NUMAPagesTopology(size_kb=2048, total=512, used=0), objects.NUMAPagesTopology(size_kb=1048576, total=0, used=0), ])]) inst_topology = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell( id=3, cpuset=set([0, 1]), memory=1024, pagesize=4)]) numa_tune = vconfig.LibvirtConfigGuestNUMATune() numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()] numa_tune.memnodes[0].cellid = 0 numa_tune.memnodes[0].nodeset = [3] result = self._test_get_guest_memory_backing_config( host_topology, inst_topology, numa_tune) self.assertIsNone(result) def test_get_guest_memory_backing_config_realtime(self): flavor = {"extra_specs": { "hw:cpu_realtime": "yes", "hw:cpu_policy": "dedicated" }} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) membacking = drvr._get_guest_memory_backing_config( None, None, flavor) self.assertTrue(membacking.locked) self.assertFalse(membacking.sharedpages) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_pci_no_numa_info( self, is_able): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) pci_device_info = dict(test_pci_device.fake_db_dev) pci_device_info.update(compute_node_id=1, label='fake', 
status=fields.PciDeviceStatus.AVAILABLE, address='0000:00:00.1', instance_uuid=None, request_id=None, extra_info={}, numa_node=None) pci_device = objects.PciDevice(**pci_device_info) with test.nested( mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object( host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([3])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), mock.patch.object(pci_manager, "get_instance_pci_devs", return_value=[pci_device])): cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(set([3]), cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_2pci_no_fit(self, is_able): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) pci_device_info = dict(test_pci_device.fake_db_dev) pci_device_info.update(compute_node_id=1, label='fake', status=fields.PciDeviceStatus.AVAILABLE, address='0000:00:00.1', instance_uuid=None, request_id=None, extra_info={}, numa_node=1) pci_device = objects.PciDevice(**pci_device_info) pci_device_info.update(numa_node=0, address='0000:00:00.2') pci_device2 = objects.PciDevice(**pci_device_info) with test.nested( mock.patch.object( host.Host, 
"get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([3])), mock.patch.object(random, 'choice'), mock.patch.object(pci_manager, "get_instance_pci_devs", return_value=[pci_device, pci_device2]) ) as (get_host_cap_mock, get_vcpu_pin_set_mock, choice_mock, pci_mock): cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertFalse(choice_mock.called) self.assertEqual(set([3]), cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) @mock.patch.object(fakelibvirt.Connection, 'getType') @mock.patch.object(fakelibvirt.Connection, 'getVersion') @mock.patch.object(fakelibvirt.Connection, 'getLibVersion') @mock.patch.object(host.Host, 'get_capabilities') @mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled') def _test_get_guest_config_numa_unsupported(self, fake_lib_version, fake_version, fake_type, fake_arch, exception_class, pagesize, mock_host, mock_caps, mock_lib_version, mock_version, mock_type): instance_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024, pagesize=pagesize)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = fake_arch caps.host.topology = self._fake_caps_numa_topology() mock_type.return_value = fake_type mock_version.return_value = fake_version mock_lib_version.return_value = fake_lib_version mock_caps.return_value = caps drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) 
self.assertRaises(exception_class, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) def test_get_guest_config_numa_old_version_libvirt(self): self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1, versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION), host.HV_DRIVER_QEMU, arch.X86_64, exception.NUMATopologyUnsupported, None) def test_get_guest_config_numa_old_version_libvirt_ppc(self): self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION_PPC) - 1, versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION), host.HV_DRIVER_QEMU, arch.PPC64LE, exception.NUMATopologyUnsupported, None) def test_get_guest_config_numa_bad_version_libvirt(self): self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.BAD_LIBVIRT_NUMA_VERSIONS[0]), versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION), host.HV_DRIVER_QEMU, arch.X86_64, exception.NUMATopologyUnsupported, None) @mock.patch.object(libvirt_driver.LOG, 'warning') def test_has_numa_support_bad_version_libvirt_log(self, mock_warn): # Tests that a warning is logged once and only once when there is a bad # BAD_LIBVIRT_NUMA_VERSIONS detected. 
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertFalse(hasattr(drvr, '_bad_libvirt_numa_version_warn')) with mock.patch.object(drvr._host, 'has_version', return_value=True): for i in range(2): self.assertFalse(drvr._has_numa_support()) self.assertTrue(drvr._bad_libvirt_numa_version_warn) self.assertEqual(1, mock_warn.call_count) # assert the version is logged properly self.assertEqual('1.2.9.2', mock_warn.call_args[0][1]) def test_get_guest_config_numa_old_version_qemu(self): self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION), versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1, host.HV_DRIVER_QEMU, arch.X86_64, exception.NUMATopologyUnsupported, None) def test_get_guest_config_numa_other_arch_qemu(self): self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION), versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION), host.HV_DRIVER_QEMU, arch.S390, exception.NUMATopologyUnsupported, None) def test_get_guest_config_numa_xen(self): self.flags(virt_type='xen', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION), versionutils.convert_version_to_int((4, 5, 0)), 'XEN', arch.X86_64, exception.NUMATopologyUnsupported, None) def test_get_guest_config_numa_old_pages_libvirt(self): self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1, versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION), host.HV_DRIVER_QEMU, arch.X86_64, exception.MemoryPagesUnsupported, 2048) def test_get_guest_config_numa_old_pages_qemu(self): 
self.flags(virt_type='kvm', group='libvirt') self._test_get_guest_config_numa_unsupported( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION), versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1, host.HV_DRIVER_QEMU, arch.X86_64, exception.NUMATopologyUnsupported, 2048) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset( self, is_able): instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=1024, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology(kb_mem=4194304) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([2, 3])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))) ) as (has_min_version_mock, get_host_cap_mock, get_vcpu_pin_set_mock, get_online_cpus_mock): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) # NOTE(ndipanov): we make sure that pin_set was taken into account # when choosing viable cells self.assertEqual(set([2, 3]), cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.cpu.numa) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_non_numa_host_instance_topo(self, is_able): instance_topology = 
objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=0, cpuset=set([0]), memory=1024), objects.InstanceNUMACell( id=1, cpuset=set([2]), memory=1024)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps)): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) self.assertEqual(0, len(cfg.cputune.vcpupin)) self.assertIsNone(cfg.numatune) self.assertIsNotNone(cfg.cpu.numa) for instance_cell, numa_cfg_cell in zip( instance_topology.cells, cfg.cpu.numa.cells): self.assertEqual(instance_cell.id, numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_numa_host_instance_topo(self, is_able): instance_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, pagesize=None), objects.InstanceNUMACell( id=2, cpuset=set([2, 3]), memory=1024, pagesize=None)]) instance_ref = objects.Instance(**self.test_instance) 
instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([2, 3, 4, 5])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), ): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) # Test that the pinning is correct and limited to allowed only self.assertEqual(0, cfg.cputune.vcpupin[0].id) self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[0].cpuset) self.assertEqual(1, cfg.cputune.vcpupin[1].id) self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[1].cpuset) self.assertEqual(2, cfg.cputune.vcpupin[2].id) self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[2].cpuset) self.assertEqual(3, cfg.cputune.vcpupin[3].id) self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[3].cpuset) self.assertIsNotNone(cfg.cpu.numa) self.assertIsInstance(cfg.cputune.emulatorpin, vconfig.LibvirtConfigGuestCPUTuneEmulatorPin) self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset) for instance_cell, numa_cfg_cell, index in zip( instance_topology.cells, cfg.cpu.numa.cells, range(len(instance_topology.cells))): self.assertEqual(index, 
numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) allnodes = [cell.id for cell in instance_topology.cells] self.assertEqual(allnodes, cfg.numatune.memory.nodeset) self.assertEqual("strict", cfg.numatune.memory.mode) for instance_cell, memnode, index in zip( instance_topology.cells, cfg.numatune.memnodes, range(len(instance_topology.cells))): self.assertEqual(index, memnode.cellid) self.assertEqual([instance_cell.id], memnode.nodeset) self.assertEqual("strict", memnode.mode) def test_get_guest_config_numa_host_instance_topo_reordered(self): instance_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=3, cpuset=set([0, 1]), memory=1024), objects.InstanceNUMACell( id=0, cpuset=set([2, 3]), memory=1024)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), ): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) # Test that the pinning is correct and limited to 
allowed only self.assertEqual(0, cfg.cputune.vcpupin[0].id) self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[0].cpuset) self.assertEqual(1, cfg.cputune.vcpupin[1].id) self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[1].cpuset) self.assertEqual(2, cfg.cputune.vcpupin[2].id) self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[2].cpuset) self.assertEqual(3, cfg.cputune.vcpupin[3].id) self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[3].cpuset) self.assertIsNotNone(cfg.cpu.numa) self.assertIsInstance(cfg.cputune.emulatorpin, vconfig.LibvirtConfigGuestCPUTuneEmulatorPin) self.assertEqual(set([0, 1, 6, 7]), cfg.cputune.emulatorpin.cpuset) for index, (instance_cell, numa_cfg_cell) in enumerate(zip( instance_topology.cells, cfg.cpu.numa.cells)): self.assertEqual(index, numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) self.assertIsNone(numa_cfg_cell.memAccess) allnodes = set([cell.id for cell in instance_topology.cells]) self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset)) self.assertEqual("strict", cfg.numatune.memory.mode) for index, (instance_cell, memnode) in enumerate(zip( instance_topology.cells, cfg.numatune.memnodes)): self.assertEqual(index, memnode.cellid) self.assertEqual([instance_cell.id], memnode.nodeset) self.assertEqual("strict", memnode.mode) def test_get_guest_config_numa_host_instance_topo_cpu_pinning(self): instance_topology = objects.InstanceNUMATopology( cells=[objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, cpu_pinning={0: 24, 1: 25}), objects.InstanceNUMACell( id=0, cpuset=set([2, 3]), memory=1024, cpu_pinning={2: 0, 3: 1})]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) 
instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology( sockets_per_cell=4, cores_per_socket=3, threads_per_core=2) conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), ): cfg = conn._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsNone(cfg.cpuset) # Test that the pinning is correct and limited to allowed only self.assertEqual(0, cfg.cputune.vcpupin[0].id) self.assertEqual(set([24]), cfg.cputune.vcpupin[0].cpuset) self.assertEqual(1, cfg.cputune.vcpupin[1].id) self.assertEqual(set([25]), cfg.cputune.vcpupin[1].cpuset) self.assertEqual(2, cfg.cputune.vcpupin[2].id) self.assertEqual(set([0]), cfg.cputune.vcpupin[2].cpuset) self.assertEqual(3, cfg.cputune.vcpupin[3].id) self.assertEqual(set([1]), cfg.cputune.vcpupin[3].cpuset) self.assertIsNotNone(cfg.cpu.numa) # Emulator must be pinned to union of cfg.cputune.vcpupin[*].cpuset self.assertIsInstance(cfg.cputune.emulatorpin, vconfig.LibvirtConfigGuestCPUTuneEmulatorPin) self.assertEqual(set([0, 1, 24, 25]), cfg.cputune.emulatorpin.cpuset) for i, (instance_cell, numa_cfg_cell) in enumerate(zip( instance_topology.cells, cfg.cpu.numa.cells)): self.assertEqual(i, numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) self.assertIsNone(numa_cfg_cell.memAccess) allnodes = set([cell.id for cell in 
instance_topology.cells]) self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset)) self.assertEqual("strict", cfg.numatune.memory.mode) for i, (instance_cell, memnode) in enumerate(zip( instance_topology.cells, cfg.numatune.memnodes)): self.assertEqual(i, memnode.cellid) self.assertEqual([instance_cell.id], memnode.nodeset) self.assertEqual("strict", memnode.mode) def test_get_guest_config_numa_host_mempages_shared(self): instance_topology = objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, pagesize=2048), objects.InstanceNUMACell( id=2, cpuset=set([2, 3]), memory=1024, pagesize=2048)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={}) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([2, 3, 4, 5])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), ): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) for instance_cell, numa_cfg_cell, index in zip( instance_topology.cells, cfg.cpu.numa.cells, range(len(instance_topology.cells))): self.assertEqual(index, numa_cfg_cell.id) 
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) self.assertEqual("shared", numa_cfg_cell.memAccess) allnodes = [cell.id for cell in instance_topology.cells] self.assertEqual(allnodes, cfg.numatune.memory.nodeset) self.assertEqual("strict", cfg.numatune.memory.mode) for instance_cell, memnode, index in zip( instance_topology.cells, cfg.numatune.memnodes, range(len(instance_topology.cells))): self.assertEqual(index, memnode.cellid) self.assertEqual([instance_cell.id], memnode.nodeset) self.assertEqual("strict", memnode.mode) self.assertEqual(0, len(cfg.cputune.vcpusched)) self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset) def test_get_guest_config_numa_host_instance_cpu_pinning_realtime(self): instance_topology = objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, pagesize=2048), objects.InstanceNUMACell( id=2, cpuset=set([2, 3]), memory=1024, pagesize=2048)]) instance_ref = objects.Instance(**self.test_instance) instance_ref.numa_topology = instance_topology image_meta = objects.ImageMeta.from_dict(self.test_image_meta) flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496, ephemeral_gb=8128, swap=33550336, name='fake', extra_specs={ "hw:cpu_realtime": "yes", "hw:cpu_policy": "dedicated", "hw:cpu_realtime_mask": "^0-1" }) instance_ref.flavor = flavor caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with test.nested( mock.patch.object( objects.InstanceNUMATopology, "get_by_instance_uuid", return_value=instance_topology), mock.patch.object(host.Host, 'has_min_version', return_value=True), 
mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([2, 3, 4, 5])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set(range(8))), ): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) for instance_cell, numa_cfg_cell, index in zip( instance_topology.cells, cfg.cpu.numa.cells, range(len(instance_topology.cells))): self.assertEqual(index, numa_cfg_cell.id) self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus) self.assertEqual(instance_cell.memory * units.Ki, numa_cfg_cell.memory) self.assertEqual("shared", numa_cfg_cell.memAccess) allnodes = [cell.id for cell in instance_topology.cells] self.assertEqual(allnodes, cfg.numatune.memory.nodeset) self.assertEqual("strict", cfg.numatune.memory.mode) for instance_cell, memnode, index in zip( instance_topology.cells, cfg.numatune.memnodes, range(len(instance_topology.cells))): self.assertEqual(index, memnode.cellid) self.assertEqual([instance_cell.id], memnode.nodeset) self.assertEqual("strict", memnode.mode) self.assertEqual(1, len(cfg.cputune.vcpusched)) self.assertEqual("fifo", cfg.cputune.vcpusched[0].scheduler) self.assertEqual(set([2, 3]), cfg.cputune.vcpusched[0].vcpus) self.assertEqual(set([0, 1]), cfg.cputune.emulatorpin.cpuset) def test_get_cpu_numa_config_from_instance(self): topology = objects.InstanceNUMATopology(cells=[ objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128), objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128), ]) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) conf = drvr._get_cpu_numa_config_from_instance(topology, True) self.assertIsInstance(conf, vconfig.LibvirtConfigGuestCPUNUMA) self.assertEqual(0, conf.cells[0].id) self.assertEqual(set([1, 2]), conf.cells[0].cpus) self.assertEqual(131072, conf.cells[0].memory) self.assertEqual("shared", conf.cells[0].memAccess) self.assertEqual(1, conf.cells[1].id) self.assertEqual(set([3, 4]), 
conf.cells[1].cpus)
        self.assertEqual(131072, conf.cells[1].memory)
        self.assertEqual("shared", conf.cells[1].memAccess)

    def test_get_cpu_numa_config_from_instance_none(self):
        """A missing instance NUMA topology yields no guest CPU NUMA
        config at all (None), not an empty config object.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conf = drvr._get_cpu_numa_config_from_instance(None, False)
        self.assertIsNone(conf)

    @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support",
                       return_value=True)
    def test_get_memnode_numa_config_from_instance(self, mock_numa):
        """Each guest memnode must reference the id of the instance cell
        it backs, including a non-contiguous id (16 here), while the
        memnode cellid stays the guest-local index (0, 1, 2).
        """
        instance_topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128),
            objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128),
            objects.InstanceNUMACell(id=16, cpuset=set([5, 6]), memory=128)
        ])
        # Host topology mirrors the instance cells, again with the
        # deliberately non-contiguous node id 16.
        host_topology = objects.NUMATopology(
            cells=[
                objects.NUMACell(
                    id=0, cpuset=set([1, 2]), memory=1024, mempages=[]),
                objects.NUMACell(
                    id=1, cpuset=set([3, 4]), memory=1024, mempages=[]),
                objects.NUMACell(
                    id=16, cpuset=set([5, 6]), memory=1024, mempages=[])])

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        with test.nested(
                mock.patch.object(drvr, "_get_host_numa_topology",
                                  return_value=host_topology)):
            guest_numa_config = drvr._get_guest_numa_config(instance_topology,
                flavor={}, allowed_cpus=[1, 2, 3, 4, 5, 6], image_meta={})
            # Third guest cell (index 2) is backed by cell id 16.
            self.assertEqual(2, guest_numa_config.numatune.memnodes[2].cellid)
            self.assertEqual([16],
                             guest_numa_config.numatune.memnodes[2].nodeset)
            self.assertEqual(set([5, 6]),
                             guest_numa_config.numaconfig.cells[2].cpus)

    @mock.patch.object(host.Host, 'has_version', return_value=True)
    def test_has_cpu_policy_support(self, mock_has_version):
        # NOTE(review): host.Host.has_version (not has_min_version) is
        # forced True; the expectation is that a matched version is on a
        # known-bad list and _has_cpu_policy_support raises -- confirm
        # against the driver implementation.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.CPUPinningNotSupported,
                          drvr._has_cpu_policy_support)

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_has_numa_support",
                       return_value=True)
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_has_hugepage_support",
                       return_value=True)
    @mock.patch.object(host.Host, "get_capabilities")
    def
test_does_not_want_hugepages(self, mock_caps, mock_hp, mock_numa): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_topology = objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, pagesize=4), objects.InstanceNUMACell( id=2, cpuset=set([2, 3]), memory=1024, pagesize=4)]) caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() mock_caps.return_value = caps host_topology = drvr._get_host_numa_topology() self.assertFalse(drvr._wants_hugepages(None, None)) self.assertFalse(drvr._wants_hugepages(host_topology, None)) self.assertFalse(drvr._wants_hugepages(None, instance_topology)) self.assertFalse(drvr._wants_hugepages(host_topology, instance_topology)) @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support", return_value=True) @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_hugepage_support", return_value=True) @mock.patch.object(host.Host, "get_capabilities") def test_does_want_hugepages(self, mock_caps, mock_hp, mock_numa): for each_arch in [arch.I686, arch.X86_64, arch.PPC64LE, arch.PPC64]: self._test_does_want_hugepages( mock_caps, mock_hp, mock_numa, each_arch) def _test_does_want_hugepages(self, mock_caps, mock_hp, mock_numa, architecture): self.flags(reserved_huge_pages=[ {'node': 0, 'size': 2048, 'count': 128}, {'node': 1, 'size': 2048, 'count': 1}, {'node': 3, 'size': 2048, 'count': 64}]) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_topology = objects.InstanceNUMATopology( cells=[ objects.InstanceNUMACell( id=1, cpuset=set([0, 1]), memory=1024, pagesize=2048), objects.InstanceNUMACell( id=2, cpuset=set([2, 3]), memory=1024, pagesize=2048)]) caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = architecture 
caps.host.topology = self._fake_caps_numa_topology() mock_caps.return_value = caps host_topology = drvr._get_host_numa_topology() self.assertEqual(128, host_topology.cells[0].mempages[1].reserved) self.assertEqual(1, host_topology.cells[1].mempages[1].reserved) self.assertEqual(0, host_topology.cells[2].mempages[1].reserved) self.assertEqual(64, host_topology.cells[3].mempages[1].reserved) self.assertTrue(drvr._wants_hugepages(host_topology, instance_topology)) def test_get_guest_config_clock(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) hpet_map = { arch.X86_64: True, arch.I686: True, arch.PPC: False, arch.PPC64: False, arch.ARMV7: False, arch.AARCH64: False, } for guestarch, expect_hpet in hpet_map.items(): with mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch', return_value=guestarch): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsInstance(cfg.clock, vconfig.LibvirtConfigGuestClock) self.assertEqual(cfg.clock.offset, "utc") self.assertIsInstance(cfg.clock.timers[0], vconfig.LibvirtConfigGuestTimer) self.assertIsInstance(cfg.clock.timers[1], vconfig.LibvirtConfigGuestTimer) self.assertEqual(cfg.clock.timers[0].name, "pit") self.assertEqual(cfg.clock.timers[0].tickpolicy, "delay") self.assertEqual(cfg.clock.timers[1].name, "rtc") self.assertEqual(cfg.clock.timers[1].tickpolicy, "catchup") if expect_hpet: self.assertEqual(3, len(cfg.clock.timers)) self.assertIsInstance(cfg.clock.timers[2], vconfig.LibvirtConfigGuestTimer) self.assertEqual('hpet', cfg.clock.timers[2].name) self.assertFalse(cfg.clock.timers[2].present) else: self.assertEqual(2, len(cfg.clock.timers)) @mock.patch.object(libvirt_utils, 'get_arch') @mock.patch.object(host.Host, 
'has_min_version')
    def test_get_guest_config_windows(self, mock_version, mock_get_arch):
        # Windows guests get a "localtime" clock offset; with
        # has_min_version mocked False only pit/rtc/hpet timers are
        # configured, and hpet is explicitly disabled (present=False).
        mock_version.return_value = False
        mock_get_arch.return_value = arch.I686
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, disk_info)

        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")

        self.assertEqual(3, len(cfg.clock.timers), cfg.clock.timers)
        self.assertEqual("pit", cfg.clock.timers[0].name)
        self.assertEqual("rtc", cfg.clock.timers[1].name)
        self.assertEqual("hpet", cfg.clock.timers[2].name)
        self.assertFalse(cfg.clock.timers[2].present)

    @mock.patch.object(libvirt_utils, 'get_arch')
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows_timer(self, mock_version,
                                            mock_get_arch):
        # Same as above, but with has_min_version mocked True a fourth
        # "hypervclock" timer and the HyperV guest feature are added.
        mock_version.return_value = True
        mock_get_arch.return_value = arch.I686
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, disk_info)

        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")

        self.assertEqual(4, len(cfg.clock.timers), cfg.clock.timers)
        self.assertEqual("pit", cfg.clock.timers[0].name)
        self.assertEqual("rtc", cfg.clock.timers[1].name)
        self.assertEqual("hpet", cfg.clock.timers[2].name)
        self.assertFalse(cfg.clock.timers[2].present)
        self.assertEqual("hypervclock", cfg.clock.timers[3].name)
        self.assertTrue(cfg.clock.timers[3].present)

        self.assertEqual(3, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertIsInstance(cfg.features[2],
                              vconfig.LibvirtConfigGuestFeatureHyperV)

    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows_hyperv_feature2(self, mock_version):
        # With has_min_version mocked True the individual HyperV
        # enlightenments are enabled: relaxed, spinlocks (8191 retries)
        # and vapic.
        mock_version.return_value = True
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, disk_info)

        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")

        self.assertEqual(3, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertIsInstance(cfg.features[2],
                              vconfig.LibvirtConfigGuestFeatureHyperV)
        self.assertTrue(cfg.features[2].relaxed)
        self.assertTrue(cfg.features[2].spinlocks)
        self.assertEqual(8191, cfg.features[2].spinlock_retries)
        self.assertTrue(cfg.features[2].vapic)

    def test_get_guest_config_with_two_nics(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 2),
                                     image_meta, disk_info)
        self.assertEqual(2, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureAPIC) self.assertEqual(cfg.memory, instance_ref.flavor.memory_mb * units.Ki) self.assertEqual(cfg.vcpus, instance_ref.flavor.vcpus) self.assertEqual(cfg.os_type, vm_mode.HVM) self.assertEqual(cfg.os_boot_dev, ["hd"]) self.assertIsNone(cfg.os_root) self.assertEqual(len(cfg.devices), 10) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestInterface) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[9], vconfig.LibvirtConfigMemoryBalloon) def test_get_guest_config_bug_1118829(self): self.flags(virt_type='uml', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) disk_info = {'disk_bus': 'virtio', 'cdrom_bus': 'ide', 'mapping': {u'vda': {'bus': 'virtio', 'type': 'disk', 'dev': u'vda'}, 'root': {'bus': 'virtio', 'type': 'disk', 'dev': 'vda'}}} # NOTE(jdg): For this specific test leave this blank # This will exercise the failed code path still, # and won't require fakes and stubs of the iscsi discovery block_device_info = {} image_meta = objects.ImageMeta.from_dict(self.test_image_meta) drvr._get_guest_config(instance_ref, [], image_meta, disk_info, None, block_device_info) self.assertEqual(instance_ref['root_device_name'], '/dev/vda') def test_get_guest_config_with_root_device_name(self): self.flags(virt_type='uml', group='libvirt') drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) block_device_info = {'root_device_name': '/dev/vdb'} disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, block_device_info) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info, None, block_device_info) self.assertEqual(0, len(cfg.features)) self.assertEqual(cfg.memory, instance_ref.flavor.memory_mb * units.Ki) self.assertEqual(cfg.vcpus, instance_ref.flavor.vcpus) self.assertEqual(cfg.os_type, "uml") self.assertEqual(cfg.os_boot_dev, []) self.assertEqual(cfg.os_root, '/dev/vdb') self.assertEqual(len(cfg.devices), 3) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestConsole) def test_has_uefi_support_with_invalid_version(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) with mock.patch.object(drvr._host, 'has_min_version', return_value=False): self.assertFalse(drvr._has_uefi_support()) def test_has_uefi_support_not_supported_arch(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "alpha" self.assertFalse(drvr._has_uefi_support()) @mock.patch('os.path.exists', return_value=False) def test_has_uefi_support_with_no_loader_existed(self, mock_exist): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertFalse(drvr._has_uefi_support()) @mock.patch('os.path.exists', return_value=True) def test_has_uefi_support(self, mock_has_version): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = 
vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" with mock.patch.object(drvr._host, 'has_min_version', return_value=True): self.assertTrue(drvr._has_uefi_support()) def test_get_guest_config_with_uefi(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_firmware_type": "uefi"}}) instance_ref = objects.Instance(**self.test_instance) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with mock.patch.object(drvr, "_has_uefi_support", return_value=True) as mock_support: cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) mock_support.assert_called_once_with() self.assertEqual(cfg.os_loader_type, "pflash") def test_get_guest_config_with_block_device(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) conn_info = {'driver_volume_type': 'fake'} bdms = block_device_obj.block_device_make_list_from_dicts( self.context, [ fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/vdc'}), fake_block_device.FakeDbBlockDeviceDict( {'id': 2, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/vdd'}), ] ) info = {'block_device_mapping': driver_block_device.convert_volumes( bdms )} info['block_device_mapping'][0]['connection_info'] = conn_info info['block_device_mapping'][1]['connection_info'] = conn_info disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, info) with mock.patch.object( driver_block_device.DriverVolumeBlockDevice, 'save' ) as mock_save: cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info, None, info) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[2].target_dev, 'vdc') 
self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[3].target_dev, 'vdd') mock_save.assert_called_with() def test_get_guest_config_lxc_with_attached_volume(self): self.flags(virt_type='lxc', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) conn_info = {'driver_volume_type': 'fake'} bdms = block_device_obj.block_device_make_list_from_dicts( self.context, [ fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'boot_index': 0}), fake_block_device.FakeDbBlockDeviceDict( {'id': 2, 'source_type': 'volume', 'destination_type': 'volume', }), fake_block_device.FakeDbBlockDeviceDict( {'id': 3, 'source_type': 'volume', 'destination_type': 'volume', }), ] ) info = {'block_device_mapping': driver_block_device.convert_volumes( bdms )} info['block_device_mapping'][0]['connection_info'] = conn_info info['block_device_mapping'][1]['connection_info'] = conn_info info['block_device_mapping'][2]['connection_info'] = conn_info info['block_device_mapping'][0]['mount_device'] = '/dev/vda' info['block_device_mapping'][1]['mount_device'] = '/dev/vdc' info['block_device_mapping'][2]['mount_device'] = '/dev/vdd' with mock.patch.object( driver_block_device.DriverVolumeBlockDevice, 'save' ) as mock_save: disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, info) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info, None, info) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[1].target_dev, 'vdc') self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[2].target_dev, 'vdd') mock_save.assert_called_with() def test_get_guest_config_with_configdrive(self): # It's necessary to check if the architecture is power, 
because # power doesn't have support to ide, and so libvirt translate # all ide calls to scsi drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) # make configdrive.required_by() return True instance_ref['config_drive'] = True disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) # The last device is selected for this. on x86 is the last ide # device (hdd). Since power only support scsi, the last device # is sdz expect = {"ppc": "sdz", "ppc64": "sdz", "ppc64le": "sdz", "aarch64": "sdz"} disk = expect.get(blockinfo.libvirt_utils.get_arch({}), "hdd") self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[2].target_dev, disk) def test_get_guest_config_with_virtio_scsi_bus(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_scsi_model": "virtio-scsi"}}) instance_ref = objects.Instance(**self.test_instance) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, []) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestController) self.assertEqual(cfg.devices[2].model, 'virtio-scsi') def test_get_guest_config_with_virtio_scsi_bus_bdm(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_scsi_model": "virtio-scsi"}}) instance_ref = objects.Instance(**self.test_instance) conn_info = {'driver_volume_type': 'fake'} bdms = 
block_device_obj.block_device_make_list_from_dicts( self.context, [ fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/sdc', 'disk_bus': 'scsi'}), fake_block_device.FakeDbBlockDeviceDict( {'id': 2, 'source_type': 'volume', 'destination_type': 'volume', 'device_name': '/dev/sdd', 'disk_bus': 'scsi'}), ] ) bd_info = { 'block_device_mapping': driver_block_device.convert_volumes(bdms)} bd_info['block_device_mapping'][0]['connection_info'] = conn_info bd_info['block_device_mapping'][1]['connection_info'] = conn_info disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta, bd_info) with mock.patch.object( driver_block_device.DriverVolumeBlockDevice, 'save' ) as mock_save: cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info, [], bd_info) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[2].target_dev, 'sdc') self.assertEqual(cfg.devices[2].target_bus, 'scsi') self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestDisk) self.assertEqual(cfg.devices[3].target_dev, 'sdd') self.assertEqual(cfg.devices[3].target_bus, 'scsi') self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestController) self.assertEqual(cfg.devices[4].model, 'virtio-scsi') mock_save.assert_called_with() def test_get_guest_config_with_vnc(self): self.flags(enabled=True, group='vnc') self.flags(virt_type='kvm', group='libvirt') self.flags(pointer_model='ps2mouse') self.flags(enabled=False, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 7) self.assertIsInstance(cfg.devices[0], 
vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "vnc") def test_get_guest_config_with_vnc_and_tablet(self): self.flags(enabled=True, group='vnc') self.flags(virt_type='kvm', use_usb_tablet=True, group='libvirt') self.flags(enabled=False, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "tablet") self.assertEqual(cfg.devices[5].type, "vnc") def test_get_guest_config_with_spice_and_tablet(self): self.flags(enabled=False, group='vnc') self.flags(virt_type='kvm', use_usb_tablet=True, group='libvirt') self.flags(enabled=True, agent_enabled=False, group='spice') drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "tablet") self.assertEqual(cfg.devices[5].type, "spice") def test_get_guest_config_with_spice_and_agent(self): self.flags(enabled=False, group='vnc') self.flags(virt_type='kvm', use_usb_tablet=True, group='libvirt') self.flags(enabled=True, agent_enabled=True, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestChannel) 
self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].target_name, "com.redhat.spice.0") self.assertEqual(cfg.devices[5].type, "spice") self.assertEqual(cfg.devices[6].type, "qxl") @mock.patch('nova.console.serial.acquire_port') @mock.patch('nova.virt.hardware.get_number_of_serial_ports', return_value=1) @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',) def test_create_serial_console_devices_based_on_arch(self, mock_get_arch, mock_get_port_number, mock_acquire_port): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) expected = {arch.X86_64: vconfig.LibvirtConfigGuestSerial, arch.S390: vconfig.LibvirtConfigGuestConsole, arch.S390X: vconfig.LibvirtConfigGuestConsole} for guest_arch, device_type in expected.items(): mock_get_arch.return_value = guest_arch guest = vconfig.LibvirtConfigGuest() drvr._create_serial_console_devices(guest, instance=None, flavor={}, image_meta={}) self.assertEqual(1, len(guest.devices)) console_device = guest.devices[0] self.assertIsInstance(console_device, device_type) self.assertEqual("tcp", console_device.type) @mock.patch('nova.virt.hardware.get_number_of_serial_ports', return_value=4) @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch', side_effect=[arch.X86_64, arch.S390, arch.S390X]) def test_create_serial_console_devices_with_limit_exceeded_based_on_arch( self, mock_get_arch, mock_get_port_number): self.flags(enabled=True, group='serial_console') self.flags(virt_type="qemu", group='libvirt') flavor = 'fake_flavor' image_meta = objects.ImageMeta() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) guest = vconfig.LibvirtConfigGuest() self.assertRaises(exception.SerialPortNumberLimitExceeded, drvr._create_serial_console_devices, guest, None, flavor, 
image_meta) mock_get_arch.assert_called_with(image_meta) mock_get_port_number.assert_called_with(flavor, image_meta) drvr._create_serial_console_devices(guest, None, flavor, image_meta) mock_get_arch.assert_called_with(image_meta) mock_get_port_number.assert_called_with(flavor, image_meta) drvr._create_serial_console_devices(guest, None, flavor, image_meta) mock_get_arch.assert_called_with(image_meta) mock_get_port_number.assert_called_with(flavor, image_meta) @mock.patch('nova.console.serial.acquire_port') def test_get_guest_config_serial_console(self, acquire_port): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) acquire_port.return_value = 11111 cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(8, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("tcp", cfg.devices[2].type) self.assertEqual(11111, cfg.devices[2].listen_port) def test_get_guest_config_serial_console_through_flavor(self): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw:serial_port_count': 3} 
image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(10, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[9], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("tcp", cfg.devices[2].type) self.assertEqual("tcp", cfg.devices[3].type) self.assertEqual("tcp", cfg.devices[4].type) def test_get_guest_config_serial_console_invalid_flavor(self): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw:serial_port_count': "a"} image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) self.assertRaises( exception.ImageSerialPortNumberInvalid, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) def test_get_guest_config_serial_console_image_and_flavor(self): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_serial_port_count": "3"}}) instance_ref = 
objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw:serial_port_count': 4} disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(10, len(cfg.devices), cfg.devices) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[9], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("tcp", cfg.devices[2].type) self.assertEqual("tcp", cfg.devices[3].type) self.assertEqual("tcp", cfg.devices[4].type) @mock.patch('nova.console.serial.acquire_port') def test_get_guest_config_serial_console_through_port_rng_exhausted( self, acquire_port): self.flags(enabled=True, group='serial_console') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) acquire_port.side_effect = exception.SocketPortRangeExhaustedException( '127.0.0.1') self.assertRaises( exception.SocketPortRangeExhaustedException, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) @mock.patch('nova.console.serial.release_port') @mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info') @mock.patch.object(host.Host, 'get_guest') 
@mock.patch.object(libvirt_driver.LibvirtDriver, '_get_serial_ports_from_guest') def test_serial_console_release_port( self, mock_get_serial_ports_from_guest, mock_get_guest, mock_get_info, mock_release_port): self.flags(enabled="True", group='serial_console') guest = libvirt_guest.Guest(FakeVirtDomain()) guest.power_off = mock.Mock() mock_get_info.return_value = hardware.InstanceInfo( state=power_state.SHUTDOWN) mock_get_guest.return_value = guest mock_get_serial_ports_from_guest.return_value = iter([ ('127.0.0.1', 10000), ('127.0.0.1', 10001)]) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._destroy(objects.Instance(**self.test_instance)) mock_release_port.assert_has_calls( [mock.call(host='127.0.0.1', port=10000), mock.call(host='127.0.0.1', port=10001)]) @mock.patch('os.path.getsize', return_value=0) # size doesn't matter @mock.patch('nova.virt.libvirt.storage.lvm.get_volume_size', return_value='fake-size') def test_detach_encrypted_volumes(self, mock_getsize, mock_get_volume_size): """Test that unencrypted volumes are not disconnected with dmcrypt.""" instance = objects.Instance(**self.test_instance) xml = """ <domain type='kvm'> <devices> <disk type='file'> <driver name='fake-driver' type='fake-type' /> <source file='filename'/> <target dev='vdc' bus='virtio'/> </disk> <disk type='block' device='disk'> <driver name='fake-driver' type='fake-type' /> <source dev='/dev/mapper/disk'/> <target dev='vda'/> </disk> <disk type='block' device='disk'> <driver name='fake-driver' type='fake-type' /> <source dev='/dev/mapper/swap'/> <target dev='vdb'/> </disk> </devices> </domain> """ dom = FakeVirtDomain(fake_xml=xml) instance.ephemeral_key_uuid = uuids.ephemeral_key_uuid # encrypted conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) @mock.patch.object(dmcrypt, 'delete_volume') @mock.patch.object(conn._host, 'get_domain', return_value=dom) def detach_encrypted_volumes(block_device_info, mock_get_domain, mock_delete_volume): 
conn._detach_encrypted_volumes(instance, block_device_info) mock_get_domain.assert_called_once_with(instance) self.assertFalse(mock_delete_volume.called) block_device_info = {'root_device_name': '/dev/vda', 'ephemerals': [], 'block_device_mapping': []} detach_encrypted_volumes(block_device_info) @mock.patch.object(libvirt_guest.Guest, "get_xml_desc") def test_get_serial_ports_from_guest(self, mock_get_xml_desc): i = self._test_get_serial_ports_from_guest(None, mock_get_xml_desc) self.assertEqual([ ('127.0.0.1', 100), ('127.0.0.1', 101), ('127.0.0.2', 100), ('127.0.0.2', 101)], list(i)) @mock.patch.object(libvirt_guest.Guest, "get_xml_desc") def test_get_serial_ports_from_guest_bind_only(self, mock_get_xml_desc): i = self._test_get_serial_ports_from_guest('bind', mock_get_xml_desc) self.assertEqual([ ('127.0.0.1', 101), ('127.0.0.2', 100)], list(i)) @mock.patch.object(libvirt_guest.Guest, "get_xml_desc") def test_get_serial_ports_from_guest_connect_only(self, mock_get_xml_desc): i = self._test_get_serial_ports_from_guest('connect', mock_get_xml_desc) self.assertEqual([ ('127.0.0.1', 100), ('127.0.0.2', 101)], list(i)) @mock.patch.object(libvirt_guest.Guest, "get_xml_desc") def test_get_serial_ports_from_guest_on_s390(self, mock_get_xml_desc): i = self._test_get_serial_ports_from_guest(None, mock_get_xml_desc, 'console') self.assertEqual([ ('127.0.0.1', 100), ('127.0.0.1', 101), ('127.0.0.2', 100), ('127.0.0.2', 101)], list(i)) def _test_get_serial_ports_from_guest(self, mode, mock_get_xml_desc, dev_name='serial'): xml = """ <domain type='kvm'> <devices> <%(dev_name)s type="tcp"> <source host="127.0.0.1" service="100" mode="connect"/> </%(dev_name)s> <%(dev_name)s type="tcp"> <source host="127.0.0.1" service="101" mode="bind"/> </%(dev_name)s> <%(dev_name)s type="tcp"> <source host="127.0.0.2" service="100" mode="bind"/> </%(dev_name)s> <%(dev_name)s type="tcp"> <source host="127.0.0.2" service="101" mode="connect"/> </%(dev_name)s> </devices> </domain>""" % 
{'dev_name': dev_name} mock_get_xml_desc.return_value = xml drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) guest = libvirt_guest.Guest(FakeVirtDomain()) return drvr._get_serial_ports_from_guest(guest, mode=mode) def test_get_guest_config_with_type_xen(self): self.flags(enabled=True, group='vnc') self.flags(virt_type='xen', use_usb_tablet=False, group='libvirt') self.flags(enabled=False, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 6) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestConsole) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[3].type, "vnc") self.assertEqual(cfg.devices[4].type, "xen") @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch', return_value=arch.S390X) def test_get_guest_config_with_type_kvm_on_s390(self, mock_get_arch): self.flags(enabled=False, group='vnc') self.flags(virt_type='kvm', use_usb_tablet=False, group='libvirt') self._stub_host_capabilities_cpu_arch(arch.S390X) instance_ref = objects.Instance(**self.test_instance) cfg = self._get_guest_config_via_fake_api(instance_ref) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) log_file_device = cfg.devices[2] self.assertIsInstance(log_file_device, vconfig.LibvirtConfigGuestConsole) 
self.assertEqual("sclplm", log_file_device.target_type) self.assertEqual("file", log_file_device.type) terminal_device = cfg.devices[3] self.assertIsInstance(terminal_device, vconfig.LibvirtConfigGuestConsole) self.assertEqual("sclp", terminal_device.target_type) self.assertEqual("pty", terminal_device.type) self.assertEqual("s390-ccw-virtio", cfg.os_mach_type) def _stub_host_capabilities_cpu_arch(self, cpu_arch): def get_host_capabilities_stub(self): cpu = vconfig.LibvirtConfigGuestCPU() cpu.arch = cpu_arch caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu return caps self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) def _get_guest_config_via_fake_api(self, instance): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) return drvr._get_guest_config(instance, [], image_meta, disk_info) def test_get_guest_config_with_type_xen_pae_hvm(self): self.flags(enabled=True, group='vnc') self.flags(virt_type='xen', use_usb_tablet=False, group='libvirt') self.flags(enabled=False, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref['vm_mode'] = vm_mode.HVM image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(cfg.os_type, vm_mode.HVM) self.assertEqual(cfg.os_loader, CONF.libvirt.xen_hvmloader_path) self.assertEqual(3, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeaturePAE) self.assertIsInstance(cfg.features[1], vconfig.LibvirtConfigGuestFeatureACPI) self.assertIsInstance(cfg.features[2], vconfig.LibvirtConfigGuestFeatureAPIC) def 
test_get_guest_config_with_type_xen_pae_pvm(self): self.flags(enabled=True, group='vnc') self.flags(virt_type='xen', use_usb_tablet=False, group='libvirt') self.flags(enabled=False, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(cfg.os_type, vm_mode.XEN) self.assertEqual(1, len(cfg.features)) self.assertIsInstance(cfg.features[0], vconfig.LibvirtConfigGuestFeaturePAE) def test_get_guest_config_with_vnc_and_spice(self): self.flags(enabled=True, group='vnc') self.flags(virt_type='kvm', use_usb_tablet=True, group='libvirt') self.flags(enabled=True, agent_enabled=True, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 10) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestChannel) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[9], 
vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "tablet") self.assertEqual(cfg.devices[5].target_name, "com.redhat.spice.0") self.assertEqual(cfg.devices[6].type, "vnc") self.assertEqual(cfg.devices[7].type, "spice") def test_get_guest_config_with_watchdog_action_image_meta(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_watchdog_action": "none"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 9) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestWatchdog) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("none", cfg.devices[7].action) def _test_get_guest_usb_tablet(self, vnc_enabled, spice_enabled, os_type, agent_enabled=False, image_meta=None): self.flags(enabled=vnc_enabled, group='vnc') self.flags(enabled=spice_enabled, agent_enabled=agent_enabled, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict(image_meta) return drvr._get_guest_pointer_model(os_type, image_meta) def test_use_ps2_mouse(self): self.flags(pointer_model='ps2mouse') tablet = self._test_get_guest_usb_tablet(True, True, 
vm_mode.HVM) self.assertIsNone(tablet) def test_get_guest_usb_tablet_wipe(self): self.flags(use_usb_tablet=True, group='libvirt') tablet = self._test_get_guest_usb_tablet(True, True, vm_mode.HVM) self.assertIsNotNone(tablet) tablet = self._test_get_guest_usb_tablet(True, False, vm_mode.HVM) self.assertIsNotNone(tablet) tablet = self._test_get_guest_usb_tablet(False, True, vm_mode.HVM) self.assertIsNotNone(tablet) tablet = self._test_get_guest_usb_tablet(False, False, vm_mode.HVM) self.assertIsNone(tablet) tablet = self._test_get_guest_usb_tablet(True, True, "foo") self.assertIsNone(tablet) tablet = self._test_get_guest_usb_tablet( False, True, vm_mode.HVM, True) self.assertIsNone(tablet) def test_get_guest_usb_tablet_image_meta(self): self.flags(use_usb_tablet=True, group='libvirt') image_meta = {"properties": {"hw_pointer_model": "usbtablet"}} tablet = self._test_get_guest_usb_tablet( True, True, vm_mode.HVM, image_meta=image_meta) self.assertIsNotNone(tablet) tablet = self._test_get_guest_usb_tablet( True, False, vm_mode.HVM, image_meta=image_meta) self.assertIsNotNone(tablet) tablet = self._test_get_guest_usb_tablet( False, True, vm_mode.HVM, image_meta=image_meta) self.assertIsNotNone(tablet) tablet = self._test_get_guest_usb_tablet( False, False, vm_mode.HVM, image_meta=image_meta) self.assertIsNone(tablet) tablet = self._test_get_guest_usb_tablet( True, True, "foo", image_meta=image_meta) self.assertIsNone(tablet) tablet = self._test_get_guest_usb_tablet( False, True, vm_mode.HVM, True, image_meta=image_meta) self.assertIsNone(tablet) def test_get_guest_usb_tablet_image_meta_no_vnc(self): self.flags(use_usb_tablet=False, group='libvirt') self.flags(pointer_model=None) image_meta = {"properties": {"hw_pointer_model": "usbtablet"}} self.assertRaises( exception.UnsupportedPointerModelRequested, self._test_get_guest_usb_tablet, False, False, vm_mode.HVM, True, image_meta=image_meta) def test_get_guest_no_pointer_model_usb_tablet_set(self): 
self.flags(use_usb_tablet=True, group='libvirt') self.flags(pointer_model=None) tablet = self._test_get_guest_usb_tablet(True, True, vm_mode.HVM) self.assertIsNotNone(tablet) def test_get_guest_no_pointer_model_usb_tablet_not_set(self): self.flags(use_usb_tablet=False, group='libvirt') self.flags(pointer_model=None) tablet = self._test_get_guest_usb_tablet(True, True, vm_mode.HVM) self.assertIsNone(tablet) def test_get_guest_pointer_model_usb_tablet(self): self.flags(use_usb_tablet=False, group='libvirt') self.flags(pointer_model='usbtablet') tablet = self._test_get_guest_usb_tablet(True, True, vm_mode.HVM) self.assertIsNotNone(tablet) def test_get_guest_pointer_model_usb_tablet_image(self): image_meta = {"properties": {"hw_pointer_model": "usbtablet"}} tablet = self._test_get_guest_usb_tablet( True, True, vm_mode.HVM, image_meta=image_meta) self.assertIsNotNone(tablet) def test_get_guest_pointer_model_usb_tablet_image_no_HVM(self): self.flags(pointer_model=None) self.flags(use_usb_tablet=False, group='libvirt') image_meta = {"properties": {"hw_pointer_model": "usbtablet"}} self.assertRaises( exception.UnsupportedPointerModelRequested, self._test_get_guest_usb_tablet, True, True, vm_mode.XEN, image_meta=image_meta) def _test_get_guest_config_with_watchdog_action_flavor(self, hw_watchdog_action="hw:watchdog_action"): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {hw_watchdog_action: 'none'} image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(9, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], 
vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestWatchdog) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("none", cfg.devices[7].action) def test_get_guest_config_with_watchdog_action_through_flavor(self): self._test_get_guest_config_with_watchdog_action_flavor() # TODO(pkholkin): the test accepting old property name 'hw_watchdog_action' # should be removed in the next release def test_get_guest_config_with_watchdog_action_through_flavor_no_scope( self): self._test_get_guest_config_with_watchdog_action_flavor( hw_watchdog_action="hw_watchdog_action") def test_get_guest_config_with_watchdog_overrides_flavor(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw_watchdog_action': 'none'} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_watchdog_action": "pause"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(9, len(cfg.devices)) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) 
self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestWatchdog) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual("pause", cfg.devices[7].action) def test_get_guest_config_with_video_driver_image_meta(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_video_model": "vmvga"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[5].type, "vnc") self.assertEqual(cfg.devices[6].type, "vmvga") def test_get_guest_config_with_qga_through_image_meta(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_qemu_guest_agent": "yes"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 9) 
self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigGuestChannel) self.assertIsInstance(cfg.devices[8], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "tablet") self.assertEqual(cfg.devices[5].type, "vnc") self.assertEqual(cfg.devices[7].type, "unix") self.assertEqual(cfg.devices[7].target_name, "org.qemu.guest_agent.0") def test_get_guest_config_with_video_driver_vram(self): self.flags(enabled=False, group='vnc') self.flags(virt_type='kvm', group='libvirt') self.flags(enabled=True, agent_enabled=True, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw_video:ram_max_mb': "100"} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_video_model": "qxl", "hw_video_ram": "64"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestChannel) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) 
self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[5].type, "spice") self.assertEqual(cfg.devices[6].type, "qxl") self.assertEqual(cfg.devices[6].vram, 64 * units.Mi / units.Ki) @mock.patch('nova.virt.disk.api.teardown_container') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info') @mock.patch('nova.virt.disk.api.setup_container') @mock.patch('oslo_utils.fileutils.ensure_tree') @mock.patch.object(fake_libvirt_utils, 'get_instance_path') def test_unmount_fs_if_error_during_lxc_create_domain(self, mock_get_inst_path, mock_ensure_tree, mock_setup_container, mock_get_info, mock_teardown): """If we hit an error during a `_create_domain` call to `libvirt+lxc` we need to ensure the guest FS is unmounted from the host so that any future `lvremove` calls will work. """ self.flags(virt_type='lxc', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) mock_instance = mock.MagicMock() mock_get_inst_path.return_value = '/tmp/' mock_image_backend = mock.MagicMock() drvr.image_backend = mock_image_backend mock_image = mock.MagicMock() mock_image.path = '/tmp/test.img' drvr.image_backend.image.return_value = mock_image mock_setup_container.return_value = '/dev/nbd0' mock_get_info.side_effect = exception.InstanceNotFound( instance_id='foo') drvr._conn.defineXML = mock.Mock() drvr._conn.defineXML.side_effect = ValueError('somethingbad') with test.nested( mock.patch.object(drvr, '_is_booted_from_volume', return_value=False), mock.patch.object(drvr, 'plug_vifs'), mock.patch.object(drvr, 'firewall_driver'), mock.patch.object(drvr, 'cleanup')): self.assertRaises(ValueError, drvr._create_domain_and_network, self.context, 'xml', mock_instance, None, None) mock_teardown.assert_called_with(container_dir='/tmp/rootfs') def test_video_driver_flavor_limit_not_set(self): self.flags(virt_type='kvm', group='libvirt') 
self.flags(enabled=True, agent_enabled=True, group='spice') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_video_model": "qxl", "hw_video_ram": "64"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with mock.patch.object(objects.Instance, 'save'): self.assertRaises(exception.RequestedVRamTooHigh, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) def test_video_driver_ram_above_flavor_limit(self): self.flags(virt_type='kvm', group='libvirt') self.flags(enabled=True, agent_enabled=True, group='spice') instance_ref = objects.Instance(**self.test_instance) instance_type = instance_ref.get_flavor() instance_type.extra_specs = {'hw_video:ram_max_mb': "50"} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_video_model": "qxl", "hw_video_ram": "64"}}) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) with mock.patch.object(objects.Instance, 'save'): self.assertRaises(exception.RequestedVRamTooHigh, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) def test_get_guest_config_without_qga_through_image_meta(self): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_qemu_guest_agent": "no"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) 
self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestInput) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[4].type, "tablet") self.assertEqual(cfg.devices[5].type, "vnc") def test_get_guest_config_with_rng_device(self): self.flags(virt_type='kvm', group='libvirt') self.flags(pointer_model='ps2mouse') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_rng_model": "virtio"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestRng) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[6].model, 'random') self.assertIsNone(cfg.devices[6].backend) self.assertIsNone(cfg.devices[6].rate_bytes) self.assertIsNone(cfg.devices[6].rate_period) def test_get_guest_config_with_rng_not_allowed(self): self.flags(virt_type='kvm', group='libvirt') 
self.flags(pointer_model='ps2mouse') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_rng_model": "virtio"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 7) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigMemoryBalloon) def test_get_guest_config_with_rng_limits(self): self.flags(virt_type='kvm', group='libvirt') self.flags(pointer_model='ps2mouse') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True', 'hw_rng:rate_bytes': '1024', 'hw_rng:rate_period': '2'} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_rng_model": "virtio"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestGraphics) 
self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestRng) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[6].model, 'random') self.assertIsNone(cfg.devices[6].backend) self.assertEqual(cfg.devices[6].rate_bytes, 1024) self.assertEqual(cfg.devices[6].rate_period, 2) @mock.patch('nova.virt.libvirt.driver.os.path.exists') def test_get_guest_config_with_rng_backend(self, mock_path): self.flags(virt_type='kvm', rng_dev_path='/dev/hw_rng', group='libvirt') self.flags(pointer_model='ps2mouse') mock_path.return_value = True drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_rng_model": "virtio"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(len(cfg.devices), 8) self.assertIsInstance(cfg.devices[0], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[1], vconfig.LibvirtConfigGuestDisk) self.assertIsInstance(cfg.devices[2], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[3], vconfig.LibvirtConfigGuestSerial) self.assertIsInstance(cfg.devices[4], vconfig.LibvirtConfigGuestGraphics) self.assertIsInstance(cfg.devices[5], vconfig.LibvirtConfigGuestVideo) self.assertIsInstance(cfg.devices[6], vconfig.LibvirtConfigGuestRng) self.assertIsInstance(cfg.devices[7], vconfig.LibvirtConfigMemoryBalloon) self.assertEqual(cfg.devices[6].model, 'random') self.assertEqual(cfg.devices[6].backend, '/dev/hw_rng') self.assertIsNone(cfg.devices[6].rate_bytes) self.assertIsNone(cfg.devices[6].rate_period) @mock.patch('nova.virt.libvirt.driver.os.path.exists') def 
test_get_guest_config_with_rng_dev_not_present(self, mock_path): self.flags(virt_type='kvm', use_usb_tablet=False, rng_dev_path='/dev/hw_rng', group='libvirt') mock_path.return_value = False drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'} image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_rng_model": "virtio"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) self.assertRaises(exception.RngDeviceNotExist, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_guest_cpu_shares_with_multi_vcpu(self, is_able): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.vcpus = 4 image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(4096, cfg.cputune.shares) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_with_cpu_quota(self, is_able): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000', 'quota:cpu_period': '20000'} image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(10000, cfg.cputune.shares) self.assertEqual(20000, cfg.cputune.period) @mock.patch.object( host.Host, 
"is_cpu_control_policy_capable", return_value=True) def test_get_guest_config_with_bogus_cpu_quota(self, is_able): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'quota:cpu_shares': 'fishfood', 'quota:cpu_period': '20000'} image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) self.assertRaises(ValueError, drvr._get_guest_config, instance_ref, [], image_meta, disk_info) @mock.patch.object( host.Host, "is_cpu_control_policy_capable", return_value=False) def test_get_update_guest_cputune(self, is_able): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000', 'quota:cpu_period': '20000'} self.assertRaises( exception.UnsupportedHostCPUControlPolicy, drvr._update_guest_cputune, {}, instance_ref.flavor, "kvm") def _test_get_guest_config_sysinfo_serial(self, expected_serial): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) cfg = drvr._get_guest_config_sysinfo(instance_ref) self.assertIsInstance(cfg, vconfig.LibvirtConfigGuestSysinfo) self.assertEqual(version.vendor_string(), cfg.system_manufacturer) self.assertEqual(version.product_string(), cfg.system_product) self.assertEqual(version.version_string_with_package(), cfg.system_version) self.assertEqual(expected_serial, cfg.system_serial) self.assertEqual(instance_ref['uuid'], cfg.system_uuid) self.assertEqual("Virtual Machine", cfg.system_family) def test_get_guest_config_sysinfo_serial_none(self): self.flags(sysinfo_serial="none", group="libvirt") self._test_get_guest_config_sysinfo_serial(None) @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_host_sysinfo_serial_hardware") def 
test_get_guest_config_sysinfo_serial_hardware(self, mock_uuid): self.flags(sysinfo_serial="hardware", group="libvirt") theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc" mock_uuid.return_value = theuuid self._test_get_guest_config_sysinfo_serial(theuuid) @contextlib.contextmanager def patch_exists(self, result): real_exists = os.path.exists def fake_exists(filename): if filename == "/etc/machine-id": return result return real_exists(filename) with mock.patch.object(os.path, "exists") as mock_exists: mock_exists.side_effect = fake_exists yield mock_exists def test_get_guest_config_sysinfo_serial_os(self): self.flags(sysinfo_serial="os", group="libvirt") theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc" with test.nested( mock.patch.object(six.moves.builtins, "open", mock.mock_open(read_data=theuuid)), self.patch_exists(True)): self._test_get_guest_config_sysinfo_serial(theuuid) def test_get_guest_config_sysinfo_serial_os_empty_machine_id(self): self.flags(sysinfo_serial="os", group="libvirt") with test.nested( mock.patch.object(six.moves.builtins, "open", mock.mock_open(read_data="")), self.patch_exists(True)): self.assertRaises(exception.NovaException, self._test_get_guest_config_sysinfo_serial, None) def test_get_guest_config_sysinfo_serial_os_no_machine_id_file(self): self.flags(sysinfo_serial="os", group="libvirt") with self.patch_exists(False): self.assertRaises(exception.NovaException, self._test_get_guest_config_sysinfo_serial, None) def test_get_guest_config_sysinfo_serial_auto_hardware(self): self.flags(sysinfo_serial="auto", group="libvirt") real_exists = os.path.exists with test.nested( mock.patch.object(os.path, "exists"), mock.patch.object(libvirt_driver.LibvirtDriver, "_get_host_sysinfo_serial_hardware") ) as (mock_exists, mock_uuid): def fake_exists(filename): if filename == "/etc/machine-id": return False return real_exists(filename) mock_exists.side_effect = fake_exists theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc" mock_uuid.return_value = theuuid 
self._test_get_guest_config_sysinfo_serial(theuuid) def test_get_guest_config_sysinfo_serial_auto_os(self): self.flags(sysinfo_serial="auto", group="libvirt") real_exists = os.path.exists real_open = builtins.open with test.nested( mock.patch.object(os.path, "exists"), mock.patch.object(builtins, "open"), ) as (mock_exists, mock_open): def fake_exists(filename): if filename == "/etc/machine-id": return True return real_exists(filename) mock_exists.side_effect = fake_exists theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc" def fake_open(filename, *args, **kwargs): if filename == "/etc/machine-id": h = mock.MagicMock() h.read.return_value = theuuid h.__enter__.return_value = h return h return real_open(filename, *args, **kwargs) mock_open.side_effect = fake_open self._test_get_guest_config_sysinfo_serial(theuuid) def _create_fake_service_compute(self): service_info = { 'id': 1729, 'host': 'fake', 'report_count': 0 } service_ref = objects.Service(**service_info) compute_info = { 'id': 1729, 'vcpus': 2, 'memory_mb': 1024, 'local_gb': 2048, 'vcpus_used': 0, 'memory_mb_used': 0, 'local_gb_used': 0, 'free_ram_mb': 1024, 'free_disk_gb': 2048, 'hypervisor_type': 'xen', 'hypervisor_version': 1, 'running_vms': 0, 'cpu_info': '', 'current_workload': 0, 'service_id': service_ref['id'], 'host': service_ref['host'] } compute_ref = objects.ComputeNode(**compute_info) return (service_ref, compute_ref) def test_get_guest_config_with_pci_passthrough_kvm(self): self.flags(virt_type='kvm', group='libvirt') service_ref, compute_ref = self._create_fake_service_compute() instance = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) pci_device_info = dict(test_pci_device.fake_db_dev) pci_device_info.update(compute_node_id=1, label='fake', status=fields.PciDeviceStatus.ALLOCATED, address='0000:00:00.1', compute_id=compute_ref.id, instance_uuid=instance.uuid, request_id=None, extra_info={}) pci_device = objects.PciDevice(**pci_device_info) 
pci_list = objects.PciDeviceList() pci_list.objects.append(pci_device) instance.pci_devices = pci_list drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) cfg = drvr._get_guest_config(instance, [], image_meta, disk_info) had_pci = 0 # care only about the PCI devices for dev in cfg.devices: if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI: had_pci += 1 self.assertEqual(dev.type, 'pci') self.assertEqual(dev.managed, 'yes') self.assertEqual(dev.mode, 'subsystem') self.assertEqual(dev.domain, "0000") self.assertEqual(dev.bus, "00") self.assertEqual(dev.slot, "00") self.assertEqual(dev.function, "1") self.assertEqual(had_pci, 1) def test_get_guest_config_with_pci_passthrough_xen(self): self.flags(virt_type='xen', group='libvirt') service_ref, compute_ref = self._create_fake_service_compute() instance = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) pci_device_info = dict(test_pci_device.fake_db_dev) pci_device_info.update(compute_node_id=1, label='fake', status=fields.PciDeviceStatus.ALLOCATED, address='0000:00:00.2', compute_id=compute_ref.id, instance_uuid=instance.uuid, request_id=None, extra_info={}) pci_device = objects.PciDevice(**pci_device_info) pci_list = objects.PciDeviceList() pci_list.objects.append(pci_device) instance.pci_devices = pci_list drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) cfg = drvr._get_guest_config(instance, [], image_meta, disk_info) had_pci = 0 # care only about the PCI devices for dev in cfg.devices: if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI: had_pci += 1 self.assertEqual(dev.type, 'pci') self.assertEqual(dev.managed, 'no') self.assertEqual(dev.mode, 'subsystem') self.assertEqual(dev.domain, "0000") self.assertEqual(dev.bus, "00") self.assertEqual(dev.slot, "00") 
self.assertEqual(dev.function, "2") self.assertEqual(had_pci, 1) def test_get_guest_config_os_command_line_through_image_meta(self): self.flags(virt_type="kvm", cpu_mode='none', group='libvirt') self.test_instance['kernel_id'] = "fake_kernel_id" drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"os_command_line": "fake_os_command_line"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual(cfg.os_cmdline, "fake_os_command_line") def test_get_guest_config_os_command_line_without_kernel_id(self): self.flags(virt_type="kvm", cpu_mode='none', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"os_command_line": "fake_os_command_line"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsNone(cfg.os_cmdline) def test_get_guest_config_os_command_empty(self): self.flags(virt_type="kvm", cpu_mode='none', group='libvirt') self.test_instance['kernel_id'] = "fake_kernel_id" drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"os_command_line": ""}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) # the instance has 'root=/dev/vda console=tty0 console=ttyS0' set by # default, so testing an empty string and None value in the # os_command_line image property must pass cfg = drvr._get_guest_config(instance_ref, 
_fake_network_info(self, 1), image_meta, disk_info) self.assertNotEqual(cfg.os_cmdline, "") @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_guest_storage_config") @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support") def test_get_guest_config_armv7(self, mock_numa, mock_storage): def get_host_capabilities_stub(self): cpu = vconfig.LibvirtConfigGuestCPU() cpu.arch = arch.ARMV7 caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu return caps self.flags(virt_type="kvm", group="libvirt") instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual(cfg.os_mach_type, "vexpress-a15") @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_guest_storage_config") @mock.patch.object(libvirt_driver.LibvirtDriver, "_has_numa_support") def test_get_guest_config_aarch64(self, mock_numa, mock_storage): def get_host_capabilities_stub(self): cpu = vconfig.LibvirtConfigGuestCPU() cpu.arch = arch.AARCH64 caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu return caps self.flags(virt_type="kvm", group="libvirt") instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual(cfg.os_mach_type, "virt") def 
test_get_guest_config_machine_type_s390(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigGuestCPU() image_meta = objects.ImageMeta.from_dict(self.test_image_meta) host_cpu_archs = (arch.S390, arch.S390X) for host_cpu_arch in host_cpu_archs: caps.host.cpu.arch = host_cpu_arch os_mach_type = drvr._get_machine_type(image_meta, caps) self.assertEqual('s390-ccw-virtio', os_mach_type) def test_get_guest_config_machine_type_through_image_meta(self): self.flags(virt_type="kvm", group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict({ "disk_format": "raw", "properties": {"hw_machine_type": "fake_machine_type"}}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual(cfg.os_mach_type, "fake_machine_type") def test_get_guest_config_machine_type_from_config(self): self.flags(virt_type='kvm', group='libvirt') self.flags(hw_machine_type=['x86_64=fake_machine_type'], group='libvirt') def fake_getCapabilities(): return """ <capabilities> <host> <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid> <cpu> <arch>x86_64</arch> <model>Penryn</model> <vendor>Intel</vendor> <topology sockets='1' cores='2' threads='1'/> <feature name='xtpr'/> </cpu> </host> </capabilities> """ def fake_baselineCPU(cpu, flag): return """<cpu mode='custom' match='exact'> <model fallback='allow'>Penryn</model> <vendor>Intel</vendor> <feature policy='require' name='xtpr'/> </cpu> """ # Make sure the host arch is mocked as x86_64 self.create_fake_libvirt_mock(getCapabilities=fake_getCapabilities, baselineCPU=fake_baselineCPU, getVersion=lambda: 1005001) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) 
instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertEqual(cfg.os_mach_type, "fake_machine_type") def _test_get_guest_config_ppc64(self, device_index): """Test for nova.virt.libvirt.driver.LibvirtDriver._get_guest_config. """ self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) expected = (arch.PPC64, arch.PPC) for guestarch in expected: with mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch', return_value=guestarch): cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertIsInstance(cfg.devices[device_index], vconfig.LibvirtConfigGuestVideo) self.assertEqual(cfg.devices[device_index].type, 'vga') def test_get_guest_config_ppc64_through_image_meta_vnc_enabled(self): self.flags(enabled=True, group='vnc') self._test_get_guest_config_ppc64(6) def test_get_guest_config_ppc64_through_image_meta_spice_enabled(self): self.flags(enabled=True, agent_enabled=True, group='spice') self._test_get_guest_config_ppc64(8) def _test_get_guest_config_bootmenu(self, image_meta, extra_specs): self.flags(virt_type='kvm', group='libvirt') conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.extra_specs = extra_specs disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = conn._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertTrue(conf.os_bootmenu) def 
test_get_guest_config_bootmenu_via_image_meta(self): image_meta = objects.ImageMeta.from_dict( {"disk_format": "raw", "properties": {"hw_boot_menu": "True"}}) self._test_get_guest_config_bootmenu(image_meta, {}) def test_get_guest_config_bootmenu_via_extra_specs(self): image_meta = objects.ImageMeta.from_dict( self.test_image_meta) self._test_get_guest_config_bootmenu(image_meta, {'hw:boot_menu': 'True'}) def test_get_guest_cpu_config_none(self): self.flags(cpu_mode="none", group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU) self.assertIsNone(conf.cpu.mode) self.assertIsNone(conf.cpu.model) self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus) self.assertEqual(conf.cpu.cores, 1) self.assertEqual(conf.cpu.threads, 1) def test_get_guest_cpu_config_default_kvm(self): self.flags(virt_type="kvm", cpu_mode='none', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU) self.assertIsNone(conf.cpu.mode) self.assertIsNone(conf.cpu.model) self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus) self.assertEqual(conf.cpu.cores, 1) self.assertEqual(conf.cpu.threads, 1) def test_get_guest_cpu_config_default_uml(self): self.flags(virt_type="uml", cpu_mode='none', group='libvirt') drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsNone(conf.cpu) def test_get_guest_cpu_config_default_lxc(self): self.flags(virt_type="lxc", cpu_mode='none', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsNone(conf.cpu) def test_get_guest_cpu_config_host_passthrough(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) self.flags(cpu_mode="host-passthrough", group='libvirt') disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU) self.assertEqual(conf.cpu.mode, "host-passthrough") self.assertIsNone(conf.cpu.model) self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus) self.assertEqual(conf.cpu.cores, 1) self.assertEqual(conf.cpu.threads, 1) def test_get_guest_cpu_config_host_model(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) self.flags(cpu_mode="host-model", group='libvirt') disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, 
image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU) self.assertEqual(conf.cpu.mode, "host-model") self.assertIsNone(conf.cpu.model) self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus) self.assertEqual(conf.cpu.cores, 1) self.assertEqual(conf.cpu.threads, 1) def test_get_guest_cpu_config_custom(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) self.flags(cpu_mode="custom", cpu_model="Penryn", group='libvirt') disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU) self.assertEqual(conf.cpu.mode, "custom") self.assertEqual(conf.cpu.model, "Penryn") self.assertEqual(conf.cpu.sockets, instance_ref.flavor.vcpus) self.assertEqual(conf.cpu.cores, 1) self.assertEqual(conf.cpu.threads, 1) def test_get_guest_cpu_topology(self): instance_ref = objects.Instance(**self.test_instance) instance_ref.flavor.vcpus = 8 instance_ref.flavor.extra_specs = {'hw:cpu_max_sockets': '4'} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) conf = drvr._get_guest_config(instance_ref, _fake_network_info(self, 1), image_meta, disk_info) self.assertIsInstance(conf.cpu, vconfig.LibvirtConfigGuestCPU) self.assertEqual(conf.cpu.mode, "host-model") self.assertEqual(conf.cpu.sockets, 4) self.assertEqual(conf.cpu.cores, 2) self.assertEqual(conf.cpu.threads, 1) def test_get_guest_memory_balloon_config_by_default(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = 
objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) for device in cfg.devices: if device.root_name == 'memballoon': self.assertIsInstance(device, vconfig.LibvirtConfigMemoryBalloon) self.assertEqual('virtio', device.model) self.assertEqual(10, device.period) def test_get_guest_memory_balloon_config_disable(self): self.flags(mem_stats_period_seconds=0, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) no_exist = True for device in cfg.devices: if device.root_name == 'memballoon': no_exist = False break self.assertTrue(no_exist) def test_get_guest_memory_balloon_config_period_value(self): self.flags(mem_stats_period_seconds=21, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) for device in cfg.devices: if device.root_name == 'memballoon': self.assertIsInstance(device, vconfig.LibvirtConfigMemoryBalloon) self.assertEqual('virtio', device.model) self.assertEqual(21, device.period) def test_get_guest_memory_balloon_config_qemu(self): self.flags(virt_type='qemu', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = 
blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) for device in cfg.devices: if device.root_name == 'memballoon': self.assertIsInstance(device, vconfig.LibvirtConfigMemoryBalloon) self.assertEqual('virtio', device.model) self.assertEqual(10, device.period) def test_get_guest_memory_balloon_config_xen(self): self.flags(virt_type='xen', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) for device in cfg.devices: if device.root_name == 'memballoon': self.assertIsInstance(device, vconfig.LibvirtConfigMemoryBalloon) self.assertEqual('xen', device.model) self.assertEqual(10, device.period) def test_get_guest_memory_balloon_config_lxc(self): self.flags(virt_type='lxc', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) no_exist = True for device in cfg.devices: if device.root_name == 'memballoon': no_exist = False break self.assertTrue(no_exist) @mock.patch('nova.virt.libvirt.driver.LOG.warning') @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch.object(host.Host, "get_capabilities") def test_get_supported_perf_events_foo(self, mock_get_caps, mock_min_version, mock_warn): self.flags(enabled_perf_events=['foo'], group='libvirt') caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() 
caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() mock_get_caps.return_value = caps drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) events = drvr._get_supported_perf_events() self.assertTrue(mock_warn.called) self.assertEqual([], events) @mock.patch.object(host.Host, "get_capabilities") def _test_get_guest_with_perf(self, caps, events, mock_get_caps): mock_get_caps.return_value = caps drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) drvr.init_host('test_perf') instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info) self.assertEqual(events, cfg.perf_events) @mock.patch.object(fakelibvirt, 'VIR_PERF_PARAM_CMT', True, create=True) @mock.patch.object(fakelibvirt, 'VIR_PERF_PARAM_MBMT', True, create=True) @mock.patch.object(fakelibvirt, 'VIR_PERF_PARAM_MBML', True, create=True) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_get_guest_with_perf_supported(self, mock_min_version): self.flags(enabled_perf_events=['cmt', 'mbml', 'mbmt'], group='libvirt') caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() features = [] for f in ('cmt', 'mbm_local', 'mbm_total'): feature = vconfig.LibvirtConfigGuestCPUFeature() feature.name = f feature.policy = cpumodel.POLICY_REQUIRE features.append(feature) caps.host.cpu.features = set(features) self._test_get_guest_with_perf(caps, ['cmt', 'mbml', 'mbmt']) @mock.patch.object(host.Host, 'has_min_version') def test_get_guest_with_perf_libvirt_unsupported(self, mock_min_version): def fake_has_min_version(lv_ver=None, hv_ver=None, hv_type=None): if lv_ver == 
libvirt_driver.MIN_LIBVIRT_PERF_VERSION: return False return True mock_min_version.side_effect = fake_has_min_version self.flags(enabled_perf_events=['cmt'], group='libvirt') caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" self._test_get_guest_with_perf(caps, []) @mock.patch.object(fakelibvirt, 'VIR_PERF_PARAM_CMT', True, create=True) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_get_guest_with_perf_host_unsupported(self, mock_min_version): self.flags(enabled_perf_events=['cmt'], group='libvirt') caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = "x86_64" caps.host.topology = self._fake_caps_numa_topology() self._test_get_guest_with_perf(caps, []) def test_xml_and_uri_no_ramdisk_no_kernel(self): instance_data = dict(self.test_instance) self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=False) def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self): instance_data = dict(self.test_instance) instance_data.update({'vm_mode': vm_mode.HVM}) self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=False, expect_xen_hvm=True) def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self): instance_data = dict(self.test_instance) instance_data.update({'vm_mode': vm_mode.XEN}) self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=False, expect_xen_hvm=False, xen_only=True) def test_xml_and_uri_no_ramdisk(self): instance_data = dict(self.test_instance) instance_data['kernel_id'] = 'aki-deadbeef' self._check_xml_and_uri(instance_data, expect_kernel=True, expect_ramdisk=False) def test_xml_and_uri_no_kernel(self): instance_data = dict(self.test_instance) instance_data['ramdisk_id'] = 'ari-deadbeef' self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=False) def 
test_xml_and_uri(self): instance_data = dict(self.test_instance) instance_data['ramdisk_id'] = 'ari-deadbeef' instance_data['kernel_id'] = 'aki-deadbeef' self._check_xml_and_uri(instance_data, expect_kernel=True, expect_ramdisk=True) def test_xml_and_uri_rescue(self): instance_data = dict(self.test_instance) instance_data['ramdisk_id'] = 'ari-deadbeef' instance_data['kernel_id'] = 'aki-deadbeef' self._check_xml_and_uri(instance_data, expect_kernel=True, expect_ramdisk=True, rescue=instance_data) def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self): instance_data = dict(self.test_instance) self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=False, rescue=instance_data) def test_xml_and_uri_rescue_no_kernel(self): instance_data = dict(self.test_instance) instance_data['ramdisk_id'] = 'aki-deadbeef' self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=True, rescue=instance_data) def test_xml_and_uri_rescue_no_ramdisk(self): instance_data = dict(self.test_instance) instance_data['kernel_id'] = 'aki-deadbeef' self._check_xml_and_uri(instance_data, expect_kernel=True, expect_ramdisk=False, rescue=instance_data) def test_xml_uuid(self): self._check_xml_and_uuid(self.test_image_meta) def test_lxc_container_and_uri(self): instance_data = dict(self.test_instance) self._check_xml_and_container(instance_data) def test_xml_disk_prefix(self): instance_data = dict(self.test_instance) self._check_xml_and_disk_prefix(instance_data, None) def test_xml_user_specified_disk_prefix(self): instance_data = dict(self.test_instance) self._check_xml_and_disk_prefix(instance_data, 'sd') def test_xml_disk_driver(self): instance_data = dict(self.test_instance) self._check_xml_and_disk_driver(instance_data) def test_xml_disk_bus_virtio(self): image_meta = objects.ImageMeta.from_dict(self.test_image_meta) self._check_xml_and_disk_bus(image_meta, None, (("disk", "virtio", "vda"),)) def test_xml_disk_bus_ide(self): # It's necessary to check if the 
architecture is power, because # power doesn't have support to ide, and so libvirt translate # all ide calls to scsi expected = {arch.PPC: ("cdrom", "scsi", "sda"), arch.PPC64: ("cdrom", "scsi", "sda"), arch.PPC64LE: ("cdrom", "scsi", "sda"), arch.AARCH64: ("cdrom", "scsi", "sda")} expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}), ("cdrom", "ide", "hda")) image_meta = objects.ImageMeta.from_dict({ "disk_format": "iso"}) self._check_xml_and_disk_bus(image_meta, None, (expec_val,)) def test_xml_disk_bus_ide_and_virtio(self): # It's necessary to check if the architecture is power, because # power doesn't have support to ide, and so libvirt translate # all ide calls to scsi expected = {arch.PPC: ("cdrom", "scsi", "sda"), arch.PPC64: ("cdrom", "scsi", "sda"), arch.PPC64LE: ("cdrom", "scsi", "sda"), arch.AARCH64: ("cdrom", "scsi", "sda")} swap = {'device_name': '/dev/vdc', 'swap_size': 1} ephemerals = [{'device_type': 'disk', 'disk_bus': 'virtio', 'device_name': '/dev/vdb', 'size': 1}] block_device_info = { 'swap': swap, 'ephemerals': ephemerals} expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}), ("cdrom", "ide", "hda")) image_meta = objects.ImageMeta.from_dict({ "disk_format": "iso"}) self._check_xml_and_disk_bus(image_meta, block_device_info, (expec_val, ("disk", "virtio", "vdb"), ("disk", "virtio", "vdc"))) @mock.patch.object(host.Host, "list_instance_domains") def test_list_instances(self, mock_list): vm1 = FakeVirtDomain(id=3, name="instance00000001") vm2 = FakeVirtDomain(id=17, name="instance00000002") vm3 = FakeVirtDomain(name="instance00000003") vm4 = FakeVirtDomain(name="instance00000004") mock_list.return_value = [vm1, vm2, vm3, vm4] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) names = drvr.list_instances() self.assertEqual(names[0], vm1.name()) self.assertEqual(names[1], vm2.name()) self.assertEqual(names[2], vm3.name()) self.assertEqual(names[3], vm4.name()) mock_list.assert_called_with(only_guests=True, 
only_running=False) @mock.patch.object(host.Host, "list_instance_domains") def test_list_instance_uuids(self, mock_list): vm1 = FakeVirtDomain(id=3, name="instance00000001") vm2 = FakeVirtDomain(id=17, name="instance00000002") vm3 = FakeVirtDomain(name="instance00000003") vm4 = FakeVirtDomain(name="instance00000004") mock_list.return_value = [vm1, vm2, vm3, vm4] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) uuids = drvr.list_instance_uuids() self.assertEqual(len(uuids), 4) self.assertEqual(uuids[0], vm1.UUIDString()) self.assertEqual(uuids[1], vm2.UUIDString()) self.assertEqual(uuids[2], vm3.UUIDString()) self.assertEqual(uuids[3], vm4.UUIDString()) mock_list.assert_called_with(only_guests=True, only_running=False) @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus') def test_get_host_vcpus(self, get_online_cpus): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.flags(vcpu_pin_set="4-5") get_online_cpus.return_value = set([4, 5, 6]) expected_vcpus = 2 vcpus = drvr._get_vcpu_total() self.assertEqual(expected_vcpus, vcpus) @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus') def test_get_host_vcpus_out_of_range(self, get_online_cpus): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.flags(vcpu_pin_set="4-6") get_online_cpus.return_value = set([4, 5]) self.assertRaises(exception.Invalid, drvr._get_vcpu_total) @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus') def test_get_host_vcpus_libvirt_error(self, get_online_cpus): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) not_supported_exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'this function is not supported by the connection driver:' ' virNodeNumOfDevices', error_code=fakelibvirt.VIR_ERR_NO_SUPPORT) self.flags(vcpu_pin_set="4-6") get_online_cpus.side_effect = not_supported_exc self.assertRaises(exception.Invalid, drvr._get_vcpu_total) @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus') def 
test_get_host_vcpus_libvirt_error_success(self, get_online_cpus): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) not_supported_exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'this function is not supported by the connection driver:' ' virNodeNumOfDevices', error_code=fakelibvirt.VIR_ERR_NO_SUPPORT) self.flags(vcpu_pin_set="1") get_online_cpus.side_effect = not_supported_exc expected_vcpus = 1 vcpus = drvr._get_vcpu_total() self.assertEqual(expected_vcpus, vcpus) @mock.patch('nova.virt.libvirt.host.Host.get_cpu_count') def test_get_host_vcpus_after_hotplug(self, get_cpu_count): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) get_cpu_count.return_value = 2 expected_vcpus = 2 vcpus = drvr._get_vcpu_total() self.assertEqual(expected_vcpus, vcpus) get_cpu_count.return_value = 3 expected_vcpus = 3 vcpus = drvr._get_vcpu_total() self.assertEqual(expected_vcpus, vcpus) @mock.patch.object(host.Host, "has_min_version", return_value=True) def test_quiesce(self, mock_has_min_version): self.create_fake_libvirt_mock(lookupByName=self.fake_lookup) with mock.patch.object(FakeVirtDomain, "fsFreeze") as mock_fsfreeze: drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) instance = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict( {"properties": {"hw_qemu_guest_agent": "yes"}}) self.assertIsNone(drvr.quiesce(self.context, instance, image_meta)) mock_fsfreeze.assert_called_once_with() def test_quiesce_not_supported(self): self.create_fake_libvirt_mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) instance = objects.Instance(**self.test_instance) self.assertRaises(exception.InstanceQuiesceNotSupported, drvr.quiesce, self.context, instance, None) @mock.patch.object(host.Host, "has_min_version", return_value=True) def test_unquiesce(self, mock_has_min_version): self.create_fake_libvirt_mock(getLibVersion=lambda: 1002005, lookupByName=self.fake_lookup) with mock.patch.object(FakeVirtDomain, 
"fsThaw") as mock_fsthaw: drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI()) instance = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict( {"properties": {"hw_qemu_guest_agent": "yes"}}) self.assertIsNone(drvr.unquiesce(self.context, instance, image_meta)) mock_fsthaw.assert_called_once_with() def test_create_snapshot_metadata(self): base = objects.ImageMeta.from_dict( {'disk_format': 'raw'}) instance_data = {'kernel_id': 'kernel', 'project_id': 'prj_id', 'ramdisk_id': 'ram_id', 'os_type': None} instance = objects.Instance(**instance_data) img_fmt = 'raw' snp_name = 'snapshot_name' drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name) expected = {'is_public': False, 'status': 'active', 'name': snp_name, 'properties': { 'kernel_id': instance['kernel_id'], 'image_location': 'snapshot', 'image_state': 'available', 'owner_id': instance['project_id'], 'ramdisk_id': instance['ramdisk_id'], }, 'disk_format': img_fmt, 'container_format': 'bare', } self.assertEqual(ret, expected) # simulate an instance with os_type field defined # disk format equals to ami # container format not equals to bare instance['os_type'] = 'linux' base = objects.ImageMeta.from_dict( {'disk_format': 'ami', 'container_format': 'test_container'}) expected['properties']['os_type'] = instance['os_type'] expected['disk_format'] = base.disk_format expected['container_format'] = base.container_format ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name) self.assertEqual(ret, expected) def test_get_volume_driver(self): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) connection_info = {'driver_volume_type': 'fake', 'data': {'device_path': '/fake', 'access_mode': 'rw'}} driver = conn._get_volume_driver(connection_info) result = isinstance(driver, volume_drivers.LibvirtFakeVolumeDriver) self.assertTrue(result) def test_get_volume_driver_unknown(self): conn = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) connection_info = {'driver_volume_type': 'unknown', 'data': {'device_path': '/fake', 'access_mode': 'rw'}} self.assertRaises( exception.VolumeDriverNotFound, conn._get_volume_driver, connection_info ) @mock.patch.object(volume_drivers.LibvirtFakeVolumeDriver, 'connect_volume') @mock.patch.object(volume_drivers.LibvirtFakeVolumeDriver, 'get_config') def test_get_volume_config(self, get_config, connect_volume): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) connection_info = {'driver_volume_type': 'fake', 'data': {'device_path': '/fake', 'access_mode': 'rw'}} bdm = {'device_name': 'vdb', 'disk_bus': 'fake-bus', 'device_type': 'fake-type'} disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'], 'dev': 'vdb'} mock_config = mock.MagicMock() get_config.return_value = mock_config config = drvr._get_volume_config(connection_info, disk_info) get_config.assert_called_once_with(connection_info, disk_info) self.assertEqual(mock_config, config) def test_attach_invalid_volume_type(self): self.create_fake_libvirt_mock() libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup instance = objects.Instance(**self.test_instance) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.VolumeDriverNotFound, drvr.attach_volume, None, {"driver_volume_type": "badtype"}, instance, "/dev/sda") def test_attach_blockio_invalid_hypervisor(self): self.flags(virt_type='lxc', group='libvirt') self.create_fake_libvirt_mock() libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup instance = objects.Instance(**self.test_instance) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.InvalidHypervisorType, drvr.attach_volume, None, {"driver_volume_type": "fake", "data": {"logical_block_size": "4096", "physical_block_size": "4096"} }, instance, "/dev/sda") def _test_check_discard(self, 
mock_log, driver_discard=None, bus=None, should_log=False): mock_config = mock.Mock() mock_config.driver_discard = driver_discard mock_config.target_bus = bus mock_instance = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._check_discard_for_attach_volume(mock_config, mock_instance) self.assertEqual(should_log, mock_log.called) @mock.patch('nova.virt.libvirt.driver.LOG.debug') def test_check_discard_for_attach_volume_no_unmap(self, mock_log): self._test_check_discard(mock_log, driver_discard=None, bus='scsi', should_log=False) @mock.patch('nova.virt.libvirt.driver.LOG.debug') def test_check_discard_for_attach_volume_blk_controller(self, mock_log): self._test_check_discard(mock_log, driver_discard='unmap', bus='virtio', should_log=True) @mock.patch('nova.virt.libvirt.driver.LOG.debug') def test_check_discard_for_attach_volume_valid_controller(self, mock_log): self._test_check_discard(mock_log, driver_discard='unmap', bus='scsi', should_log=False) @mock.patch('nova.virt.libvirt.driver.LOG.debug') def test_check_discard_for_attach_volume_blk_controller_no_unmap(self, mock_log): self._test_check_discard(mock_log, driver_discard=None, bus='virtio', should_log=False) @mock.patch('nova.utils.get_image_from_system_metadata') @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm') @mock.patch('nova.virt.libvirt.host.Host.get_domain') def test_attach_volume_with_vir_domain_affect_live_flag(self, mock_get_domain, mock_get_info, get_image): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) image_meta = {} get_image.return_value = image_meta mock_dom = mock.MagicMock() mock_get_domain.return_value = mock_dom connection_info = {"driver_volume_type": "fake", "data": {"device_path": "/fake", "access_mode": "rw"}} bdm = {'device_name': 'vdb', 'disk_bus': 'fake-bus', 'device_type': 'fake-type'} disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'], 'dev': 'vdb'} 
mock_get_info.return_value = disk_info mock_conf = mock.MagicMock() flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE) with test.nested( mock.patch.object(drvr, '_connect_volume'), mock.patch.object(drvr, '_get_volume_config', return_value=mock_conf), mock.patch.object(drvr, '_set_cache_mode'), mock.patch.object(drvr, '_check_discard_for_attach_volume') ) as (mock_connect_volume, mock_get_volume_config, mock_set_cache_mode, mock_check_discard): for state in (power_state.RUNNING, power_state.PAUSED): mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678] drvr.attach_volume(self.context, connection_info, instance, "/dev/vdb", disk_bus=bdm['disk_bus'], device_type=bdm['device_type']) mock_get_domain.assert_called_with(instance) mock_get_info.assert_called_with( instance, CONF.libvirt.virt_type, test.MatchType(objects.ImageMeta), bdm) mock_connect_volume.assert_called_with( connection_info, disk_info) mock_get_volume_config.assert_called_with( connection_info, disk_info) mock_set_cache_mode.assert_called_with(mock_conf) mock_dom.attachDeviceFlags.assert_called_with( mock_conf.to_xml(), flags=flags) mock_check_discard.assert_called_with(mock_conf, instance) @mock.patch('nova.virt.libvirt.host.Host.get_domain') def test_detach_volume_with_vir_domain_affect_live_flag(self, mock_get_domain): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) mock_xml_with_disk = """<domain> <devices> <disk type='file'> <source file='/path/to/fake-volume'/> <target dev='vdc' bus='virtio'/> </disk> </devices> </domain>""" mock_xml_without_disk = """<domain> <devices> </devices> </domain>""" mock_dom = mock.MagicMock() # Second time don't return anything about disk vdc so it looks removed return_list = [mock_xml_with_disk, mock_xml_without_disk] # Doubling the size of return list because we test with two guest power # states mock_dom.XMLDesc.side_effect = return_list + return_list 
connection_info = {"driver_volume_type": "fake", "data": {"device_path": "/fake", "access_mode": "rw"}} flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG | fakelibvirt.VIR_DOMAIN_AFFECT_LIVE) with mock.patch.object(drvr, '_disconnect_volume') as \ mock_disconnect_volume: for state in (power_state.RUNNING, power_state.PAUSED): mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678] mock_get_domain.return_value = mock_dom drvr.detach_volume(connection_info, instance, '/dev/vdc') mock_get_domain.assert_called_with(instance) mock_dom.detachDeviceFlags.assert_called_with("""<disk type="file" device="disk"> <source file="/path/to/fake-volume"/> <target bus="virtio" dev="vdc"/> </disk> """, flags=flags) mock_disconnect_volume.assert_called_with( connection_info, 'vdc') @mock.patch('nova.virt.libvirt.host.Host.get_domain') def test_detach_volume_disk_not_found(self, mock_get_domain): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) mock_xml_without_disk = """<domain> <devices> </devices> </domain>""" mock_dom = mock.MagicMock(return_value=mock_xml_without_disk) connection_info = {"driver_volume_type": "fake", "data": {"device_path": "/fake", "access_mode": "rw"}} mock_dom.info.return_value = [power_state.RUNNING, 512, 512, 2, 1234, 5678] mock_get_domain.return_value = mock_dom self.assertRaises(exception.DiskNotFound, drvr.detach_volume, connection_info, instance, '/dev/vdc') mock_get_domain.assert_called_once_with(instance) def test_multi_nic(self): network_info = _fake_network_info(self, 2) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) xml = drvr._get_guest_xml(self.context, instance_ref, network_info, disk_info, image_meta) tree = etree.fromstring(xml) interfaces = 
tree.findall("./devices/interface") self.assertEqual(len(interfaces), 2) self.assertEqual(interfaces[0].get('type'), 'bridge') def _behave_supports_direct_io(self, raise_open=False, raise_write=False, exc=ValueError()): open_behavior = os.open(os.path.join('.', '.directio.test'), os.O_CREAT | os.O_WRONLY | os.O_DIRECT) if raise_open: open_behavior.AndRaise(exc) else: open_behavior.AndReturn(3) write_bahavior = os.write(3, mox.IgnoreArg()) if raise_write: write_bahavior.AndRaise(exc) # ensure unlink(filepath) will actually remove the file by deleting # the remaining link to it in close(fd) os.close(3) os.unlink(3) def test_supports_direct_io(self): # O_DIRECT is not supported on all Python runtimes, so on platforms # where it's not supported (e.g. Mac), we can still test the code-path # by stubbing out the value. if not hasattr(os, 'O_DIRECT'): # `mock` seems to have trouble stubbing an attr that doesn't # originally exist, so falling back to stubbing out the attribute # directly. os.O_DIRECT = 16384 self.addCleanup(delattr, os, 'O_DIRECT') einval = OSError() einval.errno = errno.EINVAL self.mox.StubOutWithMock(os, 'open') self.mox.StubOutWithMock(os, 'write') self.mox.StubOutWithMock(os, 'close') self.mox.StubOutWithMock(os, 'unlink') _supports_direct_io = libvirt_driver.LibvirtDriver._supports_direct_io self._behave_supports_direct_io() self._behave_supports_direct_io(raise_write=True) self._behave_supports_direct_io(raise_open=True) self._behave_supports_direct_io(raise_write=True, exc=einval) self._behave_supports_direct_io(raise_open=True, exc=einval) self.mox.ReplayAll() self.assertTrue(_supports_direct_io('.')) self.assertRaises(ValueError, _supports_direct_io, '.') self.assertRaises(ValueError, _supports_direct_io, '.') self.assertFalse(_supports_direct_io('.')) self.assertFalse(_supports_direct_io('.')) self.mox.VerifyAll() def _check_xml_and_container(self, instance): instance_ref = objects.Instance(**instance) image_meta = 
objects.ImageMeta.from_dict(self.test_image_meta) self.flags(virt_type='lxc', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) self.assertEqual(drvr._uri(), 'lxc:///') network_info = _fake_network_info(self, 1) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) xml = drvr._get_guest_xml(self.context, instance_ref, network_info, disk_info, image_meta) tree = etree.fromstring(xml) check = [ (lambda t: t.find('.').get('type'), 'lxc'), (lambda t: t.find('./os/type').text, 'exe'), (lambda t: t.find('./devices/filesystem/target').get('dir'), '/')] for i, (check, expected_result) in enumerate(check): self.assertEqual(check(tree), expected_result, '%s failed common check %d' % (xml, i)) target = tree.find('./devices/filesystem/source').get('dir') self.assertGreater(len(target), 0) def _check_xml_and_disk_prefix(self, instance, prefix): instance_ref = objects.Instance(**instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) def _get_prefix(p, default): if p: return p + 'a' return default type_disk_map = { 'qemu': [ (lambda t: t.find('.').get('type'), 'qemu'), (lambda t: t.find('./devices/disk/target').get('dev'), _get_prefix(prefix, 'vda'))], 'xen': [ (lambda t: t.find('.').get('type'), 'xen'), (lambda t: t.find('./devices/disk/target').get('dev'), _get_prefix(prefix, 'xvda'))], 'kvm': [ (lambda t: t.find('.').get('type'), 'kvm'), (lambda t: t.find('./devices/disk/target').get('dev'), _get_prefix(prefix, 'vda'))], 'uml': [ (lambda t: t.find('.').get('type'), 'uml'), (lambda t: t.find('./devices/disk/target').get('dev'), _get_prefix(prefix, 'ubda'))] } for (virt_type, checks) in six.iteritems(type_disk_map): self.flags(virt_type=virt_type, group='libvirt') if prefix: self.flags(disk_prefix=prefix, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) network_info = _fake_network_info(self, 1) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, 
image_meta) xml = drvr._get_guest_xml(self.context, instance_ref, network_info, disk_info, image_meta) tree = etree.fromstring(xml) for i, (check, expected_result) in enumerate(checks): self.assertEqual(check(tree), expected_result, '%s != %s failed check %d' % (check(tree), expected_result, i)) def _check_xml_and_disk_driver(self, image_meta): os_open = os.open directio_supported = True def os_open_stub(path, flags, *args, **kwargs): if flags & os.O_DIRECT: if not directio_supported: raise OSError(errno.EINVAL, '%s: %s' % (os.strerror(errno.EINVAL), path)) flags &= ~os.O_DIRECT return os_open(path, flags, *args, **kwargs) self.stub_out('os.open', os_open_stub) @staticmethod def connection_supports_direct_io_stub(dirpath): return directio_supported self.stubs.Set(libvirt_driver.LibvirtDriver, '_supports_direct_io', connection_supports_direct_io_stub) instance_ref = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) network_info = _fake_network_info(self, 1) drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) xml = drv._get_guest_xml(self.context, instance_ref, network_info, disk_info, image_meta) tree = etree.fromstring(xml) disks = tree.findall('./devices/disk/driver') for guest_disk in disks: self.assertEqual(guest_disk.get("cache"), "none") directio_supported = False # The O_DIRECT availability is cached on first use in # LibvirtDriver, hence we re-create it here drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance_ref, image_meta) xml = drv._get_guest_xml(self.context, instance_ref, network_info, disk_info, image_meta) tree = etree.fromstring(xml) disks = tree.findall('./devices/disk/driver') for guest_disk in disks: self.assertEqual(guest_disk.get("cache"), "writethrough") def _check_xml_and_disk_bus(self, image_meta, block_device_info, 
wantConfig):
        # NOTE(review): the signature of this helper begins before this chunk;
        # only its body is visible here. It renders guest XML and asserts the
        # per-disk (device, bus, dev) triples match wantConfig, in order.
        instance_ref = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self, 1)
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            block_device_info)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta,
                                 block_device_info=block_device_info)
        tree = etree.fromstring(xml)

        got_disks = tree.findall('./devices/disk')
        got_disk_targets = tree.findall('./devices/disk/target')
        # Compare each expected (device, bus, dev) triple against the XML
        # in document order.
        for i in range(len(wantConfig)):
            want_device_type = wantConfig[i][0]
            want_device_bus = wantConfig[i][1]
            want_device_dev = wantConfig[i][2]

            got_device_type = got_disks[i].get('device')
            got_device_bus = got_disk_targets[i].get('bus')
            got_device_dev = got_disk_targets[i].get('dev')

            self.assertEqual(got_device_type, want_device_type)
            self.assertEqual(got_device_bus, want_device_bus)
            self.assertEqual(got_device_dev, want_device_dev)

    def _check_xml_and_uuid(self, image_meta):
        # Render guest XML for a fresh test instance and assert the
        # <uuid> element carries the instance's uuid.
        # NOTE(review): the image_meta parameter is immediately shadowed by
        # the rebinding below, so the argument value is never used here.
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        network_info = _fake_network_info(self, 1)
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        self.assertEqual(tree.find('./uuid').text,
                         instance_ref['uuid'])

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_get_host_sysinfo_serial_hardware",)
    def _check_xml_and_uri(self, instance, mock_serial,
                           expect_ramdisk=False, expect_kernel=False,
                           rescue=None, expect_xen_hvm=False, xen_only=False):
        # Shared checker: for each hypervisor type, verify the connection URI
        # and run a list of (extractor-lambda, expected-value) checks against
        # the generated domain XML (os type, kernel/ramdisk, sysinfo entries,
        # serial/console devices, disks, firewall filterref).
        mock_serial.return_value = "cef19ce0-0ca2-11df-855d-b19fbce37686"
        instance_ref = objects.Instance(**instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        xen_vm_mode = vm_mode.XEN
        if expect_xen_hvm:
            xen_vm_mode = vm_mode.HVM

        # Maps virt_type -> (expected URI, list of (check_fn, expected)).
        type_uri_map = {'qemu': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'qemu'),
                              (lambda t: t.find('./os/type').text,
                               vm_mode.HVM),
                              (lambda t: t.find('./devices/emulator'), None)]),
                        'kvm': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'kvm'),
                              (lambda t: t.find('./os/type').text,
                               vm_mode.HVM),
                              (lambda t: t.find('./devices/emulator'), None)]),
                        'uml': ('uml:///system',
                             [(lambda t: t.find('.').get('type'), 'uml'),
                              (lambda t: t.find('./os/type').text,
                               vm_mode.UML)]),
                        'xen': ('xen:///',
                             [(lambda t: t.find('.').get('type'), 'xen'),
                              (lambda t: t.find('./os/type').text,
                               xen_vm_mode)])}

        if expect_xen_hvm or xen_only:
            hypervisors_to_check = ['xen']
        else:
            hypervisors_to_check = ['qemu', 'kvm', 'xen']

        for hypervisor_type in hypervisors_to_check:
            check_list = type_uri_map[hypervisor_type][1]

            if rescue:
                suffix = '.rescue'
            else:
                suffix = ''
            if expect_kernel:
                check = (lambda t: self.relpath(t.find('./os/kernel').text).
                         split('/')[1], 'kernel' + suffix)
            else:
                check = (lambda t: t.find('./os/kernel'), None)
            check_list.append(check)

            if expect_kernel:
                # no_timer_check is only expected on qemu kernel cmdlines.
                check = (lambda t: "no_timer_check" in t.find('./os/cmdline').
                         text, hypervisor_type == "qemu")
                check_list.append(check)
            # Hypervisors that only support vm_mode.HVM and Xen
            # should not produce configuration that results in kernel
            # arguments
            if not expect_kernel and (hypervisor_type in
                                      ['qemu', 'kvm', 'xen']):
                check = (lambda t: t.find('./os/root'), None)
                check_list.append(check)
                check = (lambda t: t.find('./os/cmdline'), None)
                check_list.append(check)

            if expect_ramdisk:
                check = (lambda t: self.relpath(t.find('./os/initrd').text).
                         split('/')[1], 'ramdisk' + suffix)
            else:
                check = (lambda t: t.find('./os/initrd'), None)
            check_list.append(check)

            if hypervisor_type in ['qemu', 'kvm']:
                # sysinfo entries appear in a fixed order:
                # manufacturer, product, version, serial, uuid.
                xpath = "./sysinfo/system/entry"
                check = (lambda t: t.findall(xpath)[0].get("name"),
                         "manufacturer")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[0].text,
                         version.vendor_string())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[1].get("name"),
                         "product")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[1].text,
                         version.product_string())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[2].get("name"),
                         "version")
                check_list.append(check)
                # NOTE(sirp): empty strings don't roundtrip in lxml (they are
                # converted to None), so we need an `or ''` to correct for that
                check = (lambda t: t.findall(xpath)[2].text or '',
                         version.version_string_with_package())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[3].get("name"),
                         "serial")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[3].text,
                         "cef19ce0-0ca2-11df-855d-b19fbce37686")
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[4].get("name"),
                         "uuid")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[4].text,
                         instance['uuid'])
                check_list.append(check)

            if hypervisor_type in ['qemu', 'kvm']:
                check = (lambda t: t.findall('./devices/serial')[0].get(
                        'type'), 'file')
                check_list.append(check)
                check = (lambda t: t.findall('./devices/serial')[1].get(
                        'type'), 'pty')
                check_list.append(check)
                check = (lambda t: self.relpath(t.findall(
                    './devices/serial/source')[0].get('path')).
                    split('/')[1], 'console.log')
                check_list.append(check)
            else:
                check = (lambda t: t.find('./devices/console').get(
                        'type'), 'pty')
                check_list.append(check)

        # Checks applied regardless of hypervisor type.
        common_checks = [
            (lambda t: t.find('.').tag, 'domain'),
            (lambda t: t.find('./memory').text, '2097152')]
        if rescue:
            common_checks += [
                (lambda t: self.relpath(t.findall('./devices/disk/source')[0].
                    get('file')).split('/')[1], 'disk.rescue'),
                (lambda t: self.relpath(t.findall('./devices/disk/source')[1].
                    get('file')).split('/')[1], 'disk')]
        else:
            common_checks += [(lambda t: self.relpath(t.findall(
                './devices/disk/source')[0].get('file')).split('/')[1],
                               'disk')]
            common_checks += [(lambda t: self.relpath(t.findall(
                './devices/disk/source')[1].get('file')).split('/')[1],
                               'disk.local')]

        for virt_type in hypervisors_to_check:
            expected_uri = type_uri_map[virt_type][0]
            checks = type_uri_map[virt_type][1]
            self.flags(virt_type=virt_type, group='libvirt')

            with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
                # Simulate a libvirt without baseline-CPU feature expansion.
                del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES

                drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

                self.assertEqual(drvr._uri(), expected_uri)

                network_info = _fake_network_info(self, 1)
                disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                    instance_ref,
                                                    image_meta,
                                                    rescue=rescue)

                xml = drvr._get_guest_xml(self.context, instance_ref,
                                          network_info, disk_info,
                                          image_meta,
                                          rescue=rescue)
                tree = etree.fromstring(xml)
                for i, (check, expected_result) in enumerate(checks):
                    self.assertEqual(check(tree),
                                     expected_result,
                                     '%s != %s failed check %d' %
                                     (check(tree), expected_result, i))

                for i, (check, expected_result) in enumerate(common_checks):
                    self.assertEqual(check(tree),
                                     expected_result,
                                     '%s != %s failed common check %d' %
                                     (check(tree), expected_result, i))

                filterref = './devices/interface/filterref'
                vif = network_info[0]
                nic_id = vif['address'].lower().replace(':', '')
                fw = firewall.NWFilterFirewall(drvr)
                instance_filter_name = fw._instance_filter_name(instance_ref,
                                                                nic_id)
                self.assertEqual(tree.find(filterref).get('filter'),
                                 instance_filter_name)

        # This test is supposed to make sure we don't
        # override a specifically set uri
        #
        # Deliberately not just assigning this string to CONF.connection_uri
        # and checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the CONF.
        testuri = 'something completely different'
        self.flags(connection_uri=testuri, group='libvirt')
        for (virt_type, (expected_uri, checks)) in six.iteritems(type_uri_map):
            self.flags(virt_type=virt_type, group='libvirt')
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.assertEqual(drvr._uri(), testuri)

    def test_ensure_filtering_rules_for_instance_timeout(self):
        # ensure_filtering_fules_for_instance() finishes with timeout.
        # Preparing mocks
        def fake_none(self, *args):
            return

        class FakeTime(object):
            # Accumulates requested sleep durations instead of sleeping.
            def __init__(self):
                self.counter = 0

            def sleep(self, t):
                self.counter += t

        fake_timer = FakeTime()

        def fake_sleep(t):
            fake_timer.sleep(t)

        # _fake_network_info must be called before create_fake_libvirt_mock(),
        # as _fake_network_info calls importutils.import_class() and
        # create_fake_libvirt_mock() mocks importutils.import_class().
        network_info = _fake_network_info(self, 1)
        self.create_fake_libvirt_mock()
        instance_ref = objects.Instance(**self.test_instance)

        # Start test
        self.mox.ReplayAll()
        try:
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            # instance_filter_exists stubbed to a falsy return forces the
            # retry loop to run until it times out.
            self.stubs.Set(drvr.firewall_driver,
                           'setup_basic_filtering',
                           fake_none)
            self.stubs.Set(drvr.firewall_driver,
                           'prepare_instance_filter',
                           fake_none)
            self.stubs.Set(drvr.firewall_driver,
                           'instance_filter_exists',
                           fake_none)
            self.stubs.Set(greenthread,
                           'sleep',
                           fake_sleep)
            drvr.ensure_filtering_rules_for_instance(instance_ref,
                                                     network_info)
        except exception.NovaException as e:
            msg = ('The firewall filter for %s does not exist' %
                   instance_ref['name'])
            c1 = (0 <= six.text_type(e).find(msg))
            self.assertTrue(c1)

        # Total sleep accumulated across retries before giving up.
        self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
                                                 "amount of time")

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_all_pass_with_block_migration(
            self, mock_cpu, mock_test_file, mock_svc):
        # Destination pre-check succeeds with block_migration=True and the
        # returned migrate data reflects the requested mode and disk space.
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'disk_available_least': 400,
                        'cpu_info': 'asdf',
                        }
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1
        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename
        # No need for the src_compute_info
        return_value = drvr.check_can_live_migrate_destination(
            self.context, instance_ref, None, compute_info, True)
        return_value.is_volume_backed = False
        self.assertThat({"filename": "file",
                         'image_type': 'default',
                         'disk_available_mb': 409600,
                         "disk_over_commit": False,
                         "block_migration": True,
                         "is_volume_backed": False},
                        matchers.DictMatches(return_value.to_legacy_dict()))

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_all_pass_no_block_migration(
            self, mock_cpu, mock_test_file, mock_svc):
        # Same as the block-migration variant, but block_migration=False.
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'disk_available_least': 400,
                        'cpu_info': 'asdf',
                        }
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1
        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename
        # No need for the src_compute_info
        return_value = drvr.check_can_live_migrate_destination(
            self.context, instance_ref, None, compute_info, False)
        return_value.is_volume_backed = False
        self.assertThat({"filename": "file",
                         "image_type": 'default',
                         "block_migration": False,
                         "disk_over_commit": False,
                         "disk_available_mb": 409600,
                         "is_volume_backed": False},
                        matchers.DictMatches(return_value.to_legacy_dict()))

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file',
                       return_value='fake')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_compare_cpu')
    def test_check_can_live_migrate_guest_cpu_none_model(
            self, mock_cpu, mock_test_file):
        # Tests that when instance.vcpu_model.model is None, the host cpu
        # model is used for live migration.
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        instance_ref.vcpu_model.model = None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}
        result = drvr.check_can_live_migrate_destination(
            self.context, instance_ref, compute_info, compute_info)
        result.is_volume_backed = False
        # First arg None => guest cpu config omitted, host cpu_info used.
        mock_cpu.assert_called_once_with(None, 'asdf', instance_ref)
        expected_result = {"filename": 'fake',
                           "image_type": CONF.libvirt.images_type,
                           "block_migration": False,
                           "disk_over_commit": False,
                           "disk_available_mb": 1024,
                           "is_volume_backed": False}
        self.assertEqual(expected_result, result.to_legacy_dict())

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_no_instance_cpu_info(
            self, mock_cpu, mock_test_file, mock_svc):
        # When the instance carries no vcpu_model, the destination check
        # falls back to comparing the source host's serialized cpu_info.
        instance_ref = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'cpu_info': jsonutils.dumps({
            "vendor": "AMD",
            "arch": arch.I686,
            "features": ["sse3"],
            "model": "Opteron_G3",
            "topology": {"cores": 2, "threads": 1, "sockets": 4}
        }), 'disk_available_least': 1}
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1
        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename

        return_value = drvr.check_can_live_migrate_destination(
            self.context, instance_ref, compute_info, compute_info, False)
        # NOTE(danms): Compute manager would have set this, so set it here
        return_value.is_volume_backed = False
        self.assertThat({"filename": "file",
                         "image_type": 'default',
                         "block_migration": False,
                         "disk_over_commit": False,
                         "disk_available_mb": 1024,
                         "is_volume_backed": False},
                        matchers.DictMatches(return_value.to_legacy_dict()))

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_incompatible_cpu_raises(
            self, mock_cpu, mock_svc):
        # An incompatible CPU reported by the compare step propagates as
        # InvalidCPUInfo from the destination pre-check.
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'cpu_info': 'asdf', 'disk_available_least': 1}

        mock_cpu.side_effect = exception.InvalidCPUInfo(reason='foo')
        self.assertRaises(exception.InvalidCPUInfo,
                          drvr.check_can_live_migrate_destination,
                          self.context, instance_ref,
                          compute_info, compute_info, False)

    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_compatible_host_cpu(self, mock_vconfig,
                                             mock_compare):
        # A positive compare_cpu result means compatible; _compare_cpu
        # returns None on success.
        instance = objects.Instance(**self.test_instance)
        mock_compare.return_value = 5
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info),
                                instance)
        self.assertIsNone(ret)

    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_handles_not_supported_error_gracefully(
            self, mock_vconfig, mock_compare):
        # A VIR_ERR_NO_SUPPORT libvirt error is tolerated: the comparison
        # is skipped rather than failing the migration pre-check.
        instance = objects.Instance(**self.test_instance)
        not_supported_exc = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError,
                'this function is not supported by the connection driver:'
                ' virCompareCPU',
                error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
        mock_compare.side_effect = not_supported_exc
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info),
                                instance)
        self.assertIsNone(ret)

    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt.LibvirtDriver,
                       '_vcpu_model_to_cpu_config')
    def test_compare_cpu_compatible_guest_cpu(self, mock_vcpu_to_cpu,
                                              mock_compare):
        # Guest-cpu path: a serialized guest cpu model in the first argument
        # is converted via _vcpu_model_to_cpu_config before comparison.
        instance = objects.Instance(**self.test_instance)
        mock_compare.return_value = 6
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(jsonutils.dumps(_fake_cpu_info),
                                None, instance)
        self.assertIsNone(ret)

    def test_compare_cpu_virt_type_xen(self):
        # With virt_type=xen the CPU comparison is skipped entirely.
        instance = objects.Instance(**self.test_instance)
        self.flags(virt_type='xen', group='libvirt')
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(None, None, instance)
        self.assertIsNone(ret)

    def test_compare_cpu_virt_type_qemu(self):
        # With virt_type=qemu the CPU comparison is skipped entirely.
        instance = objects.Instance(**self.test_instance)
        self.flags(virt_type='qemu', group='libvirt')
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(None, None, instance)
        self.assertIsNone(ret)

    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_invalid_cpuinfo_raises(self, mock_vconfig,
                                                mock_compare):
        # compare_cpu returning 0 signals an incompatible/invalid CPU and
        # must raise InvalidCPUInfo.
        instance = objects.Instance(**self.test_instance)
        mock_compare.return_value = 0
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.InvalidCPUInfo,
                          conn._compare_cpu, None,
                          jsonutils.dumps(_fake_cpu_info),
                          instance)

    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_incompatible_cpu_raises(self, mock_vconfig,
                                                 mock_compare):
        # A generic libvirtError (not NO_SUPPORT) during comparison becomes
        # MigrationPreCheckError.
        instance = objects.Instance(**self.test_instance)
        mock_compare.side_effect = fakelibvirt.libvirtError('cpu')
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.MigrationPreCheckError,
                          conn._compare_cpu, None,
                          jsonutils.dumps(_fake_cpu_info),
                          instance)

    def test_check_can_live_migrate_dest_cleanup_works_correctly(self):
        # The destination-check cleanup removes the shared-storage probe
        # file named in the migrate data.
        objects.Instance(**self.test_instance)
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename="file",
            block_migration=True,
            disk_over_commit=False,
            disk_available_mb=1024)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        self.mox.StubOutWithMock(drvr, '_cleanup_shared_storage_test_file')
        drvr._cleanup_shared_storage_test_file("file")

        self.mox.ReplayAll()
        drvr.cleanup_live_migration_destination_check(self.context,
                                                      dest_check_data)

    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('os.utime')
    def test_check_shared_storage_test_file_exists(self, mock_utime,
                                                   mock_path_exists):
        # Probe file present under instances_path => shared storage detected;
        # utime is called to refresh the directory timestamp first.
        tmpfile_path = os.path.join(CONF.instances_path, 'tmp123')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertTrue(drvr._check_shared_storage_test_file(
            'tmp123', mock.sentinel.instance))
        mock_utime.assert_called_once_with(CONF.instances_path, None)
        mock_path_exists.assert_called_once_with(tmpfile_path)

    @mock.patch('os.path.exists', return_value=False)
    @mock.patch('os.utime')
    def test_check_shared_storage_test_file_does_not_exist(self, mock_utime,
                                                   mock_path_exists):
        # Probe file absent => storage is not shared.
        tmpfile_path = os.path.join(CONF.instances_path, 'tmp123')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertFalse(drvr._check_shared_storage_test_file(
            'tmp123', mock.sentinel.instance))
        mock_utime.assert_called_once_with(CONF.instances_path, None)
        mock_path_exists.assert_called_once_with(tmpfile_path)

    def _mock_can_live_migrate_source(self, block_migration=False,
                                      is_shared_block_storage=False,
                                      is_shared_instance_path=False,
                                      is_booted_from_volume=False,
                                      disk_available_mb=1024,
                                      block_device_info=None,
                                      block_device_text=None):
        # Mox fixture for check_can_live_migrate_source tests: stubs the
        # storage/boot probes to the given answers and returns the
        # (instance, migrate data, driver) triple. Callers must ReplayAll().
        instance = objects.Instance(**self.test_instance)
        dest_check_data = objects.LibvirtLiveMigrateData(
            filename='file',
            image_type='default',
            block_migration=block_migration,
            disk_over_commit=False,
            disk_available_mb=disk_available_mb)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        self.mox.StubOutWithMock(drvr, '_is_shared_block_storage')
        drvr._is_shared_block_storage(instance, dest_check_data,
                block_device_info).AndReturn(is_shared_block_storage)
        self.mox.StubOutWithMock(drvr, '_check_shared_storage_test_file')
        drvr._check_shared_storage_test_file('file', instance).AndReturn(
                is_shared_instance_path)
        self.mox.StubOutWithMock(drvr, "get_instance_disk_info")
        drvr.get_instance_disk_info(instance,
                                    block_device_info=block_device_info).\
                AndReturn(block_device_text)
        self.mox.StubOutWithMock(drvr, '_is_booted_from_volume')
        drvr._is_booted_from_volume(instance, block_device_text).AndReturn(
            is_booted_from_volume)

        return (instance, dest_check_data, drvr)

    def test_check_can_live_migrate_source_block_migration(self):
        # Block migration with enough destination disk succeeds and the
        # returned migrate data carries the shared-storage flags.
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                block_migration=True)

        self.mox.StubOutWithMock(drvr, "_assert_dest_node_has_enough_disk")
        drvr._assert_dest_node_has_enough_disk(
            self.context, instance, dest_check_data.disk_available_mb,
            False, None)

        self.mox.ReplayAll()
        ret = drvr.check_can_live_migrate_source(self.context, instance,
                                                 dest_check_data)
        self.assertIsInstance(ret, objects.LibvirtLiveMigrateData)
        self.assertIn('is_shared_block_storage', ret)
        self.assertIn('is_shared_instance_path', ret)

    def test_check_can_live_migrate_source_shared_block_storage(self):
        # Shared block storage allows a non-block live migration.
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                is_shared_block_storage=True)
        self.mox.ReplayAll()
        drvr.check_can_live_migrate_source(self.context, instance,
                                           dest_check_data)

    def test_check_can_live_migrate_source_shared_instance_path(self):
        # A shared instance path also allows a non-block live migration.
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                is_shared_instance_path=True)
        self.mox.ReplayAll()
        drvr.check_can_live_migrate_source(self.context, instance,
                                           dest_check_data)

    def test_check_can_live_migrate_source_non_shared_fails(self):
        # No shared storage and no block migration => InvalidSharedStorage.
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source()
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidSharedStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)

    def test_check_can_live_migrate_source_shared_block_migration_fails(self):
        # Requesting block migration on shared block storage is rejected.
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                block_migration=True,
                is_shared_block_storage=True)
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidLocalStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)
@mock.patch.object(host.Host, 'has_min_version', return_value=False) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_assert_dest_node_has_enough_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_has_local_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_booted_from_volume') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' 'get_instance_disk_info') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_shared_block_storage', return_value=False) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_check_shared_storage_test_file', return_value=False) def test_check_can_live_migrate_source_block_migration_with_bdm_error( self, mock_check, mock_shared_block, mock_get_bdi, mock_booted_from_volume, mock_has_local, mock_enough, mock_min_version): bdi = {'block_device_mapping': ['bdm']} instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) dest_check_data = objects.LibvirtLiveMigrateData( filename='file', image_type='default', block_migration=True, disk_over_commit=False, disk_available_mb=100) self.assertRaises(exception.MigrationPreCheckError, drvr.check_can_live_migrate_source, self.context, instance, dest_check_data, block_device_info=bdi) @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_assert_dest_node_has_enough_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_has_local_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_booted_from_volume') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' 'get_instance_disk_info') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_shared_block_storage', return_value=False) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' 
'_check_shared_storage_test_file', return_value=False) def test_check_can_live_migrate_source_bm_with_bdm_tunnelled_error( self, mock_check, mock_shared_block, mock_get_bdi, mock_booted_from_volume, mock_has_local, mock_enough, mock_min_version): self.flags(live_migration_tunnelled=True, group='libvirt') bdi = {'block_device_mapping': ['bdm']} instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) dest_check_data = objects.LibvirtLiveMigrateData( filename='file', image_type='default', block_migration=True, disk_over_commit=False, disk_available_mb=100) drvr._parse_migration_flags() self.assertRaises(exception.MigrationPreCheckError, drvr.check_can_live_migrate_source, self.context, instance, dest_check_data, block_device_info=bdi) @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_assert_dest_node_has_enough_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_has_local_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_booted_from_volume') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' 'get_instance_disk_info') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_shared_block_storage') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' 
'_check_shared_storage_test_file') def _test_check_can_live_migrate_source_block_migration_none( self, block_migrate, is_shared_instance_path, is_share_block, mock_check, mock_shared_block, mock_get_bdi, mock_booted_from_volume, mock_has_local, mock_enough, mock_verson): mock_check.return_value = is_shared_instance_path mock_shared_block.return_value = is_share_block instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) dest_check_data = objects.LibvirtLiveMigrateData( filename='file', image_type='default', disk_over_commit=False, disk_available_mb=100) dest_check_data_ret = drvr.check_can_live_migrate_source( self.context, instance, dest_check_data) self.assertEqual(block_migrate, dest_check_data_ret.block_migration) def test_check_can_live_migrate_source_block_migration_none_shared1(self): self._test_check_can_live_migrate_source_block_migration_none( False, True, False) def test_check_can_live_migrate_source_block_migration_none_shared2(self): self._test_check_can_live_migrate_source_block_migration_none( False, False, True) def test_check_can_live_migrate_source_block_migration_none_no_share(self): self._test_check_can_live_migrate_source_block_migration_none( True, False, False) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_assert_dest_node_has_enough_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_assert_dest_node_has_enough_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_has_local_disk') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_booted_from_volume') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' 'get_instance_disk_info') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' '_is_shared_block_storage') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.' 
'_check_shared_storage_test_file') def test_check_can_live_migration_source_disk_over_commit_none(self, mock_check, mock_shared_block, mock_get_bdi, mock_booted_from_volume, mock_has_local, mock_enough, mock_disk_check): mock_check.return_value = False mock_shared_block.return_value = False instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) dest_check_data = objects.LibvirtLiveMigrateData( filename='file', image_type='default', disk_available_mb=100) drvr.check_can_live_migrate_source( self.context, instance, dest_check_data) self.assertFalse(mock_disk_check.called) def _is_shared_block_storage_test_create_mocks(self, disks): # Test data instance_xml = ("<domain type='kvm'><name>instance-0000000a</name>" "<devices>{}</devices></domain>") disks_xml = '' for dsk in disks: if dsk['type'] is not 'network': disks_xml = ''.join([disks_xml, "<disk type='{type}'>" "<driver name='qemu' type='{driver}'/>" "<source {source}='{source_path}'/>" "<target dev='{target_dev}' bus='virtio'/>" "</disk>".format(**dsk)]) else: disks_xml = ''.join([disks_xml, "<disk type='{type}'>" "<driver name='qemu' type='{driver}'/>" "<source protocol='{source_proto}'" "name='{source_image}' >" "<host name='hostname' port='7000'/>" "<config file='/path/to/file'/>" "</source>" "<target dev='{target_dev}'" "bus='ide'/>".format(**dsk)]) # Preparing mocks mock_virDomain = mock.Mock(fakelibvirt.virDomain) mock_virDomain.XMLDesc = mock.Mock() mock_virDomain.XMLDesc.return_value = (instance_xml.format(disks_xml)) mock_lookup = mock.Mock() def mock_lookup_side_effect(name): return mock_virDomain mock_lookup.side_effect = mock_lookup_side_effect mock_getsize = mock.Mock() mock_getsize.return_value = "10737418240" return (mock_getsize, mock_lookup) def test_is_shared_block_storage_rbd(self): self.flags(images_type='rbd', group='libvirt') bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_get_instance_disk_info = mock.Mock() data = objects.LibvirtLiveMigrateData(image_type='rbd') with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertTrue(drvr._is_shared_block_storage(instance, data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) self.assertTrue(drvr._is_storage_shared_with('foo', 'bar')) def test_is_shared_block_storage_lvm(self): self.flags(images_type='lvm', group='libvirt') bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) data = objects.LibvirtLiveMigrateData(image_type='lvm', is_volume_backed=False, is_shared_instance_path=False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_qcow2(self): self.flags(images_type='qcow2', group='libvirt') bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) data = objects.LibvirtLiveMigrateData(image_type='qcow2', is_volume_backed=False, is_shared_instance_path=False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_rbd_only_source(self): self.flags(images_type='rbd', group='libvirt') bdi = 
{'block_device_mapping': []} instance = objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) data = objects.LibvirtLiveMigrateData(is_shared_instance_path=False, is_volume_backed=False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_rbd_only_dest(self): bdi = {'block_device_mapping': []} instance = objects.Instance(**self.test_instance) mock_get_instance_disk_info = mock.Mock() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) data = objects.LibvirtLiveMigrateData(image_type='rbd', is_volume_backed=False, is_shared_instance_path=False) with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertFalse(drvr._is_shared_block_storage( instance, data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_is_shared_block_storage_volume_backed(self): disks = [{'type': 'block', 'driver': 'raw', 'source': 'dev', 'source_path': '/dev/disk', 'target_dev': 'vda'}] bdi = {'block_device_mapping': [ {'connection_info': 'info', 'mount_device': '/dev/vda'}]} instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) (mock_getsize, mock_lookup) =\ self._is_shared_block_storage_test_create_mocks(disks) data = objects.LibvirtLiveMigrateData(is_volume_backed=True, is_shared_instance_path=False) with mock.patch.object(host.Host, 'get_domain', mock_lookup): self.assertTrue(drvr._is_shared_block_storage(instance, data, block_device_info = bdi)) mock_lookup.assert_called_once_with(instance) def 
test_is_shared_block_storage_volume_backed_with_disk(self): disks = [{'type': 'block', 'driver': 'raw', 'source': 'dev', 'source_path': '/dev/disk', 'target_dev': 'vda'}, {'type': 'file', 'driver': 'raw', 'source': 'file', 'source_path': '/instance/disk.local', 'target_dev': 'vdb'}] bdi = {'block_device_mapping': [ {'connection_info': 'info', 'mount_device': '/dev/vda'}]} instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) (mock_getsize, mock_lookup) =\ self._is_shared_block_storage_test_create_mocks(disks) data = objects.LibvirtLiveMigrateData(is_volume_backed=True, is_shared_instance_path=False) with test.nested( mock.patch.object(os.path, 'getsize', mock_getsize), mock.patch.object(host.Host, 'get_domain', mock_lookup)): self.assertFalse(drvr._is_shared_block_storage( instance, data, block_device_info = bdi)) mock_getsize.assert_called_once_with('/instance/disk.local') mock_lookup.assert_called_once_with(instance) def test_is_shared_block_storage_nfs(self): bdi = {'block_device_mapping': []} drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_image_backend = mock.MagicMock() drvr.image_backend = mock_image_backend mock_backend = mock.MagicMock() mock_image_backend.backend.return_value = mock_backend mock_backend.is_file_in_instance_path.return_value = True mock_get_instance_disk_info = mock.Mock() data = objects.LibvirtLiveMigrateData( is_shared_instance_path=True, image_type='foo') with mock.patch.object(drvr, 'get_instance_disk_info', mock_get_instance_disk_info): self.assertTrue(drvr._is_shared_block_storage( 'instance', data, block_device_info=bdi)) self.assertEqual(0, mock_get_instance_disk_info.call_count) def test_live_migration_update_graphics_xml(self): self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = 
objects.Instance(**instance_dict) xml_tmpl = ("<domain type='kvm'>" "<devices>" "<graphics type='vnc' listen='{vnc}'>" "<listen address='{vnc}'/>" "</graphics>" "<graphics type='spice' listen='{spice}'>" "<listen address='{spice}'/>" "</graphics>" "</devices>" "</domain>") initial_xml = xml_tmpl.format(vnc='1.2.3.4', spice='5.6.7.8') target_xml = xml_tmpl.format(vnc='10.0.0.1', spice='10.0.0.2') target_xml = etree.tostring(etree.fromstring(target_xml)) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) guest = libvirt_guest.Guest(vdmock) self.mox.StubOutWithMock(vdmock, "migrateToURI2") _bandwidth = CONF.libvirt.live_migration_bandwidth vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn( initial_xml) vdmock.migrateToURI2(drvr._live_migration_uri('dest'), dxml=target_xml, flags=mox.IgnoreArg(), bandwidth=_bandwidth).AndRaise( fakelibvirt.libvirtError("ERR")) # start test migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='10.0.0.1', graphics_listen_addr_spice='10.0.0.2', serial_listen_addr='127.0.0.1', target_connect_addr=None, bdms=[], block_migration=False) self.mox.ReplayAll() self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, guest, []) def test_live_migration_update_volume_xml(self): self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) target_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 
'cde.67890.opst-lun-Z') # start test connection_info = { u'driver_volume_type': u'iscsi', u'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', u'data': { u'access_mode': u'rw', u'target_discovered': False, u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', 'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', }, } bdm = objects.LibvirtLiveMigrateBDMInfo( serial='58a84f6d-3f0c-4e19-a0af-eb657b790657', bus='virtio', type='disk', dev='vdb', connection_info=connection_info) migrate_data = objects.LibvirtLiveMigrateData( serial_listen_addr='', target_connect_addr=None, bdms=[bdm], block_migration=False) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) test_mock = mock.MagicMock() guest = libvirt_guest.Guest(test_mock) with mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info') as \ mget_info,\ mock.patch.object(drvr._host, 'get_domain') as mget_domain,\ mock.patch.object(fakelibvirt.virDomain, 'migrateToURI2'),\ mock.patch.object( libvirt_migrate, 'get_updated_guest_xml') as mupdate: mget_info.side_effect = exception.InstanceNotFound( instance_id='foo') mget_domain.return_value = test_mock test_mock.XMLDesc.return_value = target_xml self.assertFalse(drvr._live_migration_operation( self.context, instance_ref, 'dest', False, migrate_data, guest, [])) mupdate.assert_called_once_with( guest, migrate_data, mock.ANY) def test_live_migration_with_valid_target_connect_addr(self): self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) target_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 
'cde.67890.opst-lun-Z') # start test connection_info = { u'driver_volume_type': u'iscsi', u'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', u'data': { u'access_mode': u'rw', u'target_discovered': False, u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', 'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', }, } bdm = objects.LibvirtLiveMigrateBDMInfo( serial='58a84f6d-3f0c-4e19-a0af-eb657b790657', bus='virtio', type='disk', dev='vdb', connection_info=connection_info) migrate_data = objects.LibvirtLiveMigrateData( serial_listen_addr='', target_connect_addr='127.0.0.2', bdms=[bdm], block_migration=False) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) test_mock = mock.MagicMock() guest = libvirt_guest.Guest(test_mock) with mock.patch.object(libvirt_migrate, 'get_updated_guest_xml') as mupdate: test_mock.XMLDesc.return_value = target_xml drvr._live_migration_operation(self.context, instance_ref, 'dest', False, migrate_data, guest, []) test_mock.migrateToURI2.assert_called_once_with( 'qemu+tcp://127.0.0.2/system', dxml=mupdate(), flags=0, bandwidth=0) def test_update_volume_xml(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) initial_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 'abc.12345.opst-lun-X') target_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 
'cde.67890.opst-lun-Z') target_xml = etree.tostring(etree.fromstring(target_xml)) serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657" bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial, bus='virtio', type='disk', dev='vdb') bdmi.connection_info = {u'driver_volume_type': u'iscsi', 'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', u'data': {u'access_mode': u'rw', u'target_discovered': False, u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', 'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}} conf = vconfig.LibvirtConfigGuestDisk() conf.source_device = bdmi.type conf.driver_name = "qemu" conf.driver_format = "raw" conf.driver_cache = "none" conf.target_dev = bdmi.dev conf.target_bus = bdmi.bus conf.serial = bdmi.connection_info.get('serial') conf.source_type = "block" conf.source_path = bdmi.connection_info['data'].get('device_path') guest = libvirt_guest.Guest(mock.MagicMock()) with test.nested( mock.patch.object(drvr, '_get_volume_config', return_value=conf), mock.patch.object(guest, 'get_xml_desc', return_value=initial_xml)): config = libvirt_migrate.get_updated_guest_xml(guest, objects.LibvirtLiveMigrateData(bdms=[bdmi]), drvr._get_volume_config) parser = etree.XMLParser(remove_blank_text=True) config = etree.fromstring(config, parser) target_xml = etree.fromstring(target_xml, parser) self.assertEqual(etree.tostring(target_xml), etree.tostring(config)) def test_live_migration_uri(self): hypervisor_uri_map = ( ('xen', 'xenmigr://%s/system'), ('kvm', 'qemu+tcp://%s/system'), ('qemu', 'qemu+tcp://%s/system'), # anything else will return None ('lxc', None), ('parallels', None), ) dest = 'destination' for hyperv, uri in hypervisor_uri_map: self.flags(virt_type=hyperv, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) if uri is not None: uri = uri % dest self.assertEqual(uri, drvr._live_migration_uri(dest)) else: 
self.assertRaises(exception.LiveMigrationURINotAvailable, drvr._live_migration_uri, dest) def test_live_migration_uri_forced(self): dest = 'destination' for hyperv in ('kvm', 'xen'): self.flags(virt_type=hyperv, group='libvirt') forced_uri = 'foo://%s/bar' self.flags(live_migration_uri=forced_uri, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertEqual(forced_uri % dest, drvr._live_migration_uri(dest)) def test_update_volume_xml_no_serial(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) xml_tmpl = """ <domain type='kvm'> <devices> <disk type='block' device='disk'> <driver name='qemu' type='raw' cache='none'/> <source dev='{device_path}'/> <target bus='virtio' dev='vdb'/> <serial></serial> <address type='pci' domain='0x0' bus='0x0' slot='0x04' \ function='0x0'/> </disk> </devices> </domain> """ initial_xml = xml_tmpl.format(device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 'abc.12345.opst-lun-X') target_xml = xml_tmpl.format(device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 
'abc.12345.opst-lun-X') target_xml = etree.tostring(etree.fromstring(target_xml)) serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657" connection_info = { u'driver_volume_type': u'iscsi', 'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', u'data': { u'access_mode': u'rw', u'target_discovered': False, u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657', u'device_path': u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z', }, } bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial, bus='virtio', dev='vdb', type='disk') bdmi.connection_info = connection_info conf = vconfig.LibvirtConfigGuestDisk() conf.source_device = bdmi.type conf.driver_name = "qemu" conf.driver_format = "raw" conf.driver_cache = "none" conf.target_dev = bdmi.dev conf.target_bus = bdmi.bus conf.serial = bdmi.connection_info.get('serial') conf.source_type = "block" conf.source_path = bdmi.connection_info['data'].get('device_path') guest = libvirt_guest.Guest(mock.MagicMock()) with test.nested( mock.patch.object(drvr, '_get_volume_config', return_value=conf), mock.patch.object(guest, 'get_xml_desc', return_value=initial_xml)): config = libvirt_migrate.get_updated_guest_xml(guest, objects.LibvirtLiveMigrateData(bdms=[bdmi]), drvr._get_volume_config) self.assertEqual(target_xml, config) def test_update_volume_xml_no_connection_info(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) initial_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 'abc.12345.opst-lun-X') target_xml = self.device_xml_tmpl.format( device_path='/dev/disk/by-path/' 'ip-1.2.3.4:3260-iqn.' 
'abc.12345.opst-lun-X') target_xml = etree.tostring(etree.fromstring(target_xml)) serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657" bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial, dev='vdb', type='disk', bus='scsi', format='qcow') bdmi.connection_info = {} conf = vconfig.LibvirtConfigGuestDisk() guest = libvirt_guest.Guest(mock.MagicMock()) with test.nested( mock.patch.object(drvr, '_get_volume_config', return_value=conf), mock.patch.object(guest, 'get_xml_desc', return_value=initial_xml)): config = libvirt_migrate.get_updated_guest_xml( guest, objects.LibvirtLiveMigrateData(bdms=[bdmi]), drvr._get_volume_config) self.assertEqual(target_xml, config) @mock.patch.object(fakelibvirt.virDomain, "migrateToURI2") @mock.patch.object(fakelibvirt.virDomain, "XMLDesc") def test_live_migration_update_serial_console_xml(self, mock_xml, mock_migrate): self.compute = importutils.import_object(CONF.compute_manager) instance_ref = self.test_instance xml_tmpl = ("<domain type='kvm'>" "<devices>" "<console type='tcp'>" "<source mode='bind' host='{addr}' service='10000'/>" "</console>" "</devices>" "</domain>") initial_xml = xml_tmpl.format(addr='9.0.0.1') target_xml = xml_tmpl.format(addr='9.0.0.12') target_xml = etree.tostring(etree.fromstring(target_xml)) # Preparing mocks mock_xml.return_value = initial_xml mock_migrate.side_effect = fakelibvirt.libvirtError("ERR") # start test bandwidth = CONF.libvirt.live_migration_bandwidth migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='10.0.0.1', graphics_listen_addr_spice='10.0.0.2', serial_listen_addr='9.0.0.12', target_connect_addr=None, bdms=[], block_migration=False) dom = fakelibvirt.virDomain guest = libvirt_guest.Guest(dom) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, guest, []) mock_xml.assert_called_once_with( 
flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE) mock_migrate.assert_called_once_with( drvr._live_migration_uri('dest'), dxml=target_xml, flags=mock.ANY, bandwidth=bandwidth) @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True) def test_live_migration_fails_with_serial_console_without_migratable(self): self.compute = importutils.import_object(CONF.compute_manager) instance_ref = self.test_instance CONF.set_override("enabled", True, "serial_console") dom = fakelibvirt.virDomain migrate_data = objects.LibvirtLiveMigrateData( serial_listen_addr='', target_connect_addr=None, block_migration=False) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.MigrationError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, dom, []) @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True) def test_live_migration_uses_migrateToURI_without_migratable_flag(self): self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) guest = libvirt_guest.Guest(vdmock) self.mox.StubOutWithMock(vdmock, "migrateToURI") _bandwidth = CONF.libvirt.live_migration_bandwidth vdmock.migrateToURI(drvr._live_migration_uri('dest'), flags=mox.IgnoreArg(), bandwidth=_bandwidth).AndRaise( fakelibvirt.libvirtError("ERR")) # start test migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='0.0.0.0', graphics_listen_addr_spice='0.0.0.0', serial_listen_addr='127.0.0.1', target_connect_addr=None, bdms=[], block_migration=False) self.mox.ReplayAll() self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance_ref, 
'dest', False, migrate_data, guest, []) def test_live_migration_uses_migrateToURI_without_dest_listen_addrs(self): self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) guest = libvirt_guest.Guest(vdmock) self.mox.StubOutWithMock(vdmock, "migrateToURI") _bandwidth = CONF.libvirt.live_migration_bandwidth vdmock.migrateToURI(drvr._live_migration_uri('dest'), flags=mox.IgnoreArg(), bandwidth=_bandwidth).AndRaise( fakelibvirt.libvirtError("ERR")) # start test migrate_data = objects.LibvirtLiveMigrateData( serial_listen_addr='', target_connect_addr=None, bdms=[], block_migration=False) self.mox.ReplayAll() self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, guest, []) @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch.object(fakelibvirt.virDomain, "migrateToURI3") @mock.patch('nova.virt.libvirt.migration.get_updated_guest_xml', return_value='') @mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc', return_value='<xml></xml>') def test_live_migration_uses_migrateToURI3( self, mock_old_xml, mock_new_xml, mock_migrateToURI3, mock_min_version): # Preparing mocks disk_paths = ['vda', 'vdb'] params = { 'migrate_disks': ['vda', 'vdb'], 'bandwidth': CONF.libvirt.live_migration_bandwidth, 'destination_xml': '', } mock_migrateToURI3.side_effect = fakelibvirt.libvirtError("ERR") # Start test migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='0.0.0.0', graphics_listen_addr_spice='0.0.0.0', serial_listen_addr='127.0.0.1', target_connect_addr=None, bdms=[], block_migration=False) dom = fakelibvirt.virDomain guest = 
libvirt_guest.Guest(dom) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) self.assertRaises(fakelibvirt.libvirtError, drvr._live_migration_operation, self.context, instance, 'dest', False, migrate_data, guest, disk_paths) mock_migrateToURI3.assert_called_once_with( drvr._live_migration_uri('dest'), params=params, flags=0) @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch.object(fakelibvirt.virDomain, "migrateToURI3") @mock.patch('nova.virt.libvirt.migration.get_updated_guest_xml', return_value='') @mock.patch('nova.virt.libvirt.guest.Guest.get_xml_desc', return_value='') def test_block_live_migration_tunnelled_migrateToURI3( self, mock_old_xml, mock_new_xml, mock_migrateToURI3, mock_min_version): self.flags(live_migration_tunnelled=True, group='libvirt') # Preparing mocks disk_paths = [] params = { 'bandwidth': CONF.libvirt.live_migration_bandwidth, 'destination_xml': '', } # Start test migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='0.0.0.0', graphics_listen_addr_spice='0.0.0.0', serial_listen_addr='127.0.0.1', target_connect_addr=None, bdms=[], block_migration=True) dom = fakelibvirt.virDomain guest = libvirt_guest.Guest(dom) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._parse_migration_flags() instance = objects.Instance(**self.test_instance) drvr._live_migration_operation(self.context, instance, 'dest', True, migrate_data, guest, disk_paths) mock_migrateToURI3.assert_called_once_with( drvr._live_migration_uri('dest'), params=params, flags=151) @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None, create=True) def test_live_migration_fails_without_migratable_flag_or_0_addr(self): self.flags(enabled=True, vncserver_listen='1.2.3.4', group='vnc') self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': 
power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) self.mox.StubOutWithMock(vdmock, "migrateToURI") # start test migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='1.2.3.4', graphics_listen_addr_spice='1.2.3.4', serial_listen_addr='127.0.0.1', target_connect_addr=None, block_migration=False) self.mox.ReplayAll() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.MigrationError, drvr._live_migration_operation, self.context, instance_ref, 'dest', False, migrate_data, vdmock, []) def test_live_migration_raises_exception(self): # Confirms recover method is called when exceptions are raised. # Preparing data self.compute = importutils.import_object(CONF.compute_manager) instance_dict = dict(self.test_instance) instance_dict.update({'host': 'fake', 'power_state': power_state.RUNNING, 'vm_state': vm_states.ACTIVE}) instance_ref = objects.Instance(**instance_dict) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Preparing mocks vdmock = self.mox.CreateMock(fakelibvirt.virDomain) guest = libvirt_guest.Guest(vdmock) self.mox.StubOutWithMock(vdmock, "migrateToURI2") _bandwidth = CONF.libvirt.live_migration_bandwidth if getattr(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None) is None: vdmock.migrateToURI(drvr._live_migration_uri('dest'), flags=mox.IgnoreArg(), bandwidth=_bandwidth).AndRaise( fakelibvirt.libvirtError('ERR')) else: vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE ).AndReturn(FakeVirtDomain().XMLDesc(flags=0)) vdmock.migrateToURI2(drvr._live_migration_uri('dest'), dxml=mox.IgnoreArg(), flags=mox.IgnoreArg(), bandwidth=_bandwidth).AndRaise( fakelibvirt.libvirtError('ERR')) # start test migrate_data = objects.LibvirtLiveMigrateData( graphics_listen_addr_vnc='127.0.0.1', graphics_listen_addr_spice='127.0.0.1', serial_listen_addr='127.0.0.1', 
            # (continuation: LibvirtLiveMigrateData kwargs started on the
            # previous line of test_live_migration_raises_exception)
            target_connect_addr=None,
            bdms=[],
            block_migration=False)
        self.mox.ReplayAll()

        # The libvirtError raised by the mocked migrateToURI/migrateToURI2
        # must propagate out of _live_migration_operation unchanged.
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, guest, [])
        # The failed migration must leave the instance state untouched.
        self.assertEqual(vm_states.ACTIVE, instance_ref.vm_state)
        self.assertEqual(power_state.RUNNING, instance_ref.power_state)

    @mock.patch('shutil.rmtree')
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy')
    def test_rollback_live_migration_at_dest_not_shared(self, mock_destroy,
                                                        mock_get_instance_path,
                                                        mock_exist,
                                                        mock_shutil):
        """Rollback with a non-shared instance path must delete the
        destination instance directory even when destroy() itself fails.
        """
        # destroy method may raise InstanceTerminationFailure or
        # InstancePowerOffFailure, here use their base class Invalid.
        mock_destroy.side_effect = exception.Invalid(reason='just test')
        fake_instance_path = os.path.join(cfg.CONF.instances_path,
                                          '/fake_instance_uuid')
        mock_get_instance_path.return_value = fake_instance_path
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        migrate_data = objects.LibvirtLiveMigrateData(
            is_shared_instance_path=False,
            instance_relative_path=False)
        # The destroy error is re-raised to the caller ...
        self.assertRaises(exception.Invalid,
                          drvr.rollback_live_migration_at_destination,
                          "context", "instance", [], None, True, migrate_data)
        # ... but the destination path is still cleaned up first.
        mock_exist.assert_called_once_with(fake_instance_path)
        mock_shutil.assert_called_once_with(fake_instance_path)

    @mock.patch('shutil.rmtree')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy')
    def test_rollback_live_migration_at_dest_shared(self, mock_destroy,
                                                    mock_get_instance_path,
                                                    mock_exist,
                                                    mock_shutil):
        """Rollback with a shared instance path must NOT touch the (still
        in-use) instance directory — only destroy() is expected.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        migrate_data = objects.LibvirtLiveMigrateData(
            is_shared_instance_path=True,
            instance_relative_path=False)
        drvr.rollback_live_migration_at_destination("context", "instance", [],
                                                    None, True, migrate_data)
mock_destroy.assert_called_once_with("context", "instance", [], None, True, migrate_data) self.assertFalse(mock_get_instance_path.called) self.assertFalse(mock_exist.called) self.assertFalse(mock_shutil.called) @mock.patch.object(host.Host, "get_connection") @mock.patch.object(host.Host, "has_min_version", return_value=False) @mock.patch.object(fakelibvirt.Domain, "XMLDesc") def test_live_migration_copy_disk_paths(self, mock_xml, mock_version, mock_conn): xml = """ <domain> <name>dummy</name> <uuid>d4e13113-918e-42fe-9fc9-861693ffd432</uuid> <devices> <disk type="file"> <source file="/var/lib/nova/instance/123/disk.root"/> <target dev="vda"/> </disk> <disk type="file"> <source file="/var/lib/nova/instance/123/disk.shared"/> <target dev="vdb"/> <shareable/> </disk> <disk type="file"> <source file="/var/lib/nova/instance/123/disk.config"/> <target dev="vdc"/> <readonly/> </disk> <disk type="block"> <source dev="/dev/mapper/somevol"/> <target dev="vdd"/> </disk> <disk type="network"> <source protocol="https" name="url_path"> <host name="hostname" port="443"/> </source> </disk> </devices> </domain>""" mock_xml.return_value = xml drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) dom = fakelibvirt.Domain(drvr._get_connection(), xml, False) guest = libvirt_guest.Guest(dom) paths = drvr._live_migration_copy_disk_paths(None, None, guest) self.assertEqual((["/var/lib/nova/instance/123/disk.root", "/dev/mapper/somevol"], ['vda', 'vdd']), paths) @mock.patch.object(fakelibvirt.Domain, "XMLDesc") def test_live_migration_copy_disk_paths_tunnelled(self, mock_xml): self.flags(live_migration_tunnelled=True, group='libvirt') xml = """ <domain> <name>dummy</name> <uuid>d4e13113-918e-42fe-9fc9-861693ffd432</uuid> <devices> <disk type="file"> <source file="/var/lib/nova/instance/123/disk.root"/> <target dev="vda"/> </disk> <disk type="file"> <source file="/var/lib/nova/instance/123/disk.shared"/> <target dev="vdb"/> <shareable/> </disk> <disk type="file"> <source 
file="/var/lib/nova/instance/123/disk.config"/> <target dev="vdc"/> <readonly/> </disk> <disk type="block"> <source dev="/dev/mapper/somevol"/> <target dev="vdd"/> </disk> <disk type="network"> <source protocol="https" name="url_path"> <host name="hostname" port="443"/> </source> </disk> </devices> </domain>""" mock_xml.return_value = xml drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._parse_migration_flags() dom = fakelibvirt.Domain(drvr._get_connection(), xml, False) guest = libvirt_guest.Guest(dom) paths = drvr._live_migration_copy_disk_paths(None, None, guest) self.assertEqual((["/var/lib/nova/instance/123/disk.root", "/dev/mapper/somevol"], ['vda', 'vdd']), paths) @mock.patch.object(host.Host, "get_connection") @mock.patch.object(host.Host, "has_min_version", return_value=True) @mock.patch('nova.virt.driver.get_block_device_info') @mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid') @mock.patch.object(fakelibvirt.Domain, "XMLDesc") def test_live_migration_copy_disk_paths_selective_block_migration( self, mock_xml, mock_get_instance, mock_block_device_info, mock_version, mock_conn): xml = """ <domain> <name>dummy</name> <uuid>d4e13113-918e-42fe-9fc9-861693ffd432</uuid> <devices> <disk type="file"> <source file="/var/lib/nova/instance/123/disk.root"/> <target dev="vda"/> </disk> <disk type="file"> <source file="/var/lib/nova/instance/123/disk.shared"/> <target dev="vdb"/> </disk> <disk type="file"> <source file="/var/lib/nova/instance/123/disk.config"/> <target dev="vdc"/> </disk> <disk type="block"> <source dev="/dev/mapper/somevol"/> <target dev="vdd"/> </disk> <disk type="network"> <source protocol="https" name="url_path"> <host name="hostname" port="443"/> </source> </disk> </devices> </domain>""" mock_xml.return_value = xml instance = objects.Instance(**self.test_instance) instance.root_device_name = '/dev/vda' block_device_info = { 'swap': { 'disk_bus': u'virtio', 'swap_size': 10, 'device_name': u'/dev/vdc' }, 
'root_device_name': u'/dev/vda', 'ephemerals': [{ 'guest_format': u'ext3', 'device_name': u'/dev/vdb', 'disk_bus': u'virtio', 'device_type': u'disk', 'size': 1 }], 'block_device_mapping': [{ 'guest_format': None, 'boot_index': None, 'mount_device': u'/dev/vdd', 'connection_info': { u'driver_volume_type': u'iscsi', 'serial': u'147df29f-aec2-4851-b3fe-f68dad151834', u'data': { u'access_mode': u'rw', u'target_discovered': False, u'encrypted': False, u'qos_specs': None, u'target_iqn': u'iqn.2010-10.org.openstack:' u'volume-147df29f-aec2-4851-b3fe-' u'f68dad151834', u'target_portal': u'10.102.44.141:3260', u'volume_id': u'147df29f-aec2-4851-b3fe-f68dad151834', u'target_lun': 1, u'auth_password': u'cXELT66FngwzTwpf', u'auth_username': u'QbQQjj445uWgeQkFKcVw', u'auth_method': u'CHAP' } }, 'disk_bus': None, 'device_type': None, 'delete_on_termination': False }] } mock_block_device_info.return_value = block_device_info drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) dom = fakelibvirt.Domain(drvr._get_connection(), xml, False) guest = libvirt_guest.Guest(dom) return_value = drvr._live_migration_copy_disk_paths(self.context, instance, guest) expected = (['/var/lib/nova/instance/123/disk.root', '/var/lib/nova/instance/123/disk.shared', '/var/lib/nova/instance/123/disk.config'], ['vda', 'vdb', 'vdc']) self.assertEqual(expected, return_value) @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_copy_disk_paths") def test_live_migration_data_gb_plain(self, mock_paths): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) data_gb = drvr._live_migration_data_gb(instance, []) self.assertEqual(2, data_gb) self.assertEqual(0, mock_paths.call_count) def test_live_migration_data_gb_block(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) def fake_stat(path): class StatResult(object): def __init__(self, size): self._size = size 
@property def st_size(self): return self._size if path == "/var/lib/nova/instance/123/disk.root": return StatResult(10 * units.Gi) elif path == "/dev/mapper/somevol": return StatResult(1.5 * units.Gi) else: raise Exception("Should not be reached") disk_paths = ["/var/lib/nova/instance/123/disk.root", "/dev/mapper/somevol"] with mock.patch.object(os, "stat") as mock_stat: mock_stat.side_effect = fake_stat data_gb = drvr._live_migration_data_gb(instance, disk_paths) # Expecting 2 GB for RAM, plus 10 GB for disk.root # and 1.5 GB rounded to 2 GB for somevol, so 14 GB self.assertEqual(14, data_gb) EXPECT_SUCCESS = 1 EXPECT_FAILURE = 2 EXPECT_ABORT = 3 @mock.patch.object(libvirt_guest.Guest, "migrate_start_postcopy") @mock.patch.object(time, "time") @mock.patch.object(time, "sleep", side_effect=lambda x: eventlet.sleep(0)) @mock.patch.object(host.Host, "get_connection") @mock.patch.object(libvirt_guest.Guest, "get_job_info") @mock.patch.object(objects.Instance, "save") @mock.patch.object(objects.Migration, "save") @mock.patch.object(fakelibvirt.Connection, "_mark_running") @mock.patch.object(fakelibvirt.virDomain, "abortJob") @mock.patch.object(libvirt_guest.Guest, "pause") def _test_live_migration_monitoring(self, job_info_records, time_records, expect_result, mock_pause, mock_abort, mock_running, mock_save, mock_mig_save, mock_job_info, mock_conn, mock_sleep, mock_time, mock_postcopy_switch, current_mig_status=None, expected_mig_status=None, scheduled_action=None, scheduled_action_executed=False, block_migration=False, expected_switch=False): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) drvr.active_migrations[instance.uuid] = deque() dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", True) guest = libvirt_guest.Guest(dom) finish_event = eventlet.event.Event() def fake_job_info(): while True: self.assertGreater(len(job_info_records), 0) rec = job_info_records.pop(0) if type(rec) == str: if 
rec == "thread-finish": finish_event.send() elif rec == "domain-stop": dom.destroy() elif rec == "force_complete": drvr.active_migrations[instance.uuid].append( "force-complete") else: if len(time_records) > 0: time_records.pop(0) return rec return rec def fake_time(): if len(time_records) > 0: return time_records[0] else: return int( datetime.datetime(2001, 1, 20, 20, 1, 0) .strftime('%s')) mock_job_info.side_effect = fake_job_info mock_time.side_effect = fake_time dest = mock.sentinel.migrate_dest migration = objects.Migration(context=self.context, id=1) migrate_data = objects.LibvirtLiveMigrateData( migration=migration, block_migration=block_migration) if current_mig_status: migrate_data.migration.status = current_mig_status else: migrate_data.migration.status = "unset" migrate_data.migration.save() fake_post_method = mock.MagicMock() fake_recover_method = mock.MagicMock() drvr._live_migration_monitor(self.context, instance, guest, dest, fake_post_method, fake_recover_method, False, migrate_data, finish_event, []) if scheduled_action_executed: if scheduled_action == 'pause': self.assertTrue(mock_pause.called) if scheduled_action == 'postcopy_switch': self.assertTrue(mock_postcopy_switch.called) else: if scheduled_action == 'pause': self.assertFalse(mock_pause.called) if scheduled_action == 'postcopy_switch': self.assertFalse(mock_postcopy_switch.called) mock_mig_save.assert_called_with() if expect_result == self.EXPECT_SUCCESS: self.assertFalse(fake_recover_method.called, 'Recover method called when success expected') self.assertFalse(mock_abort.called, 'abortJob not called when success expected') if expected_switch: self.assertTrue(mock_postcopy_switch.called) fake_post_method.assert_called_once_with( self.context, instance, dest, False, migrate_data) else: if expect_result == self.EXPECT_ABORT: self.assertTrue(mock_abort.called, 'abortJob called when abort expected') else: self.assertFalse(mock_abort.called, 'abortJob not called when failure expected') 
            # Failure/abort branch of _test_live_migration_monitoring: the
            # post-migration callback must never fire, and the recover
            # callback fires exactly once (carrying migration_status only
            # when the caller asked for a specific expected status).
            self.assertFalse(fake_post_method.called,
                             'Post method called when success not expected')

            if expected_mig_status:
                fake_recover_method.assert_called_once_with(
                    self.context, instance, dest, False, migrate_data,
                    migration_status=expected_mig_status)
            else:
                fake_recover_method.assert_called_once_with(
                    self.context, instance, dest, False, migrate_data)

        # Success or failure, the instance must be deregistered from the
        # driver's table of active migrations.
        self.assertNotIn(instance.uuid, drvr.active_migrations)

    def test_live_migration_monitor_success(self):
        """Happy path: job goes NONE -> UNBOUNDED x3 -> COMPLETED."""
        # A normal sequence where we see all the normal job states
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
        ]

        self._test_live_migration_monitoring(domain_info_records, [],
                                             self.EXPECT_SUCCESS)

    def test_live_migration_handle_pause_normal(self):
        """A queued force-complete while the job is UNBOUNDED must execute
        the scheduled "pause" action.
        """
        # A normal sequence where we see all the normal job states, and pause
        # scheduled in between VIR_DOMAIN_JOB_UNBOUNDED
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "force_complete",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
        ]

        self._test_live_migration_monitoring(domain_info_records, [],
                                             self.EXPECT_SUCCESS,
                                             current_mig_status="running",
                                             scheduled_action="pause",
                                             scheduled_action_executed=True)

    def test_live_migration_handle_pause_on_start(self):
        # A normal sequence where we see all the normal job states, and pause
        # scheduled in case of job type VIR_DOMAIN_JOB_NONE and finish_event is
        # not ready yet
        domain_info_records = [
            "force_complete",
            libvirt_guest.JobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_SUCCESS, current_mig_status="preparing", scheduled_action="pause", scheduled_action_executed=True) def test_live_migration_handle_pause_on_finish(self): # A normal sequence where see all the normal job states, and pause # scheduled in case of job type VIR_DOMAIN_JOB_NONE and finish_event is # ready domain_info_records = [ libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", "force_complete", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_SUCCESS, current_mig_status="completed", scheduled_action="pause", scheduled_action_executed=False) def test_live_migration_handle_pause_on_cancel(self): # A normal sequence where see all the normal job states, and pause # scheduled in case of job type VIR_DOMAIN_JOB_CANCELLED domain_info_records = [ libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", "force_complete", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_FAILURE, current_mig_status="cancelled", 
                                             expected_mig_status='cancelled',
                                             scheduled_action="pause",
                                             scheduled_action_executed=False)

    def test_live_migration_handle_pause_on_failure(self):
        """A force-complete seen only after the job FAILED must not execute
        the scheduled "pause" action.
        """
        # A normal sequence where we see all the normal job states, and pause
        # scheduled in case of job type VIR_DOMAIN_JOB_FAILED
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            "force_complete",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_FAILED),
        ]

        self._test_live_migration_monitoring(domain_info_records, [],
                                             self.EXPECT_FAILURE,
                                             scheduled_action="pause",
                                             scheduled_action_executed=False)

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_is_post_copy_enabled")
    def test_live_migration_handle_postcopy_normal(self,
                                                   mock_postcopy_enabled):
        """With post-copy enabled, a force-complete during UNBOUNDED must
        execute the scheduled "postcopy_switch" action instead of pause.
        """
        # A normal sequence where we see all the normal job states, and
        # postcopy switch scheduled in between VIR_DOMAIN_JOB_UNBOUNDED
        mock_postcopy_enabled.return_value = True
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "force_complete",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
        ]

        self._test_live_migration_monitoring(domain_info_records, [],
                                             self.EXPECT_SUCCESS,
                                             current_mig_status="running",
                                             scheduled_action="postcopy_switch",
                                             scheduled_action_executed=True)

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_is_post_copy_enabled")
    def test_live_migration_handle_postcopy_on_start(self,
                                                     mock_postcopy_enabled):
        # A normal sequence where we see all the normal job states, and postcopy
        # switch scheduled in case of job type VIR_DOMAIN_JOB_NONE
and # finish_event is not ready yet mock_postcopy_enabled.return_value = True domain_info_records = [ "force_complete", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_SUCCESS, current_mig_status="preparing", scheduled_action="postcopy_switch", scheduled_action_executed=True) @mock.patch.object(libvirt_driver.LibvirtDriver, "_is_post_copy_enabled") def test_live_migration_handle_postcopy_on_finish(self, mock_postcopy_enabled): # A normal sequence where see all the normal job states, and postcopy # switch scheduled in case of job type VIR_DOMAIN_JOB_NONE and # finish_event is ready mock_postcopy_enabled.return_value = True domain_info_records = [ libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", "force_complete", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_SUCCESS, current_mig_status="completed", scheduled_action="postcopy_switch", scheduled_action_executed=False) @mock.patch.object(libvirt_driver.LibvirtDriver, "_is_post_copy_enabled") def test_live_migration_handle_postcopy_on_cancel(self, mock_postcopy_enabled): # A normal sequence where see all the normal job states, and postcopy # scheduled in case of job type VIR_DOMAIN_JOB_CANCELLED mock_postcopy_enabled.return_value = True domain_info_records = [ libvirt_guest.JobInfo( 
type=fakelibvirt.VIR_DOMAIN_JOB_NONE), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", "force_complete", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_FAILURE, current_mig_status="cancelled", expected_mig_status='cancelled', scheduled_action="postcopy_switch", scheduled_action_executed=False) @mock.patch.object(libvirt_driver.LibvirtDriver, "_is_post_copy_enabled") def test_live_migration_handle_pause_on_postcopy(self, mock_postcopy_enabled): # A normal sequence where see all the normal job states, and pause # scheduled after migration switched to postcopy mock_postcopy_enabled.return_value = True domain_info_records = [ libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "force_complete", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_SUCCESS, current_mig_status="running (post-copy)", scheduled_action="pause", scheduled_action_executed=False) @mock.patch.object(libvirt_driver.LibvirtDriver, "_is_post_copy_enabled") def test_live_migration_handle_postcopy_on_postcopy(self, mock_postcopy_enabled): # A normal sequence where see all the normal job states, and pause # scheduled after migration switched to postcopy mock_postcopy_enabled.return_value = True domain_info_records = [ libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( 
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "force_complete", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_SUCCESS, current_mig_status="running (post-copy)", scheduled_action="postcopy_switch", scheduled_action_executed=False) @mock.patch.object(libvirt_driver.LibvirtDriver, "_is_post_copy_enabled") def test_live_migration_handle_postcopy_on_failure(self, mock_postcopy_enabled): # A normal sequence where see all the normal job states, and postcopy # scheduled in case of job type VIR_DOMAIN_JOB_FAILED mock_postcopy_enabled.return_value = True domain_info_records = [ libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", "force_complete", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_FAILED), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_FAILURE, scheduled_action="postcopy_switch", scheduled_action_executed=False) def test_live_migration_monitor_success_race(self): # A normalish sequence but we're too slow to see the # completed job state domain_info_records = [ libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), ] self._test_live_migration_monitoring(domain_info_records, [], self.EXPECT_SUCCESS) def test_live_migration_monitor_failed(self): # A failed sequence where we see all the expected events 
        # (body of test_live_migration_monitor_failed, whose def/comment are
        # on the previous line) — no "domain-stop" here: the job fails while
        # the domain is still running.
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_FAILED),
        ]

        self._test_live_migration_monitoring(domain_info_records, [],
                                             self.EXPECT_FAILURE)

    def test_live_migration_monitor_failed_race(self):
        """Failure race: job already back to NONE (and domain still up)
        when the monitor looks — must still be reported as a failure.
        """
        # A failed sequence where we are too slow to see the
        # failed event
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
        ]

        self._test_live_migration_monitoring(domain_info_records, [],
                                             self.EXPECT_FAILURE)

    def test_live_migration_monitor_cancelled(self):
        """A cancelled job must fail the migration and record the
        'cancelled' migration status.
        """
        # A cancelled sequence where we see all the events
        domain_info_records = [
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
        ]

        self._test_live_migration_monitoring(domain_info_records, [],
                                             self.EXPECT_FAILURE,
                                             expected_mig_status='cancelled')

    @mock.patch.object(fakelibvirt.virDomain, "migrateSetMaxDowntime")
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_migration_downtime_steps")
    def test_live_migration_monitor_downtime(self, mock_downtime_steps,
                                             mock_set_downtime):
        # Huge timeouts so neither completion nor progress timeout can
        # interfere with the downtime-step behaviour under test.
        self.flags(live_migration_completion_timeout=1000000,
                   live_migration_progress_timeout=1000000,
                   group='libvirt')
        # We've setup 4 fake
downtime steps - first value is the # time delay, second is the downtime value downtime_steps = [ (90, 10), (180, 50), (270, 200), (500, 300), ] mock_downtime_steps.return_value = downtime_steps # Each one of these fake times is used for time.time() # when a new domain_info_records entry is consumed. # Times are chosen so that only the first 3 downtime # steps are needed. fake_times = [0, 1, 30, 95, 150, 200, 300] # A normal sequence where see all the normal job states domain_info_records = [ libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED), ] self._test_live_migration_monitoring(domain_info_records, fake_times, self.EXPECT_SUCCESS) mock_set_downtime.assert_has_calls([mock.call(10), mock.call(50), mock.call(200)]) def test_live_migration_monitor_completion(self): self.flags(live_migration_completion_timeout=100, live_migration_progress_timeout=1000000, group='libvirt') # Each one of these fake times is used for time.time() # when a new domain_info_records entry is consumed. 
fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320] # A normal sequence where see all the normal job states domain_info_records = [ libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED), "thread-finish", "domain-stop", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED), ] self._test_live_migration_monitoring(domain_info_records, fake_times, self.EXPECT_ABORT, expected_mig_status='cancelled') def test_live_migration_monitor_progress(self): self.flags(live_migration_completion_timeout=1000000, live_migration_progress_timeout=150, group='libvirt') # Each one of these fake times is used for time.time() # when a new domain_info_records entry is consumed. 
fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320] # A normal sequence where see all the normal job states domain_info_records = [ libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90), "thread-finish", "domain-stop", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED), ] self._test_live_migration_monitoring(domain_info_records, fake_times, self.EXPECT_ABORT, expected_mig_status='cancelled') def test_live_migration_monitor_progress_zero_data_remaining(self): self.flags(live_migration_completion_timeout=1000000, live_migration_progress_timeout=150, group='libvirt') # Each one of these fake times is used for time.time() # when a new domain_info_records entry is consumed. 
fake_times = [0, 40, 80, 120, 160, 200, 240, 280, 320] # A normal sequence where see all the normal job states domain_info_records = [ libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=0), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=90), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=70), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=50), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=30), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=10), libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED, data_remaining=0), "thread-finish", "domain-stop", libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_FAILED), ] self._test_live_migration_monitoring(domain_info_records, fake_times, self.EXPECT_FAILURE) def test_live_migration_downtime_steps(self): self.flags(live_migration_downtime=400, group='libvirt') self.flags(live_migration_downtime_steps=10, group='libvirt') self.flags(live_migration_downtime_delay=30, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) steps = drvr._migration_downtime_steps(3.0) self.assertEqual([ (0, 37), (90, 38), (180, 39), (270, 42), (360, 46), (450, 55), (540, 70), (630, 98), (720, 148), (810, 238), (900, 400), ], list(steps)) @mock.patch('nova.virt.libvirt.migration.should_switch_to_postcopy') @mock.patch.object(libvirt_driver.LibvirtDriver, "_is_post_copy_enabled") def test_live_migration_monitor_postcopy_switch(self, mock_postcopy_enabled, mock_should_switch): # A normal sequence where migration is switched to postcopy mode mock_postcopy_enabled.return_value = True switch_values = [False, False, True] mock_should_switch.return_value = switch_values domain_info_records = [ libvirt_guest.JobInfo( type=fakelibvirt.VIR_DOMAIN_JOB_NONE), 
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            libvirt_guest.JobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
        ]

        # expected_switch=True: the monitor should have flipped the guest
        # into post-copy mode on the third should_switch_to_postcopy check.
        self._test_live_migration_monitoring(domain_info_records, [],
                                             self.EXPECT_SUCCESS,
                                             expected_switch=True)

    @mock.patch.object(host.Host, "get_connection")
    @mock.patch.object(utils, "spawn")
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_monitor")
    @mock.patch.object(host.Host, "get_guest")
    @mock.patch.object(fakelibvirt.Connection, "_mark_running")
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_live_migration_copy_disk_paths")
    def test_live_migration_main(self, mock_copy_disk_path, mock_running,
                                 mock_guest, mock_monitor, mock_thread,
                                 mock_conn):
        # Verify _live_migration wires up the operation thread and the
        # monitor with the disk paths/devices discovered for block migration.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)

        dom = fakelibvirt.Domain(drvr._get_connection(),
                                 "<domain><name>demo</name></domain>", True)
        guest = libvirt_guest.Guest(dom)
        migrate_data = objects.LibvirtLiveMigrateData(block_migration=True)
        disks_to_copy = (['/some/path/one', '/test/path/two'],
                         ['vda', 'vdb'])
        mock_copy_disk_path.return_value = disks_to_copy

        mock_guest.return_value = guest

        def fake_post():
            pass

        def fake_recover():
            pass

        drvr._live_migration(self.context, instance, "fakehost", fake_post,
                             fake_recover, True, migrate_data)
        mock_copy_disk_path.assert_called_once_with(self.context, instance,
                                                    guest)

        # Matches any eventlet Event instance, since the actual Event object
        # is created inside _live_migration and not visible to the test.
        class AnyEventletEvent(object):
            def __eq__(self, other):
                return type(other) == eventlet.event.Event

        mock_thread.assert_called_once_with(
            drvr._live_migration_operation, self.context, instance,
            "fakehost", True, migrate_data, guest, disks_to_copy[1])
        mock_monitor.assert_called_once_with(
            self.context, instance, guest, "fakehost", fake_post,
            fake_recover, True, migrate_data, AnyEventletEvent(),
            disks_to_copy[0])

    def _do_test_create_images_and_backing(self, disk_type):
        # Common helper: _create_images_and_backing should create one image
        # of the given disk_type when the path does not already exist.
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(drvr, '_fetch_instance_kernel_ramdisk')
        self.mox.StubOutWithMock(libvirt_driver.libvirt_utils, 'create_image')

        disk_info = {'path': 'foo', 'type': disk_type,
                     'disk_size': 1 * 1024 ** 3,
                     'virt_disk_size': 20 * 1024 ** 3,
                     'backing_file': None}

        # mox expectations: recorded here, verified on ReplayAll/exit.
        libvirt_driver.libvirt_utils.create_image(
            disk_info['type'], mox.IgnoreArg(), disk_info['virt_disk_size'])
        drvr._fetch_instance_kernel_ramdisk(self.context, instance,
                                            fallback_from_host=None)
        self.mox.ReplayAll()

        self.stub_out('os.path.exists', lambda *args: False)
        drvr._create_images_and_backing(self.context, instance,
                                        "/fake/instance/dir", [disk_info])

    def test_create_images_and_backing_qcow2(self):
        self._do_test_create_images_and_backing('qcow2')

    def test_create_images_and_backing_raw(self):
        self._do_test_create_images_and_backing('raw')

    def test_create_images_and_backing_images_not_exist_no_fallback(self):
        # With no fallback host, a missing backing image must propagate
        # ImageNotFound to the caller.
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.test_instance.update({'user_id': 'fake-user',
                                   'os_type': None,
                                   'project_id': 'fake-project'})
        instance = objects.Instance(**self.test_instance)

        backing_file = imagecache.get_cache_fname(instance.image_ref)
        disk_info = [
            {u'backing_file': backing_file,
             u'disk_size': 10747904,
             u'path': u'disk_path',
             u'type': u'qcow2',
             u'virt_disk_size': 25165824}]

        with mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image',
                               side_effect=exception.ImageNotFound(
                                   image_id="fake_id")):
            self.assertRaises(exception.ImageNotFound,
                              conn._create_images_and_backing,
                              self.context, instance,
                              "/fake/instance/dir", disk_info)

    def test_create_images_and_backing_images_not_exist_fallback(self):
        # When glance no longer has the image, the backing file plus kernel
        # and ramdisk should be copied over from the fallback host instead.
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        base_dir = os.path.join(CONF.instances_path,
                                CONF.image_cache_subdirectory_name)
        self.test_instance.update({'user_id': 'fake-user',
                                   'os_type': None,
                                   'kernel_id': uuids.kernel_id,
                                   'ramdisk_id': uuids.ramdisk_id,
                                   'project_id': 'fake-project'})
        instance = objects.Instance(**self.test_instance)

        backing_file = imagecache.get_cache_fname(instance.image_ref)
        disk_info = [
            {u'backing_file': backing_file,
             u'disk_size': 10747904,
             u'path': u'disk_path',
             u'type': u'qcow2',
             u'virt_disk_size': 25165824}]

        with test.nested(
                mock.patch.object(libvirt_driver.libvirt_utils, 'copy_image'),
                mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image',
                                  side_effect=exception.ImageNotFound(
                                      image_id=uuids.fake_id)),
        ) as (copy_image_mock, fetch_image_mock):
            conn._create_images_and_backing(self.context, instance,
                                            "/fake/instance/dir", disk_info,
                                            fallback_from_host="fake_host")
            backfile_path = os.path.join(base_dir, backing_file)
            kernel_path = os.path.join(CONF.instances_path,
                                       self.test_instance['uuid'],
                                       'kernel')
            ramdisk_path = os.path.join(CONF.instances_path,
                                        self.test_instance['uuid'],
                                        'ramdisk')
            # All three files should have been pulled from the fallback host.
            copy_image_mock.assert_has_calls([
                mock.call(dest=backfile_path, src=backfile_path,
                          host='fake_host', receive=True),
                mock.call(dest=kernel_path, src=kernel_path,
                          host='fake_host', receive=True),
                mock.call(dest=ramdisk_path, src=ramdisk_path,
                          host='fake_host', receive=True)
            ])
            # fetch_image was attempted first (and raised ImageNotFound).
            fetch_image_mock.assert_has_calls([
                mock.call(context=self.context,
                          target=backfile_path,
                          image_id=self.test_instance['image_ref']),
                mock.call(self.context, kernel_path, instance.kernel_id),
                mock.call(self.context, ramdisk_path, instance.ramdisk_id)
            ])

    @mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image')
    def test_create_images_and_backing_images_exist(self, mock_fetch_image):
        # If the backing images already exist on disk, nothing is fetched.
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.test_instance.update({'user_id': 'fake-user',
                                   'os_type': None,
                                   'kernel_id': 'fake_kernel_id',
                                   'ramdisk_id': 'fake_ramdisk_id',
                                   'project_id': 'fake-project'})
        instance = objects.Instance(**self.test_instance)

        disk_info = [
            {u'backing_file': imagecache.get_cache_fname(instance.image_ref),
             u'disk_size':
             10747904,
             u'path': u'disk_path',
             u'type': u'qcow2',
             u'virt_disk_size': 25165824}]

        with test.nested(
                mock.patch.object(imagebackend.Image, 'get_disk_size'),
                mock.patch.object(os.path, 'exists', return_value=True)
        ):
            conn._create_images_and_backing(self.context, instance,
                                            '/fake/instance/dir', disk_info)
        self.assertFalse(mock_fetch_image.called)

    def test_create_images_and_backing_ephemeral_gets_created(self):
        # An ephemeral disk with a non-default backing-file name must be
        # created via _create_ephemeral (not fetched from glance).
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        base_dir = os.path.join(CONF.instances_path,
                                CONF.image_cache_subdirectory_name)
        instance = objects.Instance(**self.test_instance)
        disk_info_byname = fake_disk_info_byname(instance)
        disk_info = disk_info_byname.values()

        # Give the ephemeral disk a non-default name
        disk_info_byname['disk.local']['backing_file'] = 'ephemeral_foo'

        with test.nested(
            mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image'),
            mock.patch.object(drvr, '_create_ephemeral'),
            mock.patch.object(imagebackend.Image, 'verify_base_size'),
            mock.patch.object(imagebackend.Image, 'get_disk_size')
        ) as (fetch_image_mock, create_ephemeral_mock, verify_base_size_mock,
              disk_size_mock):
            drvr._create_images_and_backing(self.context, instance,
                                            CONF.instances_path, disk_info)

            self.assertEqual(len(create_ephemeral_mock.call_args_list), 1)

            root_backing, ephemeral_backing = [
                os.path.join(base_dir, name)
                for name in (disk_info_byname['disk']['backing_file'],
                             'ephemeral_foo')
            ]

            # Ephemeral disk was created; root disk was fetched.
            m_args, m_kwargs = create_ephemeral_mock.call_args_list[0]
            self.assertEqual(ephemeral_backing, m_kwargs['target'])

            self.assertEqual(len(fetch_image_mock.call_args_list), 1)
            m_args, m_kwargs = fetch_image_mock.call_args_list[0]
            self.assertEqual(root_backing, m_kwargs['target'])

            # Both backing files get their base size verified.
            verify_base_size_mock.assert_has_calls([
                mock.call(root_backing, instance.flavor.root_gb * units.Gi),
                mock.call(ephemeral_backing,
                          instance.flavor.ephemeral_gb * units.Gi)
            ])

    def test_create_images_and_backing_disk_info_none(self):
        # disk_info=None means there is nothing to create.
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        fake_backend = self.useFixture(fake_imagebackend.ImageBackendFixture())

        drvr._create_images_and_backing(self.context, instance,
                                        "/fake/instance/dir", None)

        # Assert that we did nothing
        self.assertEqual({}, fake_backend.created_disks)

    def _generate_target_ret(self, target_connect_addr=None):
        # Expected legacy pre_live_migration_result for the two fake
        # iSCSI volumes used by the mocked pre_live_migration tests.
        target_ret = {
        'graphics_listen_addrs': {'spice': '127.0.0.1', 'vnc': '127.0.0.1'},
        'target_connect_addr': target_connect_addr,
        'serial_listen_addr': '127.0.0.1',
        'volume': {
            '12345': {'connection_info': {u'data': {'device_path':
              u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'},
                      'serial': '12345'},
                      'disk_info': {'bus': 'scsi',
                                    'dev': 'sda',
                                    'type': 'disk'}},
            '67890': {'connection_info': {u'data': {'device_path':
              u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'},
                      'serial': '67890'},
                      'disk_info': {'bus': 'scsi',
                                    'dev': 'sdb',
                                    'type': 'disk'}}}}
        return target_ret

    def test_pre_live_migration_works_correctly_mocked(self):
        self._test_pre_live_migration_works_correctly_mocked()

    def test_pre_live_migration_with_transport_ip(self):
        # live_migration_inbound_addr should surface as target_connect_addr.
        self.flags(live_migration_inbound_addr='127.0.0.2',
                   group='libvirt')
        target_ret = self._generate_target_ret('127.0.0.2')
        self._test_pre_live_migration_works_correctly_mocked(target_ret)

    def _test_pre_live_migration_works_correctly_mocked(self,
                                                        target_ret=None):
        # Creating testdata
        vol = {'block_device_mapping': [
            {'connection_info': {'serial': '12345', u'data':
            {'device_path':
             u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}},
             'mount_device': '/dev/sda'},
            {'connection_info': {'serial': '67890', u'data':
            {'device_path':
             u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}},
             'mount_device': '/dev/sdb'}]}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        class FakeNetworkInfo(object):
            def fixed_ips(self):
                return ["test_ip_addr"]

        def fake_none(*args, **kwargs):
            return

        self.stubs.Set(drvr, '_create_images_and_backing', fake_none)

        instance = objects.Instance(**self.test_instance)
        c = context.get_admin_context()
        nw_info = FakeNetworkInfo()

        # Creating mocks
        self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
        driver.block_device_info_get_mapping(vol
            ).AndReturn(vol['block_device_mapping'])
        self.mox.StubOutWithMock(drvr, "_connect_volume")
        # Expect one _connect_volume call per block device, in order.
        for v in vol['block_device_mapping']:
            disk_info = {
                'bus': "scsi",
                'dev': v['mount_device'].rpartition("/")[2],
                'type': "disk"
                }
            drvr._connect_volume(v['connection_info'], disk_info)
        self.mox.StubOutWithMock(drvr, 'plug_vifs')
        drvr.plug_vifs(mox.IsA(instance), nw_info)
        self.mox.ReplayAll()

        migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
            block_migration=False,
            instance_relative_path='foo',
            is_shared_block_storage=False,
            is_shared_instance_path=False,
        )
        result = drvr.pre_live_migration(
            c, instance, vol, nw_info, None,
            migrate_data=migrate_data)
        if not target_ret:
            target_ret = self._generate_target_ret()
        self.assertEqual(
            result.to_legacy_dict(
                pre_migration_result=True)['pre_live_migration_result'],
            target_ret)

    @mock.patch.object(os, 'mkdir')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
    @mock.patch('nova.virt.libvirt.driver.remotefs.'
                'RemoteFilesystem.copy_file')
    @mock.patch('nova.virt.driver.block_device_info_get_mapping')
    @mock.patch('nova.virt.configdrive.required_by', return_value=True)
    def test_pre_live_migration_block_with_config_drive_success(
            self, mock_required_by, block_device_info_get_mapping,
            mock_copy_file, mock_get_instance_path, mock_mkdir):
        # Block migration with an iso9660 config drive: the config drive
        # file must be copied over from the source host via remotefs.
        self.flags(config_drive_format='iso9660')
        vol = {'block_device_mapping': [
            {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
            {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
        fake_instance_path = os.path.join(cfg.CONF.instances_path,
                                          '/fake_instance_uuid')
        mock_get_instance_path.return_value = fake_instance_path

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        migrate_data = objects.LibvirtLiveMigrateData()
        migrate_data.is_shared_instance_path = False
        migrate_data.is_shared_block_storage = False
        migrate_data.block_migration = True
        migrate_data.instance_relative_path = 'foo'
        src = "%s:%s/disk.config" % (instance.host, fake_instance_path)

        result = drvr.pre_live_migration(
            self.context, instance, vol, [], None, migrate_data)

        block_device_info_get_mapping.assert_called_once_with(
            {'block_device_mapping': [
                {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
                {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}
            ]}
        )
        mock_copy_file.assert_called_once_with(src, fake_instance_path)

        # pre_live_migration also fills in the listen addresses in-place;
        # mirror that on our expected object before comparing.
        migrate_data.graphics_listen_addrs_vnc = '127.0.0.1'
        migrate_data.graphics_listen_addrs_spice = '127.0.0.1'
        migrate_data.serial_listen_addr = '127.0.0.1'
        self.assertEqual(migrate_data, result)

    @mock.patch('nova.virt.driver.block_device_info_get_mapping',
                return_value=())
    def test_pre_live_migration_block_with_config_drive_mocked_with_vfat(
            self, block_device_info_get_mapping):
        # vfat config drives need no file copy; result carries empty volumes.
        self.flags(config_drive_format='vfat')
        # Creating testdata
        vol = {'block_device_mapping': [
            {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
            {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        instance.config_drive = 'True'

        migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
            is_shared_instance_path=False,
            is_shared_block_storage=False,
            block_migration=False,
            instance_relative_path='foo',
        )
        res_data = drvr.pre_live_migration(
            self.context, instance, vol, [], None, migrate_data)
        res_data = res_data.to_legacy_dict(pre_migration_result=True)
        block_device_info_get_mapping.assert_called_once_with(
            {'block_device_mapping': [
                {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
                {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}
            ]}
        )
        self.assertEqual({'graphics_listen_addrs': {'spice': '127.0.0.1',
                                                    'vnc': '127.0.0.1'},
                          'target_connect_addr': None,
                          'serial_listen_addr': '127.0.0.1',
                          'volume': {}}, res_data['pre_live_migration_result'])

    def test_pre_live_migration_vol_backed_works_correctly_mocked(self):
        # Volume-backed instance: volumes get connected and the instance
        # directory is created under the (temporary) instances_path.
        # Creating testdata, using temp dir.
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            vol = {'block_device_mapping': [
              {'connection_info': {'serial': '12345', u'data':
              {'device_path':
               u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}},
               'mount_device': '/dev/sda'},
              {'connection_info': {'serial': '67890', u'data':
              {'device_path':
               u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}},
               'mount_device': '/dev/sdb'}]}
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            def fake_none(*args, **kwargs):
                return

            self.stubs.Set(drvr, '_create_images_and_backing', fake_none)

            class FakeNetworkInfo(object):
                def fixed_ips(self):
                    return ["test_ip_addr"]
            inst_ref = objects.Instance(**self.test_instance)
            c = context.get_admin_context()
            nw_info = FakeNetworkInfo()
            # Creating mocks
            self.mox.StubOutWithMock(drvr, "_connect_volume")
            # One _connect_volume expectation per block device, in order.
            for v in vol['block_device_mapping']:
                disk_info = {
                    'bus': "scsi",
                    'dev': v['mount_device'].rpartition("/")[2],
                    'type': "disk"
                    }
                drvr._connect_volume(v['connection_info'], disk_info)
            self.mox.StubOutWithMock(drvr, 'plug_vifs')
            drvr.plug_vifs(mox.IsA(inst_ref), nw_info)
            self.mox.ReplayAll()

            migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
                is_shared_instance_path=False,
                is_shared_block_storage=False,
                is_volume_backed=True,
                block_migration=False,
                instance_relative_path=inst_ref['name'],
                disk_over_commit=False,
                disk_available_mb=123,
                image_type='qcow2',
                filename='foo',
            )
            ret = drvr.pre_live_migration(c, inst_ref, vol, nw_info, None,
                                          migrate_data)
            # Expected legacy result for the two connected volumes.
            target_ret = {
            'graphics_listen_addrs': {'spice': '127.0.0.1',
                                      'vnc': '127.0.0.1'},
            'target_connect_addr': None,
            'serial_listen_addr': '127.0.0.1',
            'volume': {
            '12345': {'connection_info': {u'data': {'device_path':
              u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'},
                      'serial': '12345'},
                      'disk_info': {'bus': 'scsi',
                                    'dev': 'sda',
                                    'type': 'disk'}},
            '67890': {'connection_info': {u'data': {'device_path':
              u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'},
                      'serial': '67890'},
                      'disk_info': {'bus': 'scsi',
                                    'dev': 'sdb',
                                    'type': 'disk'}}}}
            self.assertEqual(
                ret.to_legacy_dict(True)['pre_live_migration_result'],
                target_ret)
            # The per-instance directory must have been created.
            self.assertTrue(os.path.exists('%s/%s/' % (tmpdir,
                                                       inst_ref['name'])))

    def test_pre_live_migration_plug_vifs_retry_fails(self):
        # plug_vifs failing on every attempt exhausts the retry budget and
        # the last ProcessExecutionError propagates.
        self.flags(live_migration_retry_count=3)
        instance = objects.Instance(**self.test_instance)

        def fake_plug_vifs(instance, network_info):
            raise processutils.ProcessExecutionError()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, 'plug_vifs', fake_plug_vifs)
        # Avoid real sleeps between retries.
        self.stubs.Set(eventlet.greenthread, 'sleep',
                       lambda x: eventlet.sleep(0))
        disk_info_json = jsonutils.dumps({})
        migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
            is_shared_block_storage=True,
            is_shared_instance_path=True,
            block_migration=False,
        )
        self.assertRaises(processutils.ProcessExecutionError,
                          drvr.pre_live_migration,
                          self.context, instance, block_device_info=None,
                          network_info=[], disk_info=disk_info_json,
                          migrate_data=migrate_data)

    def test_pre_live_migration_plug_vifs_retry_works(self):
        # plug_vifs succeeding before the retry budget is spent lets
        # pre_live_migration complete without raising.
        self.flags(live_migration_retry_count=3)
        called = {'count': 0}
        instance = objects.Instance(**self.test_instance)

        def fake_plug_vifs(instance, network_info):
            called['count'] += 1
            if called['count'] < CONF.live_migration_retry_count:
                raise processutils.ProcessExecutionError()
            else:
                return

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, 'plug_vifs', fake_plug_vifs)
        self.stubs.Set(eventlet.greenthread, 'sleep',
                       lambda x: eventlet.sleep(0))
        disk_info_json = jsonutils.dumps({})
        migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
            is_shared_block_storage=True,
            is_shared_instance_path=True,
            block_migration=False,
        )
        drvr.pre_live_migration(self.context, instance, block_device_info=None,
                                network_info=[], disk_info=disk_info_json,
                                migrate_data=migrate_data)

    def test_pre_live_migration_image_not_created_with_shared_storage(self):
        # With shared storage (in its various combinations) no backing
        # images should be created on the destination.
        migrate_data_set = [{'is_shared_block_storage': False,
                             'is_shared_instance_path': True,
                             'is_volume_backed': False,
                             'filename': 'foo',
                             'instance_relative_path': 'bar',
                             'disk_over_commit': False,
                             'disk_available_mb': 123,
                             'image_type': 'qcow2',
                             'block_migration': False},
                            {'is_shared_block_storage': True,
                             'is_shared_instance_path': True,
                             'is_volume_backed': False,
                             'filename': 'foo',
                             'instance_relative_path': 'bar',
                             'disk_over_commit': False,
                             'disk_available_mb': 123,
                             'image_type': 'qcow2',
                             'block_migration': False},
                            {'is_shared_block_storage': False,
                             'is_shared_instance_path': True,
                             'is_volume_backed': False,
                             'filename': 'foo',
                             'instance_relative_path': 'bar',
                             'disk_over_commit': False,
                             'disk_available_mb': 123,
                             'image_type': 'qcow2',
                             'block_migration': True}]

        def _to_obj(d):
            return migrate_data_obj.LibvirtLiveMigrateData(**d)
        migrate_data_set = map(_to_obj, migrate_data_set)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        # creating mocks
        with test.nested(
            mock.patch.object(drvr,
                              '_create_images_and_backing'),
            mock.patch.object(drvr,
                              'ensure_filtering_rules_for_instance'),
            mock.patch.object(drvr, 'plug_vifs'),
        ) as (
            create_image_mock,
            rules_mock,
            plug_mock,
        ):
            disk_info_json = jsonutils.dumps({})
            for migrate_data in migrate_data_set:
                res = drvr.pre_live_migration(self.context, instance,
                                              block_device_info=None,
                                              network_info=[],
                                              disk_info=disk_info_json,
                                              migrate_data=migrate_data)
                # Shared storage: no image creation on the destination.
                self.assertFalse(create_image_mock.called)
                self.assertIsInstance(res,
                                      objects.LibvirtLiveMigrateData)

    def test_pre_live_migration_with_not_shared_instance_path(self):
        # Without a shared instance path the destination must create the
        # instance dir and populate backing images (with source fallback).
        migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
            is_shared_block_storage=False,
            is_shared_instance_path=False,
            block_migration=False,
            instance_relative_path='foo',
        )

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)

        def check_instance_dir(context, instance,
                               instance_dir, disk_info,
                               fallback_from_host=False):
            self.assertTrue(instance_dir)
        # creating mocks
        with test.nested(
            mock.patch.object(drvr,
                              '_create_images_and_backing',
                              side_effect=check_instance_dir),
            mock.patch.object(drvr,
                              'ensure_filtering_rules_for_instance'),
            mock.patch.object(drvr, 'plug_vifs'),
        ) as (
            create_image_mock,
            rules_mock,
            plug_mock,
        ):
            disk_info_json = jsonutils.dumps({})
            res = drvr.pre_live_migration(self.context, instance,
                                          block_device_info=None,
                                          network_info=[],
                                          disk_info=disk_info_json,
                                          migrate_data=migrate_data)
            # Backing images are fetched with the source host as fallback.
            create_image_mock.assert_has_calls(
                [mock.call(self.context, instance, mock.ANY, {},
                           fallback_from_host=instance.host)])
            self.assertIsInstance(res, objects.LibvirtLiveMigrateData)

    def test_pre_live_migration_recreate_disk_info(self):
        # Block migration must regenerate disk.info on the destination from
        # the incoming disk_info so image formats are preserved.
        migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
            is_shared_block_storage=False,
            is_shared_instance_path=False,
            block_migration=True,
            instance_relative_path='/some/path/',
        )
        disk_info = [{'disk_size': 5368709120, 'type': 'raw',
                      'virt_disk_size': 5368709120,
                      'path': '/some/path/disk',
                      'backing_file': '',
                      'over_committed_disk_size': 0},
                     {'disk_size': 1073741824, 'type': 'raw',
                      'virt_disk_size': 1073741824,
                      'path': '/some/path/disk.eph0',
                      'backing_file': '',
                      'over_committed_disk_size': 0}]
        image_disk_info = {'/some/path/disk': 'raw',
                           '/some/path/disk.eph0': 'raw'}

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        instance_path = os.path.dirname(disk_info[0]['path'])
        disk_info_path = os.path.join(instance_path, 'disk.info')

        with test.nested(
            mock.patch.object(os, 'mkdir'),
            mock.patch.object(fake_libvirt_utils, 'write_to_file'),
            mock.patch.object(drvr, '_create_images_and_backing')
        ) as (
            mkdir, write_to_file, create_images_and_backing
        ):
            drvr.pre_live_migration(self.context, instance,
                                    block_device_info=None,
                                    network_info=[],
                                    disk_info=jsonutils.dumps(disk_info),
                                    migrate_data=migrate_data)
            # disk.info must map each path to its image format.
            write_to_file.assert_called_with(disk_info_path,
                                             jsonutils.dumps(image_disk_info))

    def test_pre_live_migration_with_perf_events(self):
        # Supported perf events on the destination are reported back in
        # the migrate data so the source can configure them.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr._supported_perf_events = ['cmt']

        migrate_data = migrate_data_obj.LibvirtLiveMigrateData(
            is_shared_block_storage=False,
            is_shared_instance_path=False,
            block_migration=False,
            instance_relative_path='foo',
        )

        instance = objects.Instance(**self.test_instance)

        res = drvr.pre_live_migration(self.context, instance,
                                      block_device_info=None,
                                      network_info=[],
                                      disk_info=None,
                                      migrate_data=migrate_data)
        self.assertEqual(['cmt'], res.supported_perf_events)

    def test_get_instance_disk_info_works_correctly(self):
        # get_instance_disk_info should report size/format/backing data for
        # a raw root disk and a qcow2 local disk parsed from the domain XML.
        # Test data
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")

        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance.name:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
        fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
        fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'

        self.mox.StubOutWithMock(os.path, "getsize")
        os.path.getsize('/test/disk').AndReturn((10737418240))
        os.path.getsize('/test/disk.local').AndReturn((3328599655))

        # Canned qemu-img info output for the qcow2 disk.
        ret = ("image: /test/disk\n"
               "file format: raw\n"
               "virtual size: 20G (21474836480 bytes)\n"
               "disk size: 3.1G\n"
               "cluster_size: 2097152\n"
               "backing file: /test/dummy (actual path: /backing/file)\n")

        self.mox.StubOutWithMock(os.path, "exists")
        os.path.exists('/test/disk.local').AndReturn(True)

        self.mox.StubOutWithMock(utils, "execute")
        utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                      '/test/disk.local', prlimit = images.QEMU_IMG_LIMITS,
                      ).AndReturn((ret, ''))

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_instance_disk_info(instance)
        info = jsonutils.loads(info)
        self.assertEqual(info[0]['type'], 'raw')
        self.assertEqual(info[0]['path'], '/test/disk')
        self.assertEqual(info[0]['disk_size'], 10737418240)
        self.assertEqual(info[0]['backing_file'], "")
        self.assertEqual(info[0]['over_committed_disk_size'], 0)
        self.assertEqual(info[1]['type'], 'qcow2')
        self.assertEqual(info[1]['path'], '/test/disk.local')
        self.assertEqual(info[1]['virt_disk_size'], 21474836480)
        self.assertEqual(info[1]['backing_file'], "file")
        self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)

    def test_post_live_migration(self):
        # After migration each volume must be disconnected on the source,
        # using a freshly initialized connection for its device name.
        vol = {'block_device_mapping': [
                  {'connection_info': {
                      'data': {'multipath_id': 'dummy1'},
                      'serial': 'fake_serial1'},
                    'mount_device': '/dev/sda',
                   },
                  {'connection_info': {
                      'data': {},
                      'serial': 'fake_serial2'},
                    'mount_device': '/dev/sdb', }]}

        def fake_initialize_connection(context, volume_id, connector):
            return {'data': {}}

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        fake_connector = {'host': 'fake'}
        inst_ref = {'id': 'foo'}
        cntx = context.get_admin_context()

        # Set up the mock expectations
        with test.nested(
            mock.patch.object(driver, 'block_device_info_get_mapping',
                              return_value=vol['block_device_mapping']),
            mock.patch.object(drvr, "get_volume_connector",
                              return_value=fake_connector),
            mock.patch.object(drvr._volume_api, "initialize_connection",
                              side_effect=fake_initialize_connection),
            mock.patch.object(drvr, '_disconnect_volume')
        ) as (block_device_info_get_mapping, get_volume_connector,
              initialize_connection, _disconnect_volume):
            drvr.post_live_migration(cntx, inst_ref, vol)

            block_device_info_get_mapping.assert_has_calls([
                mock.call(vol)])
            get_volume_connector.assert_has_calls([
                mock.call(inst_ref)])
            # multipath_id is carried over so the multipath device can be
            # torn down; device names come from the mount_device basename.
            _disconnect_volume.assert_has_calls([
                mock.call({'data': {'multipath_id': 'dummy1'}}, 'sda'),
                mock.call({'data': {}}, 'sdb')])

    def test_get_instance_disk_info_excludes_volumes(self):
        # Disks that correspond to attached volumes (vdc/vdd) must be
        # excluded from the reported disk info.
        # Test data
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/fake/path/to/volume1'/>"
                    "<target dev='vdc' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/fake/path/to/volume2'/>"
                    "<target dev='vdd' bus='virtio'/></disk>"
                    "</devices></domain>")

        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance.name:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
        fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
        fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'

        self.mox.StubOutWithMock(os.path, "getsize")
        os.path.getsize('/test/disk').AndReturn((10737418240))
        os.path.getsize('/test/disk.local').AndReturn((3328599655))

        ret = ("image: /test/disk\n"
               "file format: raw\n"
               "virtual size: 20G (21474836480 bytes)\n"
               "disk size: 3.1G\n"
               "cluster_size: 2097152\n"
               "backing file: /test/dummy (actual path: /backing/file)\n")

        self.mox.StubOutWithMock(os.path, "exists")
        os.path.exists('/test/disk.local').AndReturn(True)

        self.mox.StubOutWithMock(utils, "execute")
        utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                      '/test/disk.local', prlimit = images.QEMU_IMG_LIMITS,
                      ).AndReturn((ret, ''))

        self.mox.ReplayAll()

        conn_info = {'driver_volume_type': 'fake'}
        info = {'block_device_mapping': [
                  {'connection_info': conn_info, 'mount_device': '/dev/vdc'},
                  {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_instance_disk_info(instance,
                                           block_device_info=info)
        info = jsonutils.loads(info)
        # Only the two non-volume disks are reported.
        self.assertEqual(info[0]['type'], 'raw')
        self.assertEqual(info[0]['path'], '/test/disk')
        self.assertEqual(info[0]['disk_size'], 10737418240)
        self.assertEqual(info[0]['backing_file'], "")
        self.assertEqual(info[0]['over_committed_disk_size'], 0)
        self.assertEqual(info[1]['type'], 'qcow2')
        self.assertEqual(info[1]['path'], '/test/disk.local')
        self.assertEqual(info[1]['virt_disk_size'], 21474836480)
        self.assertEqual(info[1]['backing_file'], "file")
        self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)

    def test_get_instance_disk_info_no_bdinfo_passed(self):
        # NOTE(ndipanov): _get_disk_overcomitted_size_total calls this method
        # without access to Nova's block device information. We want to make
        # sure that we guess volumes mostly correctly in that case as well
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='block'><driver name='qemu' type='raw'/>"
                    "<source file='/fake/path/to/volume1'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")

        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance.name:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi

        self.mox.StubOutWithMock(os.path, "getsize")
        os.path.getsize('/test/disk').AndReturn((10737418240))

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_instance_disk_info(instance)

        info = jsonutils.loads(info)
        # The type='block' disk is guessed to be a volume and skipped.
        self.assertEqual(1, len(info))
        self.assertEqual(info[0]['type'], 'raw')
        self.assertEqual(info[0]['path'], '/test/disk')
        self.assertEqual(info[0]['disk_size'], 10737418240)
        self.assertEqual(info[0]['backing_file'], "")
        self.assertEqual(info[0]['over_committed_disk_size'], 0)

    def test_spawn_with_network_info(self):
        # End-to-end spawn through the fake libvirt connection, exercising
        # the baselineCPU fallback when the EXPAND_FEATURES flag is absent.
        # Preparing mocks
        def fake_none(*args, **kwargs):
            return

        def fake_getLibVersion():
            return fakelibvirt.FAKE_LIBVIRT_VERSION

        def fake_getCapabilities():
            return """
            <capabilities>
                <host>
                    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                    <cpu>
                      <arch>x86_64</arch>
                      <model>Penryn</model>
                      <vendor>Intel</vendor>
                      <topology sockets='1' cores='2' threads='1'/>
                      <feature name='xtpr'/>
                    </cpu>
                </host>
            </capabilities>
            """

        def fake_baselineCPU(cpu, flag):
            return """<cpu mode='custom' match='exact'>
                        <model fallback='allow'>Penryn</model>
                        <vendor>Intel</vendor>
                        <feature policy='require' name='xtpr'/>
                      </cpu>
                   """

        # _fake_network_info must be called before create_fake_libvirt_mock(),
        # as _fake_network_info calls importutils.import_class() and
        # create_fake_libvirt_mock() mocks importutils.import_class().
        network_info = _fake_network_info(self, 1)
        self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                      getCapabilities=fake_getCapabilities,
                                      getVersion=lambda: 1005001,
                                      baselineCPU=fake_baselineCPU)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456  # we send an int to test sha1 call
        instance = objects.Instance(**instance_ref)
        instance.config_drive = ''
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                                 '_build_device_metadata')
        libvirt_driver.LibvirtDriver._build_device_metadata(self.context,
                                                            instance)

        # Mock out the get_info method of the LibvirtDriver so that the polling
        # in the spawn method of the LibvirtDriver returns immediately
        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, 'get_info')
        libvirt_driver.LibvirtDriver.get_info(instance
            ).AndReturn(hardware.InstanceInfo(state=power_state.RUNNING))

        # Start test
        self.mox.ReplayAll()

        with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
            del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.stubs.Set(drvr.firewall_driver, 'setup_basic_filtering',
                           fake_none)
            self.stubs.Set(drvr.firewall_driver, 'prepare_instance_filter',
                           fake_none)
            self.stubs.Set(imagebackend.Image, 'cache', fake_none)

            drvr.spawn(self.context, instance, image_meta, [], 'herp',
                       network_info=network_info)

        # Clean up any on-disk artifacts left by the spawn.
        path = os.path.join(CONF.instances_path, instance['name'])
        if os.path.isdir(path):
            shutil.rmtree(path)

        path = os.path.join(CONF.instances_path,
                            CONF.image_cache_subdirectory_name)
        if os.path.isdir(path):
            shutil.rmtree(os.path.join(CONF.instances_path,
                                       CONF.image_cache_subdirectory_name))

    # Methods called directly by spawn()
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_get_guest_xml')
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_domain_and_network')
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
    # Methods called by _create_configdrive via post_xml_callback
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_build_device_metadata')
    @mock.patch.object(instance_metadata, 'InstanceMetadata')
    def test_spawn_with_config_drive(self, mock_instance_metadata,
                                     mock_build_device_metadata,
                                     mock_get_info,
                                     mock_create_domain_and_network,
                                     mock_get_guest_xml):
        # spawn() with config_drive set must import the generated
        # 'disk.config' via the image backend's post_xml_callback.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        instance.config_drive = 'True'
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        instance_info = hardware.InstanceInfo(state=power_state.RUNNING)

        mock_build_device_metadata.return_value = None

        def fake_create_domain_and_network(
                context, xml, instance, network_info, disk_info,
                block_device_info=None, power_on=True, reboot=False,
                vifs_already_plugged=False, post_xml_callback=None):
            # The config disk should be created by this callback, so we need
            # to execute it.
            # Run the callback so the config drive is actually created.
            post_xml_callback()

        fake_backend = self.useFixture(
            fake_imagebackend.ImageBackendFixture(exists=lambda _: False))

        mock_get_info.return_value = instance_info
        mock_create_domain_and_network.side_effect = \
            fake_create_domain_and_network

        drvr.spawn(self.context, instance, image_meta, [], None)

        # We should have imported 'disk.config'
        config_disk = fake_backend.disks['disk.config']
        config_disk.import_file.assert_called_once_with(instance, mock.ANY,
                                                        'disk.config')

    def test_spawn_without_image_meta(self):
        """Spawning with a bare image ref still creates the default disks."""
        def fake_none(*args, **kwargs):
            return

        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        instance = objects.Instance(**instance_ref)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, '_get_guest_xml', fake_none)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)

        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        fake_backend = self.useFixture(fake_imagebackend.ImageBackendFixture())

        drvr.spawn(self.context, instance, image_meta, [], None)

        # We should have created a root disk and an ephemeral disk
        self.assertEqual(['disk', 'disk.local'],
                         sorted(fake_backend.created_disks.keys()))

    def test_spawn_from_volume_calls_cache(self):
        """Image cache is used only when the instance has a real image ref.

        Volume-backed instances (empty or placeholder image ref with a
        root block device) must not populate the image cache for the root
        disk.
        """
        self.cache_called_for_disk = False

        def fake_none(*args, **kwargs):
            return

        def fake_cache(*args, **kwargs):
            # Record whether the root disk image was pulled into the cache.
            if kwargs.get('image_id') == 'my_fake_image':
                self.cache_called_for_disk = True

        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        self.stubs.Set(drvr, '_get_guest_xml', fake_none)
        self.stubs.Set(imagebackend.Image, 'cache', fake_cache)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)

        block_device_info = {'root_device_name': '/dev/vda',
                             'block_device_mapping': [
                                 {'mount_device': 'vda',
'boot_index': 0} ] } image_meta = objects.ImageMeta.from_dict(self.test_image_meta) # Volume-backed instance created without image instance_ref = self.test_instance instance_ref['image_ref'] = '' instance_ref['root_device_name'] = '/dev/vda' instance_ref['uuid'] = uuidutils.generate_uuid() instance = objects.Instance(**instance_ref) drvr.spawn(self.context, instance, image_meta, [], None, block_device_info=block_device_info) self.assertFalse(self.cache_called_for_disk) # Booted from volume but with placeholder image instance_ref = self.test_instance instance_ref['image_ref'] = 'my_fake_image' instance_ref['root_device_name'] = '/dev/vda' instance_ref['uuid'] = uuidutils.generate_uuid() instance = objects.Instance(**instance_ref) drvr.spawn(self.context, instance, image_meta, [], None, block_device_info=block_device_info) self.assertFalse(self.cache_called_for_disk) # Booted from an image instance_ref['image_ref'] = 'my_fake_image' instance_ref['uuid'] = uuidutils.generate_uuid() instance = objects.Instance(**instance_ref) drvr.spawn(self.context, instance, image_meta, [], None) self.assertTrue(self.cache_called_for_disk) def test_start_lxc_from_volume(self): self.flags(virt_type="lxc", group='libvirt') def check_setup_container(image, container_dir=None): self.assertIsInstance(image, imgmodel.LocalBlockImage) self.assertEqual(image.path, '/dev/path/to/dev') return '/dev/nbd1' bdm = { 'guest_format': None, 'boot_index': 0, 'mount_device': '/dev/sda', 'connection_info': { 'driver_volume_type': 'iscsi', 'serial': 'afc1', 'data': { 'access_mode': 'rw', 'target_discovered': False, 'encrypted': False, 'qos_specs': None, 'target_iqn': 'iqn: volume-afc1', 'target_portal': 'ip: 3260', 'volume_id': 'afc1', 'target_lun': 1, 'auth_password': 'uj', 'auth_username': '47', 'auth_method': 'CHAP' } }, 'disk_bus': 'scsi', 'device_type': 'disk', 'delete_on_termination': False } def _connect_volume_side_effect(connection_info, disk_info): bdm['connection_info']['data']['device_path'] 
= '/dev/path/to/dev' def _get(key, opt=None): return bdm.get(key, opt) def getitem(key): return bdm[key] def setitem(key, val): bdm[key] = val bdm_mock = mock.MagicMock() bdm_mock.__getitem__.side_effect = getitem bdm_mock.__setitem__.side_effect = setitem bdm_mock.get = _get disk_mock = mock.MagicMock() disk_mock.source_path = '/dev/path/to/dev' block_device_info = {'block_device_mapping': [bdm_mock], 'root_device_name': '/dev/sda'} # Volume-backed instance created without image instance_ref = self.test_instance instance_ref['image_ref'] = '' instance_ref['root_device_name'] = '/dev/sda' instance_ref['ephemeral_gb'] = 0 instance_ref['uuid'] = uuidutils.generate_uuid() inst_obj = objects.Instance(**instance_ref) image_meta = objects.ImageMeta.from_dict({}) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with test.nested( mock.patch.object(drvr, 'plug_vifs'), mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'), mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'), mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'), mock.patch.object(drvr, '_create_domain'), mock.patch.object(drvr, '_connect_volume', side_effect=_connect_volume_side_effect), mock.patch.object(drvr, '_get_volume_config', return_value=disk_mock), mock.patch.object(drvr, 'get_info', return_value=hardware.InstanceInfo( state=power_state.RUNNING)), mock.patch('nova.virt.disk.api.setup_container', side_effect=check_setup_container), mock.patch('nova.virt.disk.api.teardown_container'), mock.patch.object(objects.Instance, 'save')): drvr.spawn(self.context, inst_obj, image_meta, [], None, network_info=[], block_device_info=block_device_info) self.assertEqual('/dev/nbd1', inst_obj.system_metadata.get( 'rootfs_device_name')) def test_spawn_with_pci_devices(self): def fake_none(*args, **kwargs): return None def fake_get_info(instance): return hardware.InstanceInfo(state=power_state.RUNNING) class FakeLibvirtPciDevice(object): def dettach(self): return None 
def reset(self): return None def fake_node_device_lookup_by_name(address): pattern = ("pci_%(hex)s{4}_%(hex)s{2}_%(hex)s{2}_%(oct)s{1}" % dict(hex='[\da-f]', oct='[0-8]')) pattern = re.compile(pattern) if pattern.match(address) is None: raise fakelibvirt.libvirtError() return FakeLibvirtPciDevice() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr, '_get_guest_xml', fake_none) self.stubs.Set(drvr, '_create_domain_and_network', fake_none) self.stubs.Set(drvr, 'get_info', fake_get_info) mock_connection = mock.MagicMock( nodeDeviceLookupByName=fake_node_device_lookup_by_name) instance_ref = self.test_instance instance_ref['image_ref'] = 'my_fake_image' instance = objects.Instance(**instance_ref) instance['pci_devices'] = objects.PciDeviceList( objects=[objects.PciDevice(address='0000:00:00.0')]) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) self.useFixture(fake_imagebackend.ImageBackendFixture()) with mock.patch.object(drvr, '_get_connection', return_value=mock_connection): drvr.spawn(self.context, instance, image_meta, [], None) def _test_create_image_plain(self, os_type='', filename='', mkfs=False): gotFiles = [] def fake_none(*args, **kwargs): return def fake_get_info(instance): return hardware.InstanceInfo(state=power_state.RUNNING) instance_ref = self.test_instance instance_ref['image_ref'] = 1 instance = objects.Instance(**instance_ref) instance['os_type'] = os_type drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr, '_get_guest_xml', fake_none) self.stubs.Set(drvr, '_create_domain_and_network', fake_none) self.stubs.Set(drvr, 'get_info', fake_get_info) if mkfs: self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND', {os_type: 'mkfs.ext4 --label %(fs_label)s %(target)s'}) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) self.useFixture( 
fake_imagebackend.ImageBackendFixture(got_files=gotFiles)) drvr._create_image(self.context, instance, disk_info['mapping']) drvr._get_guest_xml(self.context, instance, None, disk_info, image_meta) wantFiles = [ {'filename': '356a192b7913b04c54574d18c28d46e6395428ab', 'size': 10 * units.Gi}, {'filename': filename, 'size': 20 * units.Gi}, ] self.assertEqual(gotFiles, wantFiles) def test_create_image_plain_os_type_blank(self): self._test_create_image_plain(os_type='', filename=self._EPHEMERAL_20_DEFAULT, mkfs=False) def test_create_image_plain_os_type_none(self): self._test_create_image_plain(os_type=None, filename=self._EPHEMERAL_20_DEFAULT, mkfs=False) def test_create_image_plain_os_type_set_no_fs(self): self._test_create_image_plain(os_type='test', filename=self._EPHEMERAL_20_DEFAULT, mkfs=False) def test_create_image_plain_os_type_set_with_fs(self): ephemeral_file_name = ('ephemeral_20_%s' % utils.get_hash_str( 'mkfs.ext4 --label %(fs_label)s %(target)s')[:7]) self._test_create_image_plain(os_type='test', filename=ephemeral_file_name, mkfs=True) def test_create_image_initrd(self): kernel_id = uuids.kernel_id ramdisk_id = uuids.ramdisk_id kernel_fname = imagecache.get_cache_fname(kernel_id) ramdisk_fname = imagecache.get_cache_fname(ramdisk_id) filename = self._EPHEMERAL_20_DEFAULT gotFiles = [] instance_ref = self.test_instance instance_ref['image_ref'] = uuids.instance_id instance_ref['kernel_id'] = uuids.kernel_id instance_ref['ramdisk_id'] = uuids.ramdisk_id instance_ref['os_type'] = 'test' instance = objects.Instance(**instance_ref) driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) fake_backend = self.useFixture( fake_imagebackend.ImageBackendFixture(got_files=gotFiles)) with test.nested( mock.patch.object(driver, '_get_guest_xml'), mock.patch.object(driver, '_create_domain_and_network'), mock.patch.object(driver, 'get_info', return_value=[hardware.InstanceInfo(state=power_state.RUNNING)]) ): image_meta = 
objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) driver._create_image(self.context, instance, disk_info['mapping']) # Assert that kernel and ramdisk were fetched with fetch_raw_image # and no size for name, disk in six.iteritems(fake_backend.disks): cache = disk.cache if name in ('kernel', 'ramdisk'): cache.assert_called_once_with( context=self.context, filename=mock.ANY, image_id=mock.ANY, fetch_func=fake_libvirt_utils.fetch_raw_image) wantFiles = [ {'filename': kernel_fname, 'size': None}, {'filename': ramdisk_fname, 'size': None}, {'filename': imagecache.get_cache_fname(uuids.instance_id), 'size': 10 * units.Gi}, {'filename': filename, 'size': 20 * units.Gi}, ] self.assertEqual(wantFiles, gotFiles) def _create_image_helper(self, callback, exists=None, suffix='', test_create_configdrive=False): def fake_none(*args, **kwargs): return def fake_get_info(instance): return hardware.InstanceInfo(state=power_state.RUNNING) instance_ref = self.test_instance instance_ref['image_ref'] = 1 # NOTE(mikal): use this callback to tweak the instance to match # what you're trying to test callback(instance_ref) instance = objects.Instance(**instance_ref) # Turn on some swap to exercise that codepath in _create_image instance.flavor.swap = 500 drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr, '_get_guest_xml', fake_none) self.stubs.Set(drvr, '_create_domain_and_network', fake_none) self.stubs.Set(drvr, 'get_info', fake_get_info) self.stubs.Set(instance_metadata, 'InstanceMetadata', fake_none) self.stubs.Set(nova.virt.configdrive.ConfigDriveBuilder, 'make_drive', fake_none) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) gotFiles = [] imported_files = [] self.useFixture(fake_imagebackend.ImageBackendFixture( got_files=gotFiles, imported_files=imported_files, exists=exists)) 
if test_create_configdrive: drvr._create_configdrive(self.context, instance) else: drvr._create_image(self.context, instance, disk_info['mapping'], suffix=suffix) drvr._get_guest_xml(self.context, instance, None, disk_info, image_meta) return gotFiles, imported_files def test_create_image_with_swap(self): def enable_swap(instance_ref): # Turn on some swap to exercise that codepath in _create_image instance_ref['system_metadata']['instance_type_swap'] = 500 gotFiles, _ = self._create_image_helper(enable_swap) wantFiles = [ {'filename': '356a192b7913b04c54574d18c28d46e6395428ab', 'size': 10 * units.Gi}, {'filename': self._EPHEMERAL_20_DEFAULT, 'size': 20 * units.Gi}, {'filename': 'swap_500', 'size': 500 * units.Mi}, ] self.assertEqual(gotFiles, wantFiles) @mock.patch( 'nova.virt.libvirt.driver.LibvirtDriver._build_device_metadata', return_value=None) def test_create_configdrive(self, mock_save): def enable_configdrive(instance_ref): instance_ref['config_drive'] = 'true' # Ensure that we create a config drive and then import it into the # image backend store _, imported_files = self._create_image_helper( enable_configdrive, exists=lambda name: False, test_create_configdrive=True) self.assertTrue(imported_files[0][0].endswith('/disk.config')) self.assertEqual('disk.config', imported_files[0][1]) @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache', side_effect=exception.ImageNotFound(image_id='fake-id')) def test_create_image_not_exist_no_fallback(self, mock_cache): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) self.assertRaises(exception.ImageNotFound, drvr._create_image, self.context, instance, disk_info['mapping']) @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache') def test_create_image_not_exist_fallback(self, mock_cache): def 
side_effect(fetch_func, filename, size=None, *args, **kwargs): def second_call(fetch_func, filename, size=None, *args, **kwargs): # call copy_from_host ourselves because we mocked image.cache() fetch_func('fake-target') # further calls have no side effect mock_cache.side_effect = None mock_cache.side_effect = second_call # raise an error only the first call raise exception.ImageNotFound(image_id='fake-id') mock_cache.side_effect = side_effect drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) with mock.patch.object(libvirt_driver.libvirt_utils, 'copy_image') as mock_copy: drvr._create_image(self.context, instance, disk_info['mapping'], fallback_from_host='fake-source-host') mock_copy.assert_called_once_with(src='fake-target', dest='fake-target', host='fake-source-host', receive=True) @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache') def test_create_image_resize_snap_backend(self, mock_cache): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) instance.task_state = task_states.RESIZE_FINISH image_meta = objects.ImageMeta.from_dict(self.test_image_meta) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) fake_backend = self.useFixture(fake_imagebackend.ImageBackendFixture()) drvr._create_image(self.context, instance, disk_info['mapping']) # Assert we called create_snap on the root disk fake_backend.disks['disk'].create_snap.assert_called_once_with( libvirt_utils.RESIZE_SNAPSHOT_NAME) @mock.patch.object(utils, 'execute') def test_create_ephemeral_specified_fs(self, mock_exec): self.flags(default_ephemeral_format='ext3') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux', 
is_block_dev=True, specified_fs='ext4') mock_exec.assert_called_once_with('mkfs', '-t', 'ext4', '-F', '-L', 'myVol', '/dev/something', run_as_root=True) def test_create_ephemeral_specified_fs_not_valid(self): CONF.set_override('default_ephemeral_format', 'ext4') ephemerals = [{'device_type': 'disk', 'disk_bus': 'virtio', 'device_name': '/dev/vdb', 'guest_format': 'dummy', 'size': 1}] block_device_info = { 'ephemerals': ephemerals} instance_ref = self.test_instance instance_ref['image_ref'] = 1 instance = objects.Instance(**instance_ref) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) image_meta = objects.ImageMeta.from_dict({'disk_format': 'raw'}) disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type, instance, image_meta) disk_info['mapping'].pop('disk.local') with test.nested( mock.patch.object(utils, 'execute'), mock.patch.object(drvr, 'get_info'), mock.patch.object(drvr, '_create_domain_and_network'), mock.patch.object(imagebackend.Image, 'verify_base_size'), mock.patch.object(imagebackend.Image, 'get_disk_size')): self.assertRaises(exception.InvalidBDMFormat, drvr._create_image, context, instance, disk_info['mapping'], block_device_info=block_device_info) def test_create_ephemeral_default(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.mox.StubOutWithMock(utils, 'execute') utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol', '/dev/something', run_as_root=True) self.mox.ReplayAll() drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux', is_block_dev=True) def test_create_ephemeral_with_conf(self): CONF.set_override('default_ephemeral_format', 'ext4') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.mox.StubOutWithMock(utils, 'execute') utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol', '/dev/something', run_as_root=True) self.mox.ReplayAll() drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux', is_block_dev=True) def test_create_ephemeral_with_arbitrary(self): drvr = 
libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND', {'linux': 'mkfs.ext4 --label %(fs_label)s %(target)s'}) self.mox.StubOutWithMock(utils, 'execute') utils.execute('mkfs.ext4', '--label', 'myVol', '/dev/something', run_as_root=True) self.mox.ReplayAll() drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux', is_block_dev=True) def test_create_ephemeral_with_ext3(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND', {'linux': 'mkfs.ext3 --label %(fs_label)s %(target)s'}) self.mox.StubOutWithMock(utils, 'execute') utils.execute('mkfs.ext3', '--label', 'myVol', '/dev/something', run_as_root=True) self.mox.ReplayAll() drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux', is_block_dev=True) def test_create_swap_default(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.mox.StubOutWithMock(utils, 'execute') utils.execute('mkswap', '/dev/something', run_as_root=False) self.mox.ReplayAll() drvr._create_swap('/dev/something', 1) def test_get_console_output_file(self): fake_libvirt_utils.files['console.log'] = '01234567890' with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) instance_ref = self.test_instance instance_ref['image_ref'] = 123456 instance = objects.Instance(**instance_ref) console_dir = (os.path.join(tmpdir, instance['name'])) console_log = '%s/console.log' % (console_dir) fake_dom_xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='filename'/> </disk> <console type='file'> <source path='%s'/> <target port='0'/> </console> </devices> </domain> """ % console_log def fake_lookup(id): return FakeVirtDomain(fake_dom_xml) self.create_fake_libvirt_mock() libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) try: prev_max = libvirt_driver.MAX_CONSOLE_BYTES libvirt_driver.MAX_CONSOLE_BYTES = 5 with 
mock.patch('os.path.exists', return_value=True): output = drvr.get_console_output(self.context, instance) finally: libvirt_driver.MAX_CONSOLE_BYTES = prev_max self.assertEqual('67890', output) def test_get_console_output_file_missing(self): with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) instance_ref = self.test_instance instance_ref['image_ref'] = 123456 instance = objects.Instance(**instance_ref) console_log = os.path.join(tmpdir, instance['name'], 'non-existent.log') fake_dom_xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='filename'/> </disk> <console type='file'> <source path='%s'/> <target port='0'/> </console> </devices> </domain> """ % console_log def fake_lookup(id): return FakeVirtDomain(fake_dom_xml) self.create_fake_libvirt_mock() libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch('os.path.exists', return_value=False): output = drvr.get_console_output(self.context, instance) self.assertEqual('', output) def test_get_console_output_pty(self): fake_libvirt_utils.files['pty'] = '01234567890' with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) instance_ref = self.test_instance instance_ref['image_ref'] = 123456 instance = objects.Instance(**instance_ref) console_dir = (os.path.join(tmpdir, instance['name'])) pty_file = '%s/fake_pty' % (console_dir) fake_dom_xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='filename'/> </disk> <console type='pty'> <source path='%s'/> <target port='0'/> </console> </devices> </domain> """ % pty_file def fake_lookup(id): return FakeVirtDomain(fake_dom_xml) def _fake_flush(self, fake_pty): return 'foo' def _fake_append_to_file(self, data, fpath): return 'pty' self.create_fake_libvirt_mock() libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup libvirt_driver.LibvirtDriver._flush_libvirt_console = _fake_flush libvirt_driver.LibvirtDriver._append_to_file = 
_fake_append_to_file drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) try: prev_max = libvirt_driver.MAX_CONSOLE_BYTES libvirt_driver.MAX_CONSOLE_BYTES = 5 output = drvr.get_console_output(self.context, instance) finally: libvirt_driver.MAX_CONSOLE_BYTES = prev_max self.assertEqual('67890', output) @mock.patch('nova.virt.libvirt.host.Host.get_domain') @mock.patch.object(libvirt_guest.Guest, "get_xml_desc") def test_get_console_output_not_available(self, mock_get_xml, get_domain): xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='filename'/> </disk> <console type='foo'> <source path='srcpath'/> <target port='0'/> </console> </devices> </domain> """ mock_get_xml.return_value = xml get_domain.return_value = mock.MagicMock() instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertRaises(exception.ConsoleNotAvailable, drvr.get_console_output, self.context, instance) def test_get_host_ip_addr(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) ip = drvr.get_host_ip_addr() self.assertEqual(ip, CONF.my_ip) @mock.patch.object(libvirt_driver.LOG, 'warning') @mock.patch('nova.compute.utils.get_machine_ips') def test_get_host_ip_addr_failure(self, mock_ips, mock_log): mock_ips.return_value = ['8.8.8.8', '75.75.75.75'] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.get_host_ip_addr() mock_log.assert_called_once_with(u'my_ip address (%(my_ip)s) was ' u'not found on any of the ' u'interfaces: %(ifaces)s', {'ifaces': '8.8.8.8, 75.75.75.75', 'my_ip': mock.ANY}) def test_conn_event_handler(self): self.mox.UnsetStubs() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) service_mock = mock.MagicMock() service_mock.disabled.return_value = False with test.nested( mock.patch.object(drvr._host, "_connect", side_effect=fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, "Failed to connect to host", error_code= 
                                  fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
                mock.patch.object(drvr._host, "_init_events",
                                  return_value=None),
                mock.patch.object(objects.Service, "get_by_compute_host",
                                  return_value=service_mock)):
            # verify that the driver registers for the close callback
            # and re-connects after receiving the callback
            self.assertRaises(exception.HypervisorUnavailable,
                              drvr.init_host, "wibble")
            # NOTE(review): service_mock.disabled is a MagicMock attribute
            # and is therefore always truthy, so this assertion can never
            # fail -- consider asserting the recorded attribute write.
            self.assertTrue(service_mock.disabled)

    def test_command_with_broken_connection(self):
        """Driver commands raise HypervisorUnavailable when connect fails."""
        self.mox.UnsetStubs()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        service_mock = mock.MagicMock()
        # Service starts out enabled; the failed connection should flip it.
        service_mock.disabled.return_value = False
        with test.nested(
            mock.patch.object(drvr._host, "_connect",
                              side_effect=fakelibvirt.make_libvirtError(
                                  fakelibvirt.libvirtError,
                                  "Failed to connect to host",
                                  error_code=
                                  fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
            mock.patch.object(drvr._host, "_init_events",
                              return_value=None),
            mock.patch.object(host.Host, "has_min_version",
                              return_value=True),
            mock.patch.object(drvr, "_do_quality_warnings",
                              return_value=None),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock),
            mock.patch.object(host.Host, "get_capabilities")):
            drvr.init_host("wibble")
            self.assertRaises(exception.HypervisorUnavailable,
                              drvr.get_num_instances)
            # NOTE(review): always-truthy MagicMock attribute (see above).
            self.assertTrue(service_mock.disabled)

    def test_service_resume_after_broken_connection(self):
        """A successful reconnect re-enables a previously disabled service."""
        self.mox.UnsetStubs()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        service_mock = mock.MagicMock()
        # Service starts out disabled; a working connection should clear it.
        service_mock.disabled.return_value = True
        with test.nested(
            mock.patch.object(drvr._host, "_connect",
                              return_value=mock.MagicMock()),
            mock.patch.object(drvr._host, "_init_events",
                              return_value=None),
            mock.patch.object(host.Host, "has_min_version",
                              return_value=True),
            mock.patch.object(drvr, "_do_quality_warnings",
                              return_value=None),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock),
            mock.patch.object(host.Host, "get_capabilities")):
            drvr.init_host("wibble")
            drvr.get_num_instances()
self.assertTrue(not service_mock.disabled and service_mock.disabled_reason is None) @mock.patch.object(objects.Instance, 'save') def test_immediate_delete(self, mock_save): def fake_get_domain(instance): raise exception.InstanceNotFound(instance_id=instance.uuid) def fake_delete_instance_files(instance): pass drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.stubs.Set(drvr._host, 'get_domain', fake_get_domain) self.stubs.Set(drvr, 'delete_instance_files', fake_delete_instance_files) instance = objects.Instance(self.context, **self.test_instance) drvr.destroy(self.context, instance, {}) mock_save.assert_called_once_with() @mock.patch.object(objects.Instance, 'get_by_uuid') @mock.patch.object(objects.Instance, 'obj_load_attr', autospec=True) @mock.patch.object(objects.Instance, 'save', autospec=True) @mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy') @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files') @mock.patch.object(libvirt_driver.LibvirtDriver, '_disconnect_volume') @mock.patch.object(driver, 'block_device_info_get_mapping') @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain') def _test_destroy_removes_disk(self, mock_undefine_domain, mock_mapping, mock_disconnect_volume, mock_delete_instance_files, mock_destroy, mock_inst_save, mock_inst_obj_load_attr, mock_get_by_uuid, volume_fail=False): instance = objects.Instance(self.context, **self.test_instance) vol = {'block_device_mapping': [ {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]} mock_mapping.return_value = vol['block_device_mapping'] mock_delete_instance_files.return_value = True mock_get_by_uuid.return_value = instance if volume_fail: mock_disconnect_volume.return_value = ( exception.VolumeNotFound('vol')) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.destroy(self.context, instance, [], vol) def test_destroy_removes_disk(self): self._test_destroy_removes_disk(volume_fail=False) def 
test_destroy_removes_disk_volume_fails(self): self._test_destroy_removes_disk(volume_fail=True) @mock.patch.object(libvirt_driver.LibvirtDriver, 'unplug_vifs') @mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy') @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain') def test_destroy_not_removes_disk(self, mock_undefine_domain, mock_destroy, mock_unplug_vifs): instance = fake_instance.fake_instance_obj( None, name='instancename', id=1, uuid='875a8070-d0b9-4949-8b31-104d125c9a64') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.destroy(self.context, instance, [], None, False) @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup') @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container') @mock.patch.object(host.Host, 'get_domain') def test_destroy_lxc_calls_teardown_container(self, mock_get_domain, mock_teardown_container, mock_cleanup): self.flags(virt_type='lxc', group='libvirt') fake_domain = FakeVirtDomain() def destroy_side_effect(*args, **kwargs): fake_domain._info[0] = power_state.SHUTDOWN with mock.patch.object(fake_domain, 'destroy', side_effect=destroy_side_effect) as mock_domain_destroy: mock_get_domain.return_value = fake_domain instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) network_info = [] drvr.destroy(self.context, instance, network_info, None, False) mock_get_domain.assert_has_calls([mock.call(instance), mock.call(instance)]) mock_domain_destroy.assert_called_once_with() mock_teardown_container.assert_called_once_with(instance) mock_cleanup.assert_called_once_with(self.context, instance, network_info, None, False, None) @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup') @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container') @mock.patch.object(host.Host, 'get_domain') def test_destroy_lxc_calls_teardown_container_when_no_domain(self, mock_get_domain, mock_teardown_container, mock_cleanup): 
        self.flags(virt_type='lxc', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        inf_exception = exception.InstanceNotFound(instance_id=instance.uuid)
        # No domain exists: get_domain raises InstanceNotFound, but the LXC
        # container must still be torn down and cleanup must still run.
        mock_get_domain.side_effect = inf_exception

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        network_info = []
        drvr.destroy(self.context, instance, network_info, None, False)

        mock_get_domain.assert_has_calls([mock.call(instance),
                                          mock.call(instance)])
        mock_teardown_container.assert_called_once_with(instance)
        mock_cleanup.assert_called_once_with(self.context, instance,
                                             network_info, None, False,
                                             None)

    def test_reboot_different_ids(self):
        """Soft reboot succeeds when the domain ID changes mid-reboot.

        A changed domain ID after shutdown means a new domain was created,
        so the driver should (re)create the domain rather than fall back
        to a hard reboot.
        """
        class FakeLoopingCall(object):
            def start(self, *a, **k):
                return self

            def wait(self):
                return None

        self.flags(wait_soft_reboot_seconds=1, group='libvirt')
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        self.reboot_create_called = False

        # Mock domain
        # NOTE: mox record/replay -- the call order below is the exact
        # order the driver is expected to issue; do not reorder.
        mock_domain = self.mox.CreateMock(fakelibvirt.virDomain)
        mock_domain.info().AndReturn(
            (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
        mock_domain.ID().AndReturn('some_fake_id')
        mock_domain.ID().AndReturn('some_fake_id')
        mock_domain.shutdown()
        mock_domain.info().AndReturn(
            (libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple)
        # Second pair of ID() calls returns a different ID, signalling
        # that a new domain replaced the old one after shutdown.
        mock_domain.ID().AndReturn('some_other_fake_id')
        mock_domain.ID().AndReturn('some_other_fake_id')
        self.mox.ReplayAll()

        def fake_get_domain(instance):
            return mock_domain

        def fake_create_domain(**kwargs):
            self.reboot_create_called = True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, '_create_domain', fake_create_domain)
        self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
                       lambda *a, **k: FakeLoopingCall())
        self.stubs.Set(pci_manager, 'get_instance_pci_devs', lambda *a: [])
        drvr.reboot(None, instance, [], 'SOFT')
        self.assertTrue(self.reboot_create_called)

    @mock.patch.object(pci_manager, 'get_instance_pci_devs')
    @mock.patch.object(loopingcall,
                       'FixedIntervalLoopingCall')
    @mock.patch.object(greenthread, 'sleep')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, 'get_domain')
    def test_reboot_same_ids(self, mock_get_domain, mock_hard_reboot,
                             mock_sleep, mock_loopingcall,
                             mock_get_instance_pci_devs):
        """Soft reboot falls back to hard reboot when the domain ID
        never changes (the guest did not come back up on its own).
        """
        class FakeLoopingCall(object):
            def start(self, *a, **k):
                return self

            def wait(self):
                return None

        self.flags(wait_soft_reboot_seconds=1, group='libvirt')
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        self.reboot_hard_reboot_called = False

        # Mock domain
        # info() reports RUNNING first, then CRASHED after shutdown,
        # while ID() keeps returning the same value -- no new domain.
        mock_domain = mock.Mock(fakelibvirt.virDomain)
        return_values = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple,
                         (libvirt_guest.VIR_DOMAIN_CRASHED,) + info_tuple]
        mock_domain.info.side_effect = return_values
        mock_domain.ID.return_value = 'some_fake_id'
        mock_domain.shutdown.side_effect = mock.Mock()

        def fake_hard_reboot(*args, **kwargs):
            self.reboot_hard_reboot_called = True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_get_domain.return_value = mock_domain
        mock_hard_reboot.side_effect = fake_hard_reboot
        mock_loopingcall.return_value = FakeLoopingCall()
        mock_get_instance_pci_devs.return_value = []
        drvr.reboot(None, instance, [], 'SOFT')
        self.assertTrue(self.reboot_hard_reboot_called)

    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, 'get_domain')
    def test_soft_reboot_libvirt_exception(self, mock_get_domain,
                                           mock_hard_reboot):
        # Tests that a hard reboot is performed when a soft reboot results
        # in raising a libvirtError.
info_tuple = ('fake', 'fake', 'fake', 'also_fake') # setup mocks mock_virDomain = mock.Mock(fakelibvirt.virDomain) mock_virDomain.info.return_value = ( (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple) mock_virDomain.ID.return_value = 'some_fake_id' mock_virDomain.shutdown.side_effect = fakelibvirt.libvirtError('Err') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) context = None instance = objects.Instance(**self.test_instance) network_info = [] mock_get_domain.return_value = mock_virDomain drvr.reboot(context, instance, network_info, 'SOFT') @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot') @mock.patch.object(host.Host, 'get_domain') def _test_resume_state_on_host_boot_with_state(self, state, mock_get_domain, mock_hard_reboot): mock_virDomain = mock.Mock(fakelibvirt.virDomain) mock_virDomain.info.return_value = ([state, None, None, None, None]) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_get_domain.return_value = mock_virDomain instance = objects.Instance(**self.test_instance) network_info = _fake_network_info(self, 1) drvr.resume_state_on_host_boot(self.context, instance, network_info, block_device_info=None) ignored_states = (power_state.RUNNING, power_state.SUSPENDED, power_state.NOSTATE, power_state.PAUSED) self.assertEqual(mock_hard_reboot.called, state not in ignored_states) def test_resume_state_on_host_boot_with_running_state(self): self._test_resume_state_on_host_boot_with_state(power_state.RUNNING) def test_resume_state_on_host_boot_with_suspended_state(self): self._test_resume_state_on_host_boot_with_state(power_state.SUSPENDED) def test_resume_state_on_host_boot_with_paused_state(self): self._test_resume_state_on_host_boot_with_state(power_state.PAUSED) def test_resume_state_on_host_boot_with_nostate(self): self._test_resume_state_on_host_boot_with_state(power_state.NOSTATE) def test_resume_state_on_host_boot_with_shutdown_state(self): 
self._test_resume_state_on_host_boot_with_state(power_state.RUNNING) def test_resume_state_on_host_boot_with_crashed_state(self): self._test_resume_state_on_host_boot_with_state(power_state.CRASHED) @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot') @mock.patch.object(host.Host, 'get_domain') def test_resume_state_on_host_boot_with_instance_not_found_on_driver( self, mock_get_domain, mock_hard_reboot): instance = objects.Instance(**self.test_instance) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_get_domain.side_effect = exception.InstanceNotFound( instance_id='fake') drvr.resume_state_on_host_boot(self.context, instance, network_info=[], block_device_info=None) mock_hard_reboot.assert_called_once_with(self.context, instance, [], None) @mock.patch('nova.virt.libvirt.LibvirtDriver._undefine_domain') @mock.patch('nova.virt.libvirt.LibvirtDriver.get_info') @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network') @mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing') @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_xml') @mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info') @mock.patch('nova.virt.libvirt.blockinfo.get_disk_info') @mock.patch('nova.virt.libvirt.LibvirtDriver._destroy') def test_hard_reboot(self, mock_destroy, mock_get_disk_info, mock_get_instance_disk_info, mock_get_guest_xml, mock_create_images_and_backing, mock_create_domain_and_network, mock_get_info, mock_undefine): self.context.auth_token = True # any non-None value will suffice instance = objects.Instance(**self.test_instance) instance_path = libvirt_utils.get_instance_path(instance) network_info = _fake_network_info(self, 1) block_device_info = None dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>" "<devices>" "<disk type='file'><driver name='qemu' type='raw'/>" "<source file='/test/disk'/>" "<target dev='vda' bus='virtio'/></disk>" "<disk type='file'><driver name='qemu' type='qcow2'/>" 
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # First get_info() says SHUTDOWN (destroy finished), second says
        # RUNNING (reboot succeeded), ending the wait loop.
        return_values = [hardware.InstanceInfo(state=power_state.SHUTDOWN),
                         hardware.InstanceInfo(state=power_state.RUNNING)]
        mock_get_info.side_effect = return_values

        backing_disk_info = [{"virt_disk_size": 2}]

        mock_get_disk_info.return_value = mock.sentinel.disk_info
        mock_get_guest_xml.return_value = dummyxml
        mock_get_instance_disk_info.return_value = backing_disk_info

        drvr._hard_reboot(self.context, instance, network_info,
                          block_device_info)

        mock_destroy.assert_called_once_with(instance)
        mock_undefine.assert_called_once_with(instance)

        # make sure that _create_images_and_backing is passed the disk_info
        # returned from _get_instance_disk_info and not the one that is in
        # scope from blockinfo.get_disk_info
        mock_create_images_and_backing.assert_called_once_with(self.context,
            instance, instance_path, backing_disk_info)

        # make sure that _create_domain_and_network is passed the disk_info
        # returned from blockinfo.get_disk_info and not the one that's
        # returned from _get_instance_disk_info
        mock_create_domain_and_network.assert_called_once_with(self.context,
            dummyxml, instance, network_info, mock.sentinel.disk_info,
            block_device_info=block_device_info,
            reboot=True, vifs_already_plugged=True)

    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall')
    @mock.patch('nova.pci.manager.get_instance_pci_devs')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._prepare_pci_devices_for_use')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.utils.write_to_file')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_config')
    @mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
    def test_hard_reboot_does_not_call_glance_show(self,
            mock_destroy, mock_get_disk_info, mock_get_guest_config,
            mock_get_instance_path, mock_write_to_file,
            mock_get_instance_disk_info, mock_create_images_and_backing,
            mock_create_domand_and_network, mock_prepare_pci_devices_for_use,
            mock_get_instance_pci_devs, mock_looping_call, mock_ensure_tree):
        """For a hard reboot, we shouldn't need an additional call to glance
        to get the image metadata.

        This is important for automatically spinning up instances on a
        host-reboot, since we won't have a user request context that'll allow
        the Glance request to go through. We have to rely on the cached image
        metadata, instead.

        https://bugs.launchpad.net/nova/+bug/1339386
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        instance = objects.Instance(**self.test_instance)
        network_info = mock.MagicMock()
        block_device_info = mock.MagicMock()
        mock_get_disk_info.return_value = {}
        mock_get_guest_config.return_value = mock.MagicMock()
        mock_get_instance_path.return_value = '/foo'
        mock_looping_call.return_value = mock.MagicMock()
        drvr._image_api = mock.MagicMock()

        drvr._hard_reboot(self.context, instance, network_info,
                          block_device_info)

        # The whole point of the test: no Glance image lookup happened.
        self.assertFalse(drvr._image_api.get.called)
        mock_ensure_tree.assert_called_once_with('/foo')

    def test_suspend(self):
        guest = libvirt_guest.Guest(FakeVirtDomain(id=1))
        dom = guest._domain

        instance = objects.Instance(**self.test_instance)
        # No ephemeral disk encryption -> no dmcrypt volumes to delete.
        instance.ephemeral_key_uuid = None

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        @mock.patch.object(dmcrypt, 'delete_volume')
        @mock.patch.object(conn, '_get_instance_disk_info', return_value=[])
        @mock.patch.object(conn, '_detach_sriov_ports')
        @mock.patch.object(conn, '_detach_pci_devices')
        @mock.patch.object(pci_manager, 'get_instance_pci_devs',
                           return_value='pci devs')
        @mock.patch.object(conn._host, 'get_guest', return_value=guest)
        def suspend(mock_get_guest,
                    mock_get_instance_pci_devs, mock_detach_pci_devices,
                    mock_detach_sriov_ports, mock_get_instance_disk_info,
                    mock_delete_volume):
            mock_managedSave = mock.Mock()
            dom.managedSave = mock_managedSave

            conn.suspend(self.context, instance)

            # suspend() must managed-save the domain with flags=0 and, with
            # ephemeral_key_uuid unset, must not scan instance disks.
            mock_managedSave.assert_called_once_with(0)
            self.assertFalse(mock_get_instance_disk_info.called)
            mock_delete_volume.assert_has_calls([
                mock.call(disk['path'])
                for disk in mock_get_instance_disk_info.return_value], False)

        suspend()

    @mock.patch.object(time, 'sleep')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_create_domain')
    @mock.patch.object(host.Host, 'get_domain')
    def _test_clean_shutdown(self, mock_get_domain, mock_create_domain,
                             mock_sleep, seconds_to_shutdown,
                             timeout, retry_interval,
                             shutdown_attempts, succeeds):
        # Helper: scripts domain.info()/shutdown() so the guest "shuts down"
        # after seconds_to_shutdown ticks, then checks _clean_shutdown's
        # result and how many shutdown attempts it made.
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        shutdown_count = []

        # Mock domain
        mock_domain = mock.Mock(fakelibvirt.virDomain)
        return_infos = [(libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple]
        return_shutdowns = [shutdown_count.append("shutdown")]
        retry_countdown = retry_interval
        for x in range(min(seconds_to_shutdown, timeout)):
            return_infos.append(
                (libvirt_guest.VIR_DOMAIN_RUNNING,) + info_tuple)
            if retry_countdown == 0:
                return_shutdowns.append(shutdown_count.append("shutdown"))
                retry_countdown = retry_interval
            else:
                retry_countdown -= 1

        if seconds_to_shutdown < timeout:
            return_infos.append(
                (libvirt_guest.VIR_DOMAIN_SHUTDOWN,) + info_tuple)

        mock_domain.info.side_effect = return_infos
        mock_domain.shutdown.side_effect = return_shutdowns

        def fake_create_domain(**kwargs):
            self.reboot_create_called = True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_get_domain.return_value = mock_domain
        mock_create_domain.side_effect = fake_create_domain
        result = drvr._clean_shutdown(instance, timeout, retry_interval)

        self.assertEqual(succeeds, result)
        self.assertEqual(shutdown_attempts, len(shutdown_count))

    def test_clean_shutdown_first_time(self):
        self._test_clean_shutdown(seconds_to_shutdown=2,
                                  timeout=5,
                                  retry_interval=3,
                                  shutdown_attempts=1,
                                  succeeds=True)

    def test_clean_shutdown_with_retry(self):
        self._test_clean_shutdown(seconds_to_shutdown=4,
                                  timeout=5,
                                  retry_interval=3,
                                  shutdown_attempts=2,
                                  succeeds=True)

    def test_clean_shutdown_failure(self):
        self._test_clean_shutdown(seconds_to_shutdown=6,
                                  timeout=5,
                                  retry_interval=3,
                                  shutdown_attempts=2,
                                  succeeds=False)

    def test_clean_shutdown_no_wait(self):
        self._test_clean_shutdown(seconds_to_shutdown=6,
                                  timeout=0,
                                  retry_interval=3,
                                  shutdown_attempts=1,
                                  succeeds=False)

    @mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
    @mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
    @mock.patch.object(utils, 'get_image_from_system_metadata',
                       return_value=None)
    def test_attach_sriov_ports(self,
                                mock_get_image_metadata,
                                mock_ID,
                                mock_attachDevice):
        instance = objects.Instance(**self.test_instance)

        network_info = _fake_network_info(self, 1)
        network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
        guest = libvirt_guest.Guest(FakeVirtDomain())
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        drvr._attach_sriov_ports(self.context, instance, guest, network_info)
        mock_get_image_metadata.assert_called_once_with(
            instance.system_metadata)
        self.assertTrue(mock_attachDevice.called)

    @mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
    @mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
    @mock.patch.object(utils, 'get_image_from_system_metadata',
                       return_value=None)
    def test_attach_sriov_direct_physical_ports(self,
                                                mock_get_image_metadata,
                                                mock_ID,
                                                mock_attachDevice):
        instance = objects.Instance(**self.test_instance)

        network_info = _fake_network_info(self, 1)
        network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT_PHYSICAL
        guest = libvirt_guest.Guest(FakeVirtDomain())
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        drvr._attach_sriov_ports(self.context, instance, guest, network_info)
        mock_get_image_metadata.assert_called_once_with(
            instance.system_metadata)
        self.assertTrue(mock_attachDevice.called)

    @mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
    @mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
    @mock.patch.object(utils, 'get_image_from_system_metadata',
                       return_value=None)
    def test_attach_sriov_ports_with_info_cache(self,
                                                mock_get_image_metadata,
                                                mock_ID,
                                                mock_attachDevice):
        instance = objects.Instance(**self.test_instance)

        network_info = _fake_network_info(self, 1)
        network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
        instance.info_cache = objects.InstanceInfoCache(
            network_info=network_info)
        guest = libvirt_guest.Guest(FakeVirtDomain())
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        # network_info=None -> the driver falls back to instance.info_cache.
        drvr._attach_sriov_ports(self.context, instance, guest, None)
        mock_get_image_metadata.assert_called_once_with(
            instance.system_metadata)
        self.assertTrue(mock_attachDevice.called)

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def _test_detach_sriov_ports(self, mock_has_min_version, vif_type):
        instance = objects.Instance(**self.test_instance)

        # NOTE(review): "expeted_pci_slot" is a typo for "expected_pci_slot";
        # it is also referenced below, so renaming must cover both uses.
        expeted_pci_slot = "0000:00:00.0"
        network_info = _fake_network_info(self, 1)
        network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
        # some more adjustments for the fake network_info so that
        # the correct get_config function will be executed (vif's
        # get_config_hw_veb - which is according to the real SRIOV vif)
        # and most importantly the pci_slot which is translated to
        # cfg.source_dev, then to PciDevice.address and sent to
        # _detach_pci_devices
        network_info[0]['profile'] = dict(pci_slot=expeted_pci_slot)
        network_info[0]['type'] = vif_type
        network_info[0]['details'] = dict(vlan="2145")
        instance.info_cache = objects.InstanceInfoCache(
            network_info=network_info)
        # fill the pci_devices of the instance so that
        # pci_manager.get_instance_pci_devs will not return an empty list
        # which will eventually fail the assertion for detachDeviceFlags
        expected_pci_device_obj = (
            objects.PciDevice(address=expeted_pci_slot, request_id=None))

        instance.pci_devices = objects.PciDeviceList()
        instance.pci_devices.objects = [expected_pci_device_obj]

        domain = FakeVirtDomain()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        guest = libvirt_guest.Guest(domain)

        with mock.patch.object(drvr, '_detach_pci_devices') as mock_detach_pci:
            drvr._detach_sriov_ports(self.context, instance, guest)
            # The vif's pci_slot must be resolved to the matching PciDevice.
            mock_detach_pci.assert_called_once_with(
                guest, [expected_pci_device_obj])

    def test_detach_sriov_ports_interface_interface_hostdev(self):
        # Note: test detach_sriov_ports method for vif with config
        # LibvirtConfigGuestInterface
        self._test_detach_sriov_ports(vif_type="hw_veb")

    def test_detach_sriov_ports_interface_pci_hostdev(self):
        # Note: test detach_sriov_ports method for vif with config
        # LibvirtConfigGuestHostdevPCI
        self._test_detach_sriov_ports(vif_type="ib_hostdev")

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    @mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
    def test_detach_duplicate_mac_sriov_ports(self,
                                              mock_detachDeviceFlags,
                                              mock_has_min_version):
        # Two SR-IOV vifs sharing a MAC address must be told apart by their
        # pci_slot when detaching.
        instance = objects.Instance(**self.test_instance)

        network_info = _fake_network_info(self, 2)

        for network_info_inst in network_info:
            network_info_inst['vnic_type'] = network_model.VNIC_TYPE_DIRECT
            network_info_inst['type'] = "hw_veb"
            network_info_inst['details'] = dict(vlan="2145")
            network_info_inst['address'] = "fa:16:3e:96:2a:48"

        network_info[0]['profile'] = dict(pci_slot="0000:00:00.0")
        network_info[1]['profile'] = dict(pci_slot="0000:00:00.1")

        instance.info_cache = objects.InstanceInfoCache(
            network_info=network_info)
        # fill the pci_devices of the instance so that
        # pci_manager.get_instance_pci_devs will not return an empty list
        # which will eventually fail the assertion for detachDeviceFlags
        instance.pci_devices = objects.PciDeviceList()
        instance.pci_devices.objects = [
            objects.PciDevice(address='0000:00:00.0', request_id=None),
objects.PciDevice(address='0000:00:00.1', request_id=None) ] domain = FakeVirtDomain() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) guest = libvirt_guest.Guest(domain) drvr._detach_sriov_ports(self.context, instance, guest) expected_xml = [ ('<hostdev mode="subsystem" type="pci" managed="yes">\n' ' <source>\n' ' <address bus="0x00" domain="0x0000" \ function="0x0" slot="0x00"/>\n' ' </source>\n' '</hostdev>\n'), ('<hostdev mode="subsystem" type="pci" managed="yes">\n' ' <source>\n' ' <address bus="0x00" domain="0x0000" \ function="0x1" slot="0x00"/>\n' ' </source>\n' '</hostdev>\n') ] mock_detachDeviceFlags.has_calls([ mock.call(expected_xml[0], flags=1), mock.call(expected_xml[1], flags=1) ]) def test_resume(self): dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>" "<devices>" "<disk type='file'><driver name='qemu' type='raw'/>" "<source file='/test/disk'/>" "<target dev='vda' bus='virtio'/></disk>" "<disk type='file'><driver name='qemu' type='qcow2'/>" "<source file='/test/disk.local'/>" "<target dev='vdb' bus='virtio'/></disk>" "</devices></domain>") instance = objects.Instance(**self.test_instance) network_info = _fake_network_info(self, 1) block_device_info = None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) guest = libvirt_guest.Guest('fake_dom') with test.nested( mock.patch.object(drvr, '_get_existing_domain_xml', return_value=dummyxml), mock.patch.object(drvr, '_create_domain_and_network', return_value=guest), mock.patch.object(drvr, '_attach_pci_devices'), mock.patch.object(pci_manager, 'get_instance_pci_devs', return_value='fake_pci_devs'), mock.patch.object(utils, 'get_image_from_system_metadata'), mock.patch.object(blockinfo, 'get_disk_info'), ) as (_get_existing_domain_xml, _create_domain_and_network, _attach_pci_devices, get_instance_pci_devs, get_image_metadata, get_disk_info): get_image_metadata.return_value = {'bar': 234} disk_info = {'foo': 123} get_disk_info.return_value = disk_info 
            drvr.resume(self.context, instance,
                        network_info, block_device_info)
            _get_existing_domain_xml.assert_has_calls([mock.call(instance,
                                            network_info, block_device_info)])
            _create_domain_and_network.assert_has_calls([mock.call(
                self.context, dummyxml,
                instance, network_info, disk_info,
                block_device_info=block_device_info,
                vifs_already_plugged=True)])
            _attach_pci_devices.assert_has_calls([mock.call(guest,
                                                 'fake_pci_devs')])

    @mock.patch.object(host.Host, 'get_domain')
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
    @mock.patch.object(objects.Instance, 'save')
    def test_destroy_undefines(self, mock_save, mock_delete_instance_files,
                               mock_get_info, mock_get_domain):
        # destroy() must undefine the domain and save the instance object.
        dom_mock = mock.MagicMock()
        dom_mock.undefineFlags.return_value = 1

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_get_domain.return_value = dom_mock
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.SHUTDOWN, id=-1)
        mock_delete_instance_files.return_value = None

        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, [])

        mock_save.assert_called_once_with()

    @mock.patch.object(rbd_utils, 'RBDDriver')
    def test_cleanup_rbd(self, mock_driver):
        driver = mock_driver.return_value
        driver.cleanup_volumes = mock.Mock()
        fake_instance = {'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'}

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr._cleanup_rbd(fake_instance)

        driver.cleanup_volumes.assert_called_once_with(fake_instance)

    @mock.patch.object(objects.Instance, 'save')
    def test_destroy_undefines_no_undefine_flags(self, mock_save):
        # undefineFlags() raising must fall back to plain undefine().
        # (NOTE: the local name "mock" shadows the mock module inside this
        # test — a pre-existing quirk of these mox-based tests.)
        mock = self.mox.CreateMock(fakelibvirt.virDomain)
        mock.ID()
        mock.destroy()
        mock.undefineFlags(1).AndRaise(fakelibvirt.libvirtError('Err'))
        mock.ID().AndReturn(123)
        mock.undefine()

        self.mox.ReplayAll()

        def fake_get_domain(instance):
            return mock

        def fake_get_info(instance_name):
            return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1)

        def fake_delete_instance_files(instance):
            return None

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        self.stubs.Set(drvr, 'delete_instance_files',
                       fake_delete_instance_files)
        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, [])
        mock_save.assert_called_once_with()

    @mock.patch.object(objects.Instance, 'save')
    def test_destroy_undefines_no_attribute_with_managed_save(self,
                                                              mock_save):
        # Old libvirt without undefineFlags: a managed-save image must be
        # removed before the plain undefine().
        mock = self.mox.CreateMock(fakelibvirt.virDomain)
        mock.ID()
        mock.destroy()
        mock.undefineFlags(1).AndRaise(AttributeError())
        mock.hasManagedSaveImage(0).AndReturn(True)
        mock.managedSaveRemove(0)
        mock.undefine()

        self.mox.ReplayAll()

        def fake_get_domain(instance):
            return mock

        def fake_get_info(instance_name):
            return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1)

        def fake_delete_instance_files(instance):
            return None

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        self.stubs.Set(drvr, 'delete_instance_files',
                       fake_delete_instance_files)
        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, [])
        mock_save.assert_called_once_with()

    @mock.patch.object(objects.Instance, 'save')
    def test_destroy_undefines_no_attribute_no_managed_save(self, mock_save):
        # Old libvirt without undefineFlags or hasManagedSaveImage: undefine
        # must still be attempted.
        mock = self.mox.CreateMock(fakelibvirt.virDomain)
        mock.ID()
        mock.destroy()
        mock.undefineFlags(1).AndRaise(AttributeError())
        mock.hasManagedSaveImage(0).AndRaise(AttributeError())
        mock.undefine()

        self.mox.ReplayAll()

        def fake_get_domain(self, instance):
            return mock

        def fake_get_info(instance_name):
            return hardware.InstanceInfo(state=power_state.SHUTDOWN)

        def fake_delete_instance_files(instance):
            return None

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        self.stubs.Set(drvr, 'delete_instance_files',
                       fake_delete_instance_files)
        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, [])
        mock_save.assert_called_once_with()

    def test_destroy_timed_out(self):
        # A libvirt operation-timeout during destroy() surfaces as
        # InstancePowerOffFailure.
        mock = self.mox.CreateMock(fakelibvirt.virDomain)
        mock.ID()
        mock.destroy().AndRaise(fakelibvirt.libvirtError("timed out"))
        self.mox.ReplayAll()

        def fake_get_domain(self, instance):
            return mock

        def fake_get_error_code(self):
            return fakelibvirt.VIR_ERR_OPERATION_TIMEOUT

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
        self.stubs.Set(fakelibvirt.libvirtError, 'get_error_code',
                       fake_get_error_code)
        instance = objects.Instance(**self.test_instance)
        self.assertRaises(exception.InstancePowerOffFailure,
                          drvr.destroy, self.context, instance, [])

    def test_private_destroy_not_found(self):
        ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            "No such domain",
            error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
        mock = self.mox.CreateMock(fakelibvirt.virDomain)
        mock.ID()
        mock.destroy().AndRaise(ex)
        mock.info().AndRaise(ex)
        mock.UUIDString()
        self.mox.ReplayAll()

        def fake_get_domain(instance):
            return mock

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        instance = objects.Instance(**self.test_instance)
        # NOTE(vish): verifies destroy doesn't raise if the instance disappears
        drvr._destroy(instance)

    def test_private_destroy_lxc_processes_refused_to_die(self):
        # For LXC, "processes refused to die" is tolerated when the container
        # has in fact reached SHUTDOWN.
        self.flags(virt_type='lxc', group='libvirt')
        ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError, "",
            error_message="internal error: Some processes refused to die",
            error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with mock.patch.object(conn._host, 'get_domain') as mock_get_domain, \
            mock.patch.object(conn, 'get_info') as mock_get_info:
            mock_domain = mock.MagicMock()
            mock_domain.ID.return_value = 1
            mock_get_domain.return_value = mock_domain
            mock_domain.destroy.side_effect = ex

            mock_info = mock.MagicMock()
            mock_info.id = 1
            mock_info.state = power_state.SHUTDOWN
            mock_get_info.return_value = mock_info

            instance = objects.Instance(**self.test_instance)
            conn._destroy(instance)

    def test_private_destroy_processes_refused_to_die_still_raises(self):
        # On non-LXC virt types the same error must propagate.
        ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError, "",
            error_message="internal error: Some processes refused to die",
            error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)

        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with mock.patch.object(conn._host, 'get_domain') as mock_get_domain:
            mock_domain = mock.MagicMock()
            mock_domain.ID.return_value = 1
            mock_get_domain.return_value = mock_domain
            mock_domain.destroy.side_effect = ex

            instance = objects.Instance(**self.test_instance)
            self.assertRaises(fakelibvirt.libvirtError, conn._destroy,
                              instance)

    def test_private_destroy_ebusy_timeout(self):
        # Tests that _destroy will retry 3 times to destroy the guest when an
        # EBUSY is raised, but eventually times out and raises the libvirtError
        ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            ("Failed to terminate process 26425 with SIGKILL: "
             "Device or resource busy"),
            error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR,
            int1=errno.EBUSY)

        mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
        mock_guest.poweroff = mock.Mock(side_effect=ex)

        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with mock.patch.object(drvr._host, 'get_guest',
                               return_value=mock_guest):
            self.assertRaises(fakelibvirt.libvirtError, drvr._destroy,
                              instance)

        self.assertEqual(3, mock_guest.poweroff.call_count)

    def test_private_destroy_ebusy_multiple_attempt_ok(self):
        # Tests that the _destroy attempt loop is broken when EBUSY is no
        # longer raised.
        ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            ("Failed to terminate process 26425 with SIGKILL: "
             "Device or resource busy"),
            error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR,
            int1=errno.EBUSY)

        mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
        mock_guest.poweroff = mock.Mock(side_effect=[ex, None])

        inst_info = hardware.InstanceInfo(power_state.SHUTDOWN, id=1)
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with mock.patch.object(drvr._host, 'get_guest',
                               return_value=mock_guest):
            with mock.patch.object(drvr, 'get_info', return_value=inst_info):
                drvr._destroy(instance)

        self.assertEqual(2, mock_guest.poweroff.call_count)

    def test_undefine_domain_with_not_found_instance(self):
        def fake_get_domain(self, instance):
            raise exception.InstanceNotFound(instance_id=instance.uuid)

        self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
        self.mox.StubOutWithMock(fakelibvirt.libvirtError, "get_error_code")

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)

        # NOTE(wenjianhn): verifies undefine doesn't raise if the
        # instance disappears
        drvr._undefine_domain(instance)

    @mock.patch.object(host.Host, "list_instance_domains")
    @mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid")
    @mock.patch.object(objects.InstanceList, "get_by_filters")
    def test_disk_over_committed_size_total(self, mock_get, mock_bdms,
                                            mock_list):
        # Sum the per-instance over-committed disk sizes across all domains.
        # (The previous comment here was copy-pasted from a destroy test.)
        class DiagFakeDomain(object):
            # Minimal stand-in for a libvirt domain: just enough surface for
            # _get_disk_over_committed_size_total to iterate.
            def __init__(self, name):
                self._name = name
                self._uuid = str(uuid.uuid4())

            def ID(self):
                return 1

            def name(self):
                return self._name

            def UUIDString(self):
                return self._uuid

            def XMLDesc(self, flags):
                return "<domain/>"

        instance_domains = [
            DiagFakeDomain("instance0000001"),
            DiagFakeDomain("instance0000002")]
        mock_list.return_value = instance_domains

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        fake_disks = {'instance0000001':
                      [{'type': 'qcow2', 'path': '/somepath/disk1',
                        'virt_disk_size': '10737418240',
                        'backing_file': '/somepath/disk1',
                        'disk_size': '83886080',
                        'over_committed_disk_size': '10653532160'}],
                      'instance0000002':
                      [{'type': 'raw', 'path': '/somepath/disk2',
                        'virt_disk_size': '0',
                        'backing_file': '/somepath/disk2',
                        'disk_size': '10737418240',
                        'over_committed_disk_size': '0'}]}

        def get_info(instance_name, xml, **kwargs):
            return fake_disks.get(instance_name)

        instance_uuids = [dom.UUIDString() for dom in instance_domains]
        instances = [objects.Instance(
            uuid=instance_uuids[0],
            root_device_name='/dev/vda'),
            objects.Instance(
            uuid=instance_uuids[1],
            root_device_name='/dev/vdb')
        ]
        mock_get.return_value = instances

        with mock.patch.object(drvr,
                               "_get_instance_disk_info") as mock_info:
            mock_info.side_effect = get_info

            result = drvr._get_disk_over_committed_size_total()
            # 10653532160 + 0: only disk1 is over-committed.
            self.assertEqual(result, 10653532160)
            mock_list.assert_called_once_with()
            self.assertEqual(2, mock_info.call_count)

        filters = {'uuid': instance_uuids}
        mock_get.assert_called_once_with(mock.ANY, filters, use_slave=True)
        mock_bdms.assert_called_with(mock.ANY, instance_uuids)

    @mock.patch.object(host.Host, "list_instance_domains")
    @mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid")
    @mock.patch.object(objects.InstanceList, "get_by_filters")
    def test_disk_over_committed_size_total_eperm(self, mock_get, mock_bdms,
                                                  mock_list):
        # Per-instance OSErrors (ENOENT/ESTALE/EACCES) must be tolerated and
        # skipped while the total is still computed from the healthy domains.
        # (The previous comment here was copy-pasted from a destroy test.)
class DiagFakeDomain(object): def __init__(self, name): self._name = name self._uuid = str(uuid.uuid4()) def ID(self): return 1 def name(self): return self._name def UUIDString(self): return self._uuid def XMLDesc(self, flags): return "<domain/>" instance_domains = [ DiagFakeDomain("instance0000001"), DiagFakeDomain("instance0000002"), DiagFakeDomain("instance0000003"), DiagFakeDomain("instance0000004")] mock_list.return_value = instance_domains drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) fake_disks = {'instance0000001': [{'type': 'qcow2', 'path': '/somepath/disk1', 'virt_disk_size': '10737418240', 'backing_file': '/somepath/disk1', 'disk_size': '83886080', 'over_committed_disk_size': '10653532160'}], 'instance0000002': [{'type': 'raw', 'path': '/somepath/disk2', 'virt_disk_size': '0', 'backing_file': '/somepath/disk2', 'disk_size': '10737418240', 'over_committed_disk_size': '21474836480'}], 'instance0000003': [{'type': 'raw', 'path': '/somepath/disk3', 'virt_disk_size': '0', 'backing_file': '/somepath/disk3', 'disk_size': '21474836480', 'over_committed_disk_size': '32212254720'}], 'instance0000004': [{'type': 'raw', 'path': '/somepath/disk4', 'virt_disk_size': '0', 'backing_file': '/somepath/disk4', 'disk_size': '32212254720', 'over_committed_disk_size': '42949672960'}]} def side_effect(name, dom, block_device_info): if name == 'instance0000001': self.assertEqual('/dev/vda', block_device_info['root_device_name']) raise OSError(errno.ENOENT, 'No such file or directory') if name == 'instance0000002': self.assertEqual('/dev/vdb', block_device_info['root_device_name']) raise OSError(errno.ESTALE, 'Stale NFS file handle') if name == 'instance0000003': self.assertEqual('/dev/vdc', block_device_info['root_device_name']) raise OSError(errno.EACCES, 'Permission denied') if name == 'instance0000004': self.assertEqual('/dev/vdd', block_device_info['root_device_name']) return fake_disks.get(name) get_disk_info = mock.Mock() get_disk_info.side_effect = 
side_effect drvr._get_instance_disk_info = get_disk_info instance_uuids = [dom.UUIDString() for dom in instance_domains] instances = [objects.Instance( uuid=instance_uuids[0], root_device_name='/dev/vda'), objects.Instance( uuid=instance_uuids[1], root_device_name='/dev/vdb'), objects.Instance( uuid=instance_uuids[2], root_device_name='/dev/vdc'), objects.Instance( uuid=instance_uuids[3], root_device_name='/dev/vdd') ] mock_get.return_value = instances result = drvr._get_disk_over_committed_size_total() self.assertEqual(42949672960, result) mock_list.assert_called_once_with() self.assertEqual(4, get_disk_info.call_count) filters = {'uuid': instance_uuids} mock_get.assert_called_once_with(mock.ANY, filters, use_slave=True) mock_bdms.assert_called_with(mock.ANY, instance_uuids) @mock.patch.object(host.Host, "list_instance_domains", return_value=[mock.MagicMock(name='foo')]) @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_instance_disk_info", side_effect=exception.VolumeBDMPathNotFound(path='bar')) @mock.patch.object(objects.BlockDeviceMappingList, "bdms_by_instance_uuid") @mock.patch.object(objects.InstanceList, "get_by_filters") def test_disk_over_committed_size_total_bdm_not_found(self, mock_get, mock_bdms, mock_get_disk_info, mock_list_domains): # Tests that we handle VolumeBDMPathNotFound gracefully. 
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertEqual(0, drvr._get_disk_over_committed_size_total()) def test_cpu_info(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) def get_host_capabilities_stub(self): cpu = vconfig.LibvirtConfigCPU() cpu.model = "Opteron_G4" cpu.vendor = "AMD" cpu.arch = arch.X86_64 cpu.cells = 1 cpu.cores = 2 cpu.threads = 1 cpu.sockets = 4 cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic")) cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow")) caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = cpu guest = vconfig.LibvirtConfigGuest() guest.ostype = vm_mode.HVM guest.arch = arch.X86_64 guest.domtype = ["kvm"] caps.guests.append(guest) guest = vconfig.LibvirtConfigGuest() guest.ostype = vm_mode.HVM guest.arch = arch.I686 guest.domtype = ["kvm"] caps.guests.append(guest) return caps self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) want = {"vendor": "AMD", "features": set(["extapic", "3dnow"]), "model": "Opteron_G4", "arch": arch.X86_64, "topology": {"cells": 1, "cores": 2, "threads": 1, "sockets": 4}} got = drvr._get_cpu_info() self.assertEqual(want, got) def test_get_pcidev_info(self): def fake_nodeDeviceLookupByName(self, name): return FakeNodeDevice(_fake_NodeDevXml[name]) self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name') host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with mock.patch.object( fakelibvirt.Connection, 'getLibVersion') as mock_lib_version: mock_lib_version.return_value = ( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION) - 1) actualvf = drvr._get_pcidev_info("pci_0000_04_00_3") expect_vf = { "dev_id": "pci_0000_04_00_3", "address": "0000:04:00.3", "product_id": '1521', "numa_node": None, "vendor_id": '8086', "label": 'label_8086_1521', "dev_type": 
fields.PciDeviceType.SRIOV_PF, } self.assertEqual(expect_vf, actualvf) actualvf = drvr._get_pcidev_info("pci_0000_04_10_7") expect_vf = { "dev_id": "pci_0000_04_10_7", "address": "0000:04:10.7", "product_id": '1520', "numa_node": None, "vendor_id": '8086', "label": 'label_8086_1520', "dev_type": fields.PciDeviceType.SRIOV_VF, "parent_addr": '0000:04:00.3', } self.assertEqual(expect_vf, actualvf) actualvf = drvr._get_pcidev_info("pci_0000_04_11_7") expect_vf = { "dev_id": "pci_0000_04_11_7", "address": "0000:04:11.7", "product_id": '1520', "vendor_id": '8086', "numa_node": 0, "label": 'label_8086_1520', "dev_type": fields.PciDeviceType.SRIOV_VF, "parent_addr": '0000:04:00.3', } self.assertEqual(expect_vf, actualvf) with mock.patch.object( pci_utils, 'is_physical_function', return_value=True): actualvf = drvr._get_pcidev_info("pci_0000_04_00_1") expect_vf = { "dev_id": "pci_0000_04_00_1", "address": "0000:04:00.1", "product_id": '1013', "numa_node": 0, "vendor_id": '15b3', "label": 'label_15b3_1013', "dev_type": fields.PciDeviceType.SRIOV_PF, } self.assertEqual(expect_vf, actualvf) with mock.patch.object( pci_utils, 'is_physical_function', return_value=False): actualvf = drvr._get_pcidev_info("pci_0000_04_00_1") expect_vf = { "dev_id": "pci_0000_04_00_1", "address": "0000:04:00.1", "product_id": '1013', "numa_node": 0, "vendor_id": '15b3', "label": 'label_15b3_1013', "dev_type": fields.PciDeviceType.STANDARD, } self.assertEqual(expect_vf, actualvf) with mock.patch.object( fakelibvirt.Connection, 'getLibVersion') as mock_lib_version: mock_lib_version.return_value = ( versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_PF_WITH_NO_VFS_CAP_VERSION)) actualvf = drvr._get_pcidev_info("pci_0000_03_00_0") expect_vf = { "dev_id": "pci_0000_03_00_0", "address": "0000:03:00.0", "product_id": '1013', "numa_node": 0, "vendor_id": '15b3', "label": 'label_15b3_1013', "dev_type": fields.PciDeviceType.SRIOV_PF, } self.assertEqual(expect_vf, actualvf) actualvf = 
drvr._get_pcidev_info("pci_0000_03_00_1") expect_vf = { "dev_id": "pci_0000_03_00_1", "address": "0000:03:00.1", "product_id": '1013', "numa_node": 0, "vendor_id": '15b3', "label": 'label_15b3_1013', "dev_type": fields.PciDeviceType.SRIOV_PF, } self.assertEqual(expect_vf, actualvf) def test_list_devices_not_supported(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Handle just the NO_SUPPORT error not_supported_exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'this function is not supported by the connection driver:' ' virNodeNumOfDevices', error_code=fakelibvirt.VIR_ERR_NO_SUPPORT) with mock.patch.object(drvr._conn, 'listDevices', side_effect=not_supported_exc): self.assertEqual('[]', drvr._get_pci_passthrough_devices()) # We cache not supported status to avoid emitting too many logging # messages. Clear this value to test the other exception case. del drvr._list_devices_supported # Other errors should not be caught other_exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'other exc', error_code=fakelibvirt.VIR_ERR_NO_DOMAIN) with mock.patch.object(drvr._conn, 'listDevices', side_effect=other_exc): self.assertRaises(fakelibvirt.libvirtError, drvr._get_pci_passthrough_devices) def test_get_pci_passthrough_devices(self): def fakelistDevices(caps, fakeargs=0): return ['pci_0000_04_00_3', 'pci_0000_04_10_7', 'pci_0000_04_11_7'] self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn') libvirt_driver.LibvirtDriver._conn.listDevices = fakelistDevices def fake_nodeDeviceLookupByName(self, name): return FakeNodeDevice(_fake_NodeDevXml[name]) self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name') host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) actjson = drvr._get_pci_passthrough_devices() expectvfs = [ { "dev_id": "pci_0000_04_00_3", "address": "0000:04:00.3", "product_id": '1521', "vendor_id": '8086', "dev_type": 
fields.PciDeviceType.SRIOV_PF, "phys_function": None, "numa_node": None}, { "dev_id": "pci_0000_04_10_7", "domain": 0, "address": "0000:04:10.7", "product_id": '1520', "vendor_id": '8086', "numa_node": None, "dev_type": fields.PciDeviceType.SRIOV_VF, "phys_function": [('0x0000', '0x04', '0x00', '0x3')]}, { "dev_id": "pci_0000_04_11_7", "domain": 0, "address": "0000:04:11.7", "product_id": '1520', "vendor_id": '8086', "numa_node": 0, "dev_type": fields.PciDeviceType.SRIOV_VF, "phys_function": [('0x0000', '0x04', '0x00', '0x3')], } ] actualvfs = jsonutils.loads(actjson) for dev in range(len(actualvfs)): for key in actualvfs[dev].keys(): if key not in ['phys_function', 'virt_functions', 'label']: self.assertEqual(expectvfs[dev][key], actualvfs[dev][key]) def _fake_caps_numa_topology(self, cells_per_host=4, sockets_per_cell=1, cores_per_socket=1, threads_per_core=2, kb_mem=1048576): # Generate mempages list per cell cell_mempages = list() for cellid in range(cells_per_host): mempages_0 = vconfig.LibvirtConfigCapsNUMAPages() mempages_0.size = 4 mempages_0.total = 1024 * cellid mempages_1 = vconfig.LibvirtConfigCapsNUMAPages() mempages_1.size = 2048 mempages_1.total = 0 + cellid cell_mempages.append([mempages_0, mempages_1]) topology = fakelibvirt.HostInfo._gen_numa_topology(cells_per_host, sockets_per_cell, cores_per_socket, threads_per_core, kb_mem=kb_mem, numa_mempages_list=cell_mempages) return topology def _test_get_host_numa_topology(self, mempages): caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = arch.X86_64 caps.host.topology = self._fake_caps_numa_topology() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) expected_topo_dict = {'cells': [ {'cpus': '0,1', 'cpu_usage': 0, 'mem': {'total': 256, 'used': 0}, 'id': 0}, {'cpus': '3', 'cpu_usage': 0, 'mem': {'total': 256, 'used': 0}, 'id': 1}, {'cpus': '', 'cpu_usage': 0, 'mem': {'total': 256, 'used': 0}, 'id': 
2}, {'cpus': '', 'cpu_usage': 0, 'mem': {'total': 256, 'used': 0}, 'id': 3}]} with test.nested( mock.patch.object(host.Host, "get_capabilities", return_value=caps), mock.patch.object( hardware, 'get_vcpu_pin_set', return_value=set([0, 1, 3, 4, 5])), mock.patch.object(host.Host, 'get_online_cpus', return_value=set([0, 1, 2, 3, 6])), ): got_topo = drvr._get_host_numa_topology() got_topo_dict = got_topo._to_dict() self.assertThat( expected_topo_dict, matchers.DictMatches(got_topo_dict)) if mempages: # cells 0 self.assertEqual(4, got_topo.cells[0].mempages[0].size_kb) self.assertEqual(0, got_topo.cells[0].mempages[0].total) self.assertEqual(2048, got_topo.cells[0].mempages[1].size_kb) self.assertEqual(0, got_topo.cells[0].mempages[1].total) # cells 1 self.assertEqual(4, got_topo.cells[1].mempages[0].size_kb) self.assertEqual(1024, got_topo.cells[1].mempages[0].total) self.assertEqual(2048, got_topo.cells[1].mempages[1].size_kb) self.assertEqual(1, got_topo.cells[1].mempages[1].total) else: self.assertEqual([], got_topo.cells[0].mempages) self.assertEqual([], got_topo.cells[1].mempages) self.assertEqual(expected_topo_dict, got_topo_dict) self.assertEqual(set([]), got_topo.cells[0].pinned_cpus) self.assertEqual(set([]), got_topo.cells[1].pinned_cpus) self.assertEqual(set([]), got_topo.cells[2].pinned_cpus) self.assertEqual(set([]), got_topo.cells[3].pinned_cpus) self.assertEqual([set([0, 1])], got_topo.cells[0].siblings) self.assertEqual([], got_topo.cells[1].siblings) @mock.patch.object(host.Host, 'has_min_version', return_value=True) def test_get_host_numa_topology(self, mock_version): self._test_get_host_numa_topology(mempages=True) @mock.patch.object(fakelibvirt.Connection, 'getType') @mock.patch.object(fakelibvirt.Connection, 'getVersion') @mock.patch.object(fakelibvirt.Connection, 'getLibVersion') def test_get_host_numa_topology_no_mempages(self, mock_lib_version, mock_version, mock_type): self.flags(virt_type='kvm', group='libvirt') mock_lib_version.return_value = 
versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1 mock_version.return_value = versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) mock_type.return_value = host.HV_DRIVER_QEMU self._test_get_host_numa_topology(mempages=False) def test_get_host_numa_topology_empty(self): caps = vconfig.LibvirtConfigCaps() caps.host = vconfig.LibvirtConfigCapsHost() caps.host.cpu = vconfig.LibvirtConfigCPU() caps.host.cpu.arch = arch.X86_64 caps.host.topology = None drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with test.nested( mock.patch.object(host.Host, 'has_min_version', return_value=True), mock.patch.object(host.Host, "get_capabilities", return_value=caps) ) as (has_min_version, get_caps): self.assertIsNone(drvr._get_host_numa_topology()) self.assertEqual(2, get_caps.call_count) @mock.patch.object(fakelibvirt.Connection, 'getType') @mock.patch.object(fakelibvirt.Connection, 'getVersion') @mock.patch.object(fakelibvirt.Connection, 'getLibVersion') def test_get_host_numa_topology_old_version(self, mock_lib_version, mock_version, mock_type): self.flags(virt_type='kvm', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_lib_version.return_value = versionutils.convert_version_to_int( libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1 mock_version.return_value = versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) mock_type.return_value = host.HV_DRIVER_QEMU self.assertIsNone(drvr._get_host_numa_topology()) @mock.patch.object(fakelibvirt.Connection, 'getType') @mock.patch.object(fakelibvirt.Connection, 'getVersion') @mock.patch.object(fakelibvirt.Connection, 'getLibVersion') def test_get_host_numa_topology_xen(self, mock_lib_version, mock_version, mock_type): self.flags(virt_type='xen', group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) mock_lib_version.return_value = versionutils.convert_version_to_int( 
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) mock_version.return_value = versionutils.convert_version_to_int( libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) mock_type.return_value = host.HV_DRIVER_XEN self.assertIsNone(drvr._get_host_numa_topology()) def test_diagnostic_vcpus_exception(self): xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='filename'/> <target dev='vda' bus='virtio'/> </disk> <disk type='block'> <source dev='/path/to/dev/1'/> <target dev='vdb' bus='virtio'/> </disk> <interface type='network'> <mac address='52:54:00:a4:38:38'/> <source network='default'/> <target dev='vnet0'/> </interface> </devices> </domain> """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): raise fakelibvirt.libvirtError('vcpus missing') def blockStats(self, path): return (169, 688640, 0, 0, -1) def interfaceStats(self, path): return (4408, 82, 0, 0, 0, 0, 0, 0) def memoryStats(self): return {'actual': 220160, 'rss': 200164} def maxMemory(self): return 280160 def fake_get_domain(self, instance): return DiagFakeDomain() self.stubs.Set(host.Host, "get_domain", fake_get_domain) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'vda_read': 688640, 'vda_read_req': 169, 'vda_write': 0, 'vda_write_req': 0, 'vda_errors': -1, 'vdb_read': 688640, 'vdb_read_req': 169, 'vdb_write': 0, 'vdb_write_req': 0, 'vdb_errors': -1, 'memory': 280160, 'memory-actual': 220160, 'memory-rss': 200164, 'vnet0_rx': 4408, 'vnet0_rx_drop': 0, 'vnet0_rx_errors': 0, 'vnet0_rx_packets': 82, 'vnet0_tx': 0, 'vnet0_tx_drop': 0, 'vnet0_tx_errors': 0, 'vnet0_tx_packets': 0, } self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) self.useFixture(utils_fixture.TimeFixture(diags_time)) instance.launched_at = lt actual = 
drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [], 'disk_details': [{'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}, {'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}], 'driver': 'libvirt', 'hypervisor_os': 'linux', 'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [{'mac_address': '52:54:00:a4:38:38', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}], 'state': 'running', 'uptime': 10, 'version': '1.0'} self.assertEqual(expected, actual.serialize()) def test_diagnostic_blockstats_exception(self): xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='filename'/> <target dev='vda' bus='virtio'/> </disk> <disk type='block'> <source dev='/path/to/dev/1'/> <target dev='vdb' bus='virtio'/> </disk> <interface type='network'> <mac address='52:54:00:a4:38:38'/> <source network='default'/> <target dev='vnet0'/> </interface> </devices> </domain> """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): return ([(0, 1, 15340000000, 0), (1, 1, 1640000000, 0), (2, 1, 3040000000, 0), (3, 1, 1420000000, 0)], [(True, False), (True, False), (True, False), (True, False)]) def blockStats(self, path): raise fakelibvirt.libvirtError('blockStats missing') def interfaceStats(self, path): return (4408, 82, 0, 0, 0, 0, 0, 0) def memoryStats(self): return {'actual': 220160, 'rss': 200164} def maxMemory(self): return 280160 def fake_get_domain(self, instance): return DiagFakeDomain() self.stubs.Set(host.Host, "get_domain", fake_get_domain) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'cpu0_time': 15340000000, 'cpu1_time': 
1640000000, 'cpu2_time': 3040000000, 'cpu3_time': 1420000000, 'memory': 280160, 'memory-actual': 220160, 'memory-rss': 200164, 'vnet0_rx': 4408, 'vnet0_rx_drop': 0, 'vnet0_rx_errors': 0, 'vnet0_rx_packets': 82, 'vnet0_tx': 0, 'vnet0_tx_drop': 0, 'vnet0_tx_errors': 0, 'vnet0_tx_packets': 0, } self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) self.useFixture(utils_fixture.TimeFixture(diags_time)) instance.launched_at = lt actual = drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [{'time': 15340000000}, {'time': 1640000000}, {'time': 3040000000}, {'time': 1420000000}], 'disk_details': [], 'driver': 'libvirt', 'hypervisor_os': 'linux', 'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [{'mac_address': '52:54:00:a4:38:38', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}], 'state': 'running', 'uptime': 10, 'version': '1.0'} self.assertEqual(expected, actual.serialize()) def test_diagnostic_interfacestats_exception(self): xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='filename'/> <target dev='vda' bus='virtio'/> </disk> <disk type='block'> <source dev='/path/to/dev/1'/> <target dev='vdb' bus='virtio'/> </disk> <interface type='network'> <mac address='52:54:00:a4:38:38'/> <source network='default'/> <target dev='vnet0'/> </interface> </devices> </domain> """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): return ([(0, 1, 15340000000, 0), (1, 1, 1640000000, 0), (2, 1, 3040000000, 0), (3, 1, 1420000000, 0)], [(True, False), (True, False), (True, False), (True, False)]) def blockStats(self, path): return (169, 688640, 0, 0, -1) def interfaceStats(self, path): raise fakelibvirt.libvirtError('interfaceStat missing') def memoryStats(self): return 
{'actual': 220160, 'rss': 200164} def maxMemory(self): return 280160 def fake_get_domain(self, instance): return DiagFakeDomain() self.stubs.Set(host.Host, "get_domain", fake_get_domain) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'cpu0_time': 15340000000, 'cpu1_time': 1640000000, 'cpu2_time': 3040000000, 'cpu3_time': 1420000000, 'vda_read': 688640, 'vda_read_req': 169, 'vda_write': 0, 'vda_write_req': 0, 'vda_errors': -1, 'vdb_read': 688640, 'vdb_read_req': 169, 'vdb_write': 0, 'vdb_write_req': 0, 'vdb_errors': -1, 'memory': 280160, 'memory-actual': 220160, 'memory-rss': 200164, } self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) self.useFixture(utils_fixture.TimeFixture(diags_time)) instance.launched_at = lt actual = drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [{'time': 15340000000}, {'time': 1640000000}, {'time': 3040000000}, {'time': 1420000000}], 'disk_details': [{'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}, {'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}], 'driver': 'libvirt', 'hypervisor_os': 'linux', 'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [], 'state': 'running', 'uptime': 10, 'version': '1.0'} self.assertEqual(expected, actual.serialize()) def test_diagnostic_memorystats_exception(self): xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='filename'/> <target dev='vda' bus='virtio'/> </disk> <disk type='block'> <source dev='/path/to/dev/1'/> <target dev='vdb' bus='virtio'/> </disk> <interface type='network'> <mac address='52:54:00:a4:38:38'/> <source network='default'/> <target dev='vnet0'/> </interface> </devices> </domain> """ 
class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): return ([(0, 1, 15340000000, 0), (1, 1, 1640000000, 0), (2, 1, 3040000000, 0), (3, 1, 1420000000, 0)], [(True, False), (True, False), (True, False), (True, False)]) def blockStats(self, path): return (169, 688640, 0, 0, -1) def interfaceStats(self, path): return (4408, 82, 0, 0, 0, 0, 0, 0) def memoryStats(self): raise fakelibvirt.libvirtError('memoryStats missing') def maxMemory(self): return 280160 def fake_get_domain(self, instance): return DiagFakeDomain() self.stubs.Set(host.Host, "get_domain", fake_get_domain) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'cpu0_time': 15340000000, 'cpu1_time': 1640000000, 'cpu2_time': 3040000000, 'cpu3_time': 1420000000, 'vda_read': 688640, 'vda_read_req': 169, 'vda_write': 0, 'vda_write_req': 0, 'vda_errors': -1, 'vdb_read': 688640, 'vdb_read_req': 169, 'vdb_write': 0, 'vdb_write_req': 0, 'vdb_errors': -1, 'memory': 280160, 'vnet0_rx': 4408, 'vnet0_rx_drop': 0, 'vnet0_rx_errors': 0, 'vnet0_rx_packets': 82, 'vnet0_tx': 0, 'vnet0_tx_drop': 0, 'vnet0_tx_errors': 0, 'vnet0_tx_packets': 0, } self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) self.useFixture(utils_fixture.TimeFixture(diags_time)) instance.launched_at = lt actual = drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [{'time': 15340000000}, {'time': 1640000000}, {'time': 3040000000}, {'time': 1420000000}], 'disk_details': [{'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}, {'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}], 'driver': 'libvirt', 'hypervisor_os': 'linux', 
'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [{'mac_address': '52:54:00:a4:38:38', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}], 'state': 'running', 'uptime': 10, 'version': '1.0'} self.assertEqual(expected, actual.serialize()) def test_diagnostic_full(self): xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='filename'/> <target dev='vda' bus='virtio'/> </disk> <disk type='block'> <source dev='/path/to/dev/1'/> <target dev='vdb' bus='virtio'/> </disk> <interface type='network'> <mac address='52:54:00:a4:38:38'/> <source network='default'/> <target dev='vnet0'/> </interface> </devices> </domain> """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): return ([(0, 1, 15340000000, 0), (1, 1, 1640000000, 0), (2, 1, 3040000000, 0), (3, 1, 1420000000, 0)], [(True, False), (True, False), (True, False), (True, False)]) def blockStats(self, path): return (169, 688640, 0, 0, -1) def interfaceStats(self, path): return (4408, 82, 0, 0, 0, 0, 0, 0) def memoryStats(self): return {'actual': 220160, 'rss': 200164} def maxMemory(self): return 280160 def fake_get_domain(self, instance): return DiagFakeDomain() self.stubs.Set(host.Host, "get_domain", fake_get_domain) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'cpu0_time': 15340000000, 'cpu1_time': 1640000000, 'cpu2_time': 3040000000, 'cpu3_time': 1420000000, 'vda_read': 688640, 'vda_read_req': 169, 'vda_write': 0, 'vda_write_req': 0, 'vda_errors': -1, 'vdb_read': 688640, 'vdb_read_req': 169, 'vdb_write': 0, 'vdb_write_req': 0, 'vdb_errors': -1, 'memory': 280160, 'memory-actual': 220160, 'memory-rss': 200164, 'vnet0_rx': 4408, 'vnet0_rx_drop': 0, 'vnet0_rx_errors': 0, 'vnet0_rx_packets': 82, 'vnet0_tx': 0, 
'vnet0_tx_drop': 0, 'vnet0_tx_errors': 0, 'vnet0_tx_packets': 0, } self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) self.useFixture(utils_fixture.TimeFixture(diags_time)) instance.launched_at = lt actual = drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [{'time': 15340000000}, {'time': 1640000000}, {'time': 3040000000}, {'time': 1420000000}], 'disk_details': [{'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}, {'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}], 'driver': 'libvirt', 'hypervisor_os': 'linux', 'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [{'mac_address': '52:54:00:a4:38:38', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}], 'state': 'running', 'uptime': 10, 'version': '1.0'} self.assertEqual(expected, actual.serialize()) @mock.patch.object(host.Host, 'get_domain') def test_diagnostic_full_with_multiple_interfaces(self, mock_get_domain): xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='filename'/> <target dev='vda' bus='virtio'/> </disk> <disk type='block'> <source dev='/path/to/dev/1'/> <target dev='vdb' bus='virtio'/> </disk> <interface type='network'> <mac address='52:54:00:a4:38:38'/> <source network='default'/> <target dev='vnet0'/> </interface> <interface type="bridge"> <mac address="53:55:00:a5:39:39"/> <model type="virtio"/> <target dev="br0"/> </interface> </devices> </domain> """ class DiagFakeDomain(FakeVirtDomain): def __init__(self): super(DiagFakeDomain, self).__init__(fake_xml=xml) def vcpus(self): return ([(0, 1, 15340000000, 0), (1, 1, 1640000000, 0), (2, 1, 3040000000, 0), (3, 1, 1420000000, 0)], [(True, False), (True, False), (True, False), (True, False)]) 
def blockStats(self, path): return (169, 688640, 0, 0, -1) def interfaceStats(self, path): return (4408, 82, 0, 0, 0, 0, 0, 0) def memoryStats(self): return {'actual': 220160, 'rss': 200164} def maxMemory(self): return 280160 def fake_get_domain(self): return DiagFakeDomain() mock_get_domain.side_effect = fake_get_domain drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) instance = objects.Instance(**self.test_instance) actual = drvr.get_diagnostics(instance) expect = {'cpu0_time': 15340000000, 'cpu1_time': 1640000000, 'cpu2_time': 3040000000, 'cpu3_time': 1420000000, 'vda_read': 688640, 'vda_read_req': 169, 'vda_write': 0, 'vda_write_req': 0, 'vda_errors': -1, 'vdb_read': 688640, 'vdb_read_req': 169, 'vdb_write': 0, 'vdb_write_req': 0, 'vdb_errors': -1, 'memory': 280160, 'memory-actual': 220160, 'memory-rss': 200164, 'vnet0_rx': 4408, 'vnet0_rx_drop': 0, 'vnet0_rx_errors': 0, 'vnet0_rx_packets': 82, 'vnet0_tx': 0, 'vnet0_tx_drop': 0, 'vnet0_tx_errors': 0, 'vnet0_tx_packets': 0, 'br0_rx': 4408, 'br0_rx_drop': 0, 'br0_rx_errors': 0, 'br0_rx_packets': 82, 'br0_tx': 0, 'br0_tx_drop': 0, 'br0_tx_errors': 0, 'br0_tx_packets': 0, } self.assertEqual(actual, expect) lt = datetime.datetime(2012, 11, 22, 12, 00, 00) diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10) self.useFixture(utils_fixture.TimeFixture(diags_time)) instance.launched_at = lt actual = drvr.get_instance_diagnostics(instance) expected = {'config_drive': False, 'cpu_details': [{'time': 15340000000}, {'time': 1640000000}, {'time': 3040000000}, {'time': 1420000000}], 'disk_details': [{'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}, {'errors_count': 0, 'id': '', 'read_bytes': 688640, 'read_requests': 169, 'write_bytes': 0, 'write_requests': 0}], 'driver': 'libvirt', 'hypervisor_os': 'linux', 'memory_details': {'maximum': 2048, 'used': 1234}, 'nic_details': [{'mac_address': '52:54:00:a4:38:38', 'rx_drop': 0, 'rx_errors': 0, 
'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}, {'mac_address': '53:55:00:a5:39:39', 'rx_drop': 0, 'rx_errors': 0, 'rx_octets': 4408, 'rx_packets': 82, 'tx_drop': 0, 'tx_errors': 0, 'tx_octets': 0, 'tx_packets': 0}], 'state': 'running', 'uptime': 10., 'version': '1.0'} self.assertEqual(expected, actual.serialize()) @mock.patch.object(host.Host, "list_instance_domains") def test_failing_vcpu_count(self, mock_list): """Domain can fail to return the vcpu description in case it's just starting up or shutting down. Make sure None is handled gracefully. """ class DiagFakeDomain(object): def __init__(self, vcpus): self._vcpus = vcpus def vcpus(self): if self._vcpus is None: raise fakelibvirt.libvirtError("fake-error") else: return ([[1, 2, 3, 4]] * self._vcpus, [True] * self._vcpus) def ID(self): return 1 def name(self): return "instance000001" def UUIDString(self): return "19479fee-07a5-49bb-9138-d3738280d63c" mock_list.return_value = [ DiagFakeDomain(None), DiagFakeDomain(5)] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertEqual(5, drvr._get_vcpu_used()) mock_list.assert_called_with(only_guests=True, only_running=True) @mock.patch.object(host.Host, "list_instance_domains") def test_failing_vcpu_count_none(self, mock_list): """Domain will return zero if the current number of vcpus used is None. This is in case of VM state starting up or shutting down. None type returned is counted as zero. 
""" class DiagFakeDomain(object): def __init__(self): pass def vcpus(self): return None def ID(self): return 1 def name(self): return "instance000001" mock_list.return_value = [DiagFakeDomain()] drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) self.assertEqual(0, drvr._get_vcpu_used()) mock_list.assert_called_with(only_guests=True, only_running=True) def test_get_instance_capabilities(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) def get_host_capabilities_stub(self): caps = vconfig.LibvirtConfigCaps() guest = vconfig.LibvirtConfigGuest() guest.ostype = 'hvm' guest.arch = arch.X86_64 guest.domtype = ['kvm', 'qemu'] caps.guests.append(guest) guest = vconfig.LibvirtConfigGuest() guest.ostype = 'hvm' guest.arch = arch.I686 guest.domtype = ['kvm'] caps.guests.append(guest) return caps self.stubs.Set(host.Host, "get_capabilities", get_host_capabilities_stub) want = [(arch.X86_64, 'kvm', 'hvm'), (arch.X86_64, 'qemu', 'hvm'), (arch.I686, 'kvm', 'hvm')] got = drvr._get_instance_capabilities() self.assertEqual(want, got) def test_set_cache_mode(self): self.flags(disk_cachemodes=['file=directsync'], group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) fake_conf = FakeConfigGuestDisk() fake_conf.source_type = 'file' drvr._set_cache_mode(fake_conf) self.assertEqual(fake_conf.driver_cache, 'directsync') def test_set_cache_mode_invalid_mode(self): self.flags(disk_cachemodes=['file=FAKE'], group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) fake_conf = FakeConfigGuestDisk() fake_conf.source_type = 'file' drvr._set_cache_mode(fake_conf) self.assertIsNone(fake_conf.driver_cache) def test_set_cache_mode_invalid_object(self): self.flags(disk_cachemodes=['file=directsync'], group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True) fake_conf = FakeConfigGuest() fake_conf.driver_cache = 'fake' drvr._set_cache_mode(fake_conf) self.assertEqual(fake_conf.driver_cache, 'fake') 
    @mock.patch('os.unlink')
    @mock.patch.object(os.path, 'exists')
    def _test_shared_storage_detection(self, is_same,
                                       mock_exists, mock_unlink):
        # Helper: probe shared-storage detection by creating a marker file
        # remotely and checking whether it appears locally (is_same).
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.get_host_ip_addr = mock.MagicMock(return_value='bar')
        mock_exists.return_value = is_same
        with test.nested(
                mock.patch.object(drvr._remotefs, 'create_file'),
                mock.patch.object(drvr._remotefs, 'remove_file')
        ) as (mock_rem_fs_create, mock_rem_fs_remove):
            result = drvr._is_storage_shared_with('host', '/path')
        mock_rem_fs_create.assert_any_call('host', mock.ANY)
        create_args, create_kwargs = mock_rem_fs_create.call_args
        self.assertTrue(create_args[1].startswith('/path'))
        if is_same:
            # Shared: marker is visible locally, so it is unlinked locally.
            mock_unlink.assert_called_once_with(mock.ANY)
        else:
            # Not shared: marker must be removed on the remote host instead.
            mock_rem_fs_remove.assert_called_with('host', mock.ANY)
            remove_args, remove_kwargs = mock_rem_fs_remove.call_args
            self.assertTrue(remove_args[1].startswith('/path'))
        return result

    def test_shared_storage_detection_same_host(self):
        self.assertTrue(self._test_shared_storage_detection(True))

    def test_shared_storage_detection_different_host(self):
        self.assertFalse(self._test_shared_storage_detection(False))

    def test_shared_storage_detection_easy(self):
        # If the destination IP equals the local host IP, storage is
        # trivially shared; no file probing should be needed.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(drvr, 'get_host_ip_addr')
        self.mox.StubOutWithMock(utils, 'execute')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(os, 'unlink')
        drvr.get_host_ip_addr().AndReturn('foo')
        self.mox.ReplayAll()
        self.assertTrue(drvr._is_storage_shared_with('foo', '/path'))

    def test_store_pid_remove_pid(self):
        # Job tracker stores a pid per instance uuid and drops the uuid
        # entry once its last job is removed.
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        popen = mock.Mock(pid=3)
        drvr.job_tracker.add_job(instance, popen.pid)
        self.assertIn(3, drvr.job_tracker.jobs[instance.uuid])
        drvr.job_tracker.remove_job(instance, popen.pid)
        self.assertNotIn(instance.uuid, drvr.job_tracker.jobs)

    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_get_domain_info_with_more_return(self, mock_get_domain):
        # dom.info() yields (state, maxmem, mem, ncpu, cputime, extra);
        # get_info must map the first five fields and tolerate extras.
        instance = objects.Instance(**self.test_instance)
        dom_mock = mock.MagicMock()
        dom_mock.info.return_value = [
            1, 2048, 737, 8, 12345, 888888
        ]
        dom_mock.ID.return_value = mock.sentinel.instance_id
        mock_get_domain.return_value = dom_mock
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_info(instance)
        self.assertEqual(1, info.state)
        self.assertEqual(2048, info.max_mem_kb)
        self.assertEqual(737, info.mem_kb)
        self.assertEqual(8, info.num_cpu)
        self.assertEqual(12345, info.cpu_time_ns)
        self.assertEqual(mock.sentinel.instance_id, info.id)
        dom_mock.info.assert_called_once_with()
        dom_mock.ID.assert_called_once_with()
        mock_get_domain.assert_called_once_with(instance)

    def test_create_domain(self):
        # _create_domain wraps the libvirt domain in a Guest and launches
        # it via createWithFlags(0).
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_domain = mock.MagicMock()

        guest = drvr._create_domain(domain=mock_domain)

        self.assertEqual(mock_domain, guest._domain)
        mock_domain.createWithFlags.assert_has_calls([mock.call(0)])

    @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_create_domain_lxc(self, mock_get_inst_path, mock_ensure_tree,
                               mock_setup_container, mock_get_info,
                               mock_clean):
        # LXC boot path: the container rootfs is set up from the image,
        # recorded in system_metadata, and the namespace cleaned after a
        # successful (RUNNING) start.
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.RUNNING)

        with test.nested(
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
            drvr._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [], None)

        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        self.assertFalse(mock_instance.called)
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                             'disk')])

        setup_container_call = mock.call(
            mock_image.get_model(),
            container_dir='/tmp/rootfs')
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])

    @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch.object(fake_libvirt_utils, 'chown_for_id_maps')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_create_domain_lxc_id_maps(self, mock_get_inst_path,
                                       mock_ensure_tree,
                                       mock_setup_container,
                                       mock_chown, mock_get_info,
                                       mock_clean):
        # As test_create_domain_lxc but with uid/gid maps configured: the
        # rootfs must be chowned with LibvirtConfigGuest{U,G}IDMap entries
        # matching the "0:1000:100" flags.
        self.flags(virt_type='lxc', uid_maps=["0:1000:100"],
                   gid_maps=["0:1000:100"], group='libvirt')

        def chown_side_effect(path, id_maps):
            self.assertEqual('/tmp/rootfs', path)
            self.assertIsInstance(id_maps[0], vconfig.LibvirtConfigGuestUIDMap)
            self.assertEqual(0, id_maps[0].start)
            self.assertEqual(1000, id_maps[0].target)
            self.assertEqual(100, id_maps[0].count)
            self.assertIsInstance(id_maps[1], vconfig.LibvirtConfigGuestGIDMap)
            self.assertEqual(0, id_maps[1].start)
            self.assertEqual(1000, id_maps[1].target)
            self.assertEqual(100, id_maps[1].count)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_chown.side_effect = chown_side_effect
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.RUNNING)

        with test.nested(
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')
        ) as (
            mock_is_booted_from_volume, mock_create_domain, mock_plug_vifs,
            mock_setup_basic_filtering, mock_prepare_instance_filter,
            mock_apply_instance_filter
        ):
            drvr._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [], None)

        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        self.assertFalse(mock_instance.called)
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_is_booted_from_volume.assert_called_once_with(mock_instance, {})
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                             'disk')])

        setup_container_call = mock.call(
            mock_image.get_model(),
            container_dir='/tmp/rootfs')
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])

    @mock.patch('nova.virt.disk.api.teardown_container')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('oslo_utils.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_create_domain_lxc_not_running(self, mock_get_inst_path,
                                           mock_ensure_tree,
                                           mock_setup_container,
                                           mock_get_info, mock_teardown):
        # If the container fails to reach RUNNING (SHUTDOWN here) the
        # container dir must be torn down rather than namespace-cleaned.
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.SHUTDOWN)

        with test.nested(
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
            drvr._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [], None)

        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        self.assertFalse(mock_instance.called)
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                             'disk')])

        setup_container_call = mock.call(
            mock_image.get_model(),
            container_dir='/tmp/rootfs')
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        teardown_call = mock.call(container_dir='/tmp/rootfs')
        mock_teardown.assert_has_calls([teardown_call])
    def test_create_domain_define_xml_fails(self):
        """Tests that the xml is logged when defining the domain fails."""
        fake_xml = "<test>this is a test</test>"

        def fake_defineXML(xml):
            # The driver must pass the exact XML through to libvirt.
            self.assertEqual(fake_xml, xml)
            raise fakelibvirt.libvirtError('virDomainDefineXML() failed')

        def fake_safe_decode(text, *args, **kwargs):
            return text + 'safe decoded'

        self.log_error_called = False

        def fake_error(msg, *args, **kwargs):
            self.log_error_called = True
            self.assertIn(fake_xml, msg % args)
            self.assertIn('safe decoded', msg % args)

        self.stubs.Set(encodeutils, 'safe_decode', fake_safe_decode)
        self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

        self.create_fake_libvirt_mock(defineXML=fake_defineXML)
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
                          fake_xml)
        self.assertTrue(self.log_error_called)

    def test_create_domain_with_flags_fails(self):
        """Tests that the xml is logged when creating the domain with flags
        fails
        """
        fake_xml = "<test>this is a test</test>"
        fake_domain = FakeVirtDomain(fake_xml)

        def fake_createWithFlags(launch_flags):
            raise fakelibvirt.libvirtError('virDomainCreateWithFlags() failed')

        self.log_error_called = False

        def fake_error(msg, *args, **kwargs):
            self.log_error_called = True
            self.assertIn(fake_xml, msg % args)

        self.stubs.Set(fake_domain, 'createWithFlags', fake_createWithFlags)
        self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

        self.create_fake_libvirt_mock()
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
                          domain=fake_domain)
        self.assertTrue(self.log_error_called)

    def test_create_domain_enable_hairpin_fails(self):
        """Tests that the xml is logged when enabling hairpin mode for the
        domain fails.
        """
        # power_on=False skips the launch path so the hairpin-enable step
        # is the first thing that can fail.
        fake_xml = "<test>this is a test</test>"
        fake_domain = FakeVirtDomain(fake_xml)

        def fake_execute(*args, **kwargs):
            raise processutils.ProcessExecutionError('error')

        def fake_get_interfaces(*args):
            return ["dev"]

        self.log_error_called = False

        def fake_error(msg, *args, **kwargs):
            self.log_error_called = True
            self.assertIn(fake_xml, msg % args)

        self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

        self.create_fake_libvirt_mock()
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.stubs.Set(nova.utils, 'execute', fake_execute)
        self.stubs.Set(
            nova.virt.libvirt.guest.Guest, 'get_interfaces',
            fake_get_interfaces)

        self.assertRaises(processutils.ProcessExecutionError,
                          drvr._create_domain,
                          domain=fake_domain,
                          power_on=False)
        self.assertTrue(self.log_error_called)

    def test_get_vnc_console(self):
        # VNC port is read from the domain XML <graphics type='vnc'>.
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<graphics type='vnc' port='5900'/>"
                    "</devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        vnc_dict = drvr.get_vnc_console(self.context, instance)
        self.assertEqual(vnc_dict.port, '5900')

    def test_get_vnc_console_unavailable(self):
        # No <graphics> element => ConsoleTypeUnavailable.
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices></devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.ConsoleTypeUnavailable,
                          drvr.get_vnc_console, self.context, instance)

    def test_get_spice_console(self):
        # SPICE port is read from the domain XML <graphics type='spice'>.
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<graphics type='spice' port='5950'/>"
                    "</devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        spice_dict = drvr.get_spice_console(self.context, instance)
        self.assertEqual(spice_dict.port, '5950')

    def test_get_spice_console_unavailable(self):
        # No <graphics> element => ConsoleTypeUnavailable.
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices></devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.ConsoleTypeUnavailable,
                          drvr.get_spice_console, self.context, instance)

    def test_detach_volume_with_instance_not_found(self):
        # Test that detach_volume() method does not raise exception,
        # if the instance does not exist.
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with test.nested(
            mock.patch.object(host.Host, 'get_domain',
                              side_effect=exception.InstanceNotFound(
                                  instance_id=instance.uuid)),
            mock.patch.object(drvr, '_disconnect_volume')
        ) as (_get_domain, _disconnect_volume):
            connection_info = {'driver_volume_type': 'fake'}
            drvr.detach_volume(connection_info, instance, '/dev/sda')
            _get_domain.assert_called_once_with(instance)
            # The volume is still disconnected even though the domain
            # lookup failed.
            _disconnect_volume.assert_called_once_with(connection_info,
                                                       'sda')

    def _test_attach_detach_interface_get_config(self, method_name):
        """Tests that the get_config() method is properly called in
        attach_interface() and detach_interface().

        method_name: either \"attach_interface\" or \"detach_interface\"
                     depending on the method to test.
        """
        self.stubs.Set(host.Host, "get_domain", lambda a, b: FakeVirtDomain())

        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self, 1)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        fake_image_meta = objects.ImageMeta.from_dict(
            {'id': instance['image_ref']})

        if method_name == "attach_interface":
            self.mox.StubOutWithMock(drvr.firewall_driver,
                                     'setup_basic_filtering')
            drvr.firewall_driver.setup_basic_filtering(instance, network_info)

        expected = drvr.vif_driver.get_config(instance, network_info[0],
                                              fake_image_meta,
                                              instance.get_flavor(),
                                              CONF.libvirt.virt_type,
                                              drvr._host)
        self.mox.StubOutWithMock(drvr.vif_driver, 'get_config')
        drvr.vif_driver.get_config(instance, network_info[0],
                                   mox.IsA(objects.ImageMeta),
                                   mox.IsA(objects.Flavor),
                                   CONF.libvirt.virt_type,
                                   drvr._host).\
                                   AndReturn(expected)

        self.mox.ReplayAll()

        if method_name == "attach_interface":
            drvr.attach_interface(instance, fake_image_meta,
                                  network_info[0])
        elif method_name == "detach_interface":
            drvr.detach_interface(instance, network_info[0])
        else:
            raise ValueError("Unhandled method %s" % method_name)

    @mock.patch.object(lockutils, "external_lock")
    def test_attach_interface_get_config(self, mock_lock):
        """Tests that the get_config() method is properly called in
        attach_interface().
        """
        mock_lock.return_value = threading.Semaphore()

        self._test_attach_detach_interface_get_config("attach_interface")

    def test_detach_interface_get_config(self):
        """Tests that the get_config() method is properly called in
        detach_interface().
        """
        self._test_attach_detach_interface_get_config("detach_interface")

    def test_default_root_device_name(self):
        # Root device name is derived from the disk/cdrom buses reported
        # by blockinfo for the image.
        instance = {'uuid': 'fake_instance'}
        image_meta = objects.ImageMeta.from_dict({'id': uuids.image_id})
        root_bdm = {'source_type': 'image',
                    'destination_type': 'volume',
                    'image_id': 'fake_id'}
        self.flags(virt_type='qemu', group='libvirt')

        self.mox.StubOutWithMock(blockinfo, 'get_disk_bus_for_device_type')
        self.mox.StubOutWithMock(blockinfo, 'get_root_info')

        blockinfo.get_disk_bus_for_device_type(instance,
                                               'qemu',
                                               image_meta,
                                               'disk').InAnyOrder().\
                                                AndReturn('virtio')
        blockinfo.get_disk_bus_for_device_type(instance,
                                               'qemu',
                                               image_meta,
                                               'cdrom').InAnyOrder().\
                                                AndReturn('ide')
        blockinfo.get_root_info(instance, 'qemu',
                                image_meta, root_bdm,
                                'virtio', 'ide').AndReturn({'dev': 'vda'})
        self.mox.ReplayAll()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertEqual(drvr.default_root_device_name(instance, image_meta,
                                                       root_bdm), '/dev/vda')

    @mock.patch.object(objects.BlockDeviceMapping, "save")
    def test_default_device_names_for_instance(self, save_mock):
        instance = objects.Instance(**self.test_instance)
        instance.root_device_name = '/dev/vda'
        ephemerals = [objects.BlockDeviceMapping(
                        **fake_block_device.AnonFakeDbBlockDeviceDict(
                            {'device_name': 'vdb',
                             'source_type': 'blank',
                             'volume_size': 2,
                             'destination_type': 'local'}))]
        swap = [objects.BlockDeviceMapping(
                        **fake_block_device.AnonFakeDbBlockDeviceDict(
                            {'device_name': 'vdg',
                             'source_type': 'blank',
                             'volume_size': 512,
                             'guest_format': 'swap',
                             'destination_type': 'local'}))]
        block_device_mapping = [
            objects.BlockDeviceMapping(
                **fake_block_device.AnonFakeDbBlockDeviceDict(
                    {'source_type': 'volume',
                     'destination_type': 'volume',
                     'volume_id': 'fake-image-id',
                     'device_name': '/dev/vdxx',
                     'disk_bus': 'scsi'}))]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.default_device_names_for_instance(instance,
                                               instance.root_device_name,
                                               ephemerals, swap,
                                               block_device_mapping)

        # Ephemeral device name was correct so no changes
        self.assertEqual('/dev/vdb', ephemerals[0].device_name)
        # Swap device name was incorrect so it was changed
        self.assertEqual('/dev/vdc', swap[0].device_name)
        # Volume device name was changed too, taking the bus into account
        self.assertEqual('/dev/sda', block_device_mapping[0].device_name)

        self.assertEqual(3, save_mock.call_count)

    def _test_get_device_name_for_instance(self, new_bdm, expected_dev):
        # Helper: ask the driver for the next free device name given a
        # root on /dev/vda and no existing BDMs.
        instance = objects.Instance(**self.test_instance)
        instance.root_device_name = '/dev/vda'
        instance.ephemeral_gb = 0
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        got_dev = drvr.get_device_name_for_instance(
            instance, [], new_bdm)
        self.assertEqual(expected_dev, got_dev)

    def test_get_device_name_for_instance_simple(self):
        new_bdm = objects.BlockDeviceMapping(
            context=context,
            source_type='volume', destination_type='volume',
            boot_index=-1, volume_id='fake-id',
            device_name=None, guest_format=None,
            disk_bus=None, device_type=None)
        self._test_get_device_name_for_instance(new_bdm, '/dev/vdb')

    def test_get_device_name_for_instance_suggested(self):
        # A caller-suggested name ('/dev/vdg') is ignored; the next free
        # slot is used instead.
        new_bdm = objects.BlockDeviceMapping(
            context=context,
            source_type='volume', destination_type='volume',
            boot_index=-1, volume_id='fake-id',
            device_name='/dev/vdg', guest_format=None,
            disk_bus=None, device_type=None)
        self._test_get_device_name_for_instance(new_bdm, '/dev/vdb')

    def test_get_device_name_for_instance_bus(self):
        # A scsi bus switches the prefix to 'sd'.
        new_bdm = objects.BlockDeviceMapping(
            context=context,
            source_type='volume', destination_type='volume',
            boot_index=-1, volume_id='fake-id',
            device_name=None, guest_format=None,
            disk_bus='scsi', device_type=None)
        self._test_get_device_name_for_instance(new_bdm, '/dev/sda')

    def test_get_device_name_for_instance_device_type(self):
        # A floppy device type switches the prefix to 'fd'.
        new_bdm = objects.BlockDeviceMapping(
            context=context,
            source_type='volume', destination_type='volume',
            boot_index=-1, volume_id='fake-id',
            device_name=None, guest_format=None,
            disk_bus=None, device_type='floppy')
        self._test_get_device_name_for_instance(new_bdm, '/dev/fda')

    def test_is_supported_fs_format(self):
        supported_fs = [disk_api.FS_FORMAT_EXT2, disk_api.FS_FORMAT_EXT3,
                        disk_api.FS_FORMAT_EXT4, disk_api.FS_FORMAT_XFS]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        for fs in supported_fs:
            self.assertTrue(drvr.is_supported_fs_format(fs))

        supported_fs = ['', 'dummy']
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        for fs in supported_fs:
            self.assertFalse(drvr.is_supported_fs_format(fs))

    def test_post_live_migration_at_destination_with_block_device_info(self):
        # Preparing mocks
        mock_domain = self.mox.CreateMock(fakelibvirt.virDomain)
        self.resultXML = None

        def fake_getLibVersion():
            return fakelibvirt.FAKE_LIBVIRT_VERSION

        def fake_getCapabilities():
            return """
            <capabilities>
                <host>
                    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                    <cpu>
                      <arch>x86_64</arch>
                      <model>Penryn</model>
                      <vendor>Intel</vendor>
                      <topology sockets='1' cores='2' threads='1'/>
                      <feature name='xtpr'/>
                    </cpu>
                </host>
            </capabilities>
            """

        def fake_to_xml(context, instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None, write_to_disk=False):
            # Capture the generated guest XML so the test can assert on it.
            if image_meta is None:
                image_meta = objects.ImageMeta.from_dict({})
            conf = drvr._get_guest_config(instance, network_info, image_meta,
                                          disk_info, rescue,
                                          block_device_info)
            self.resultXML = conf.to_xml()
            return self.resultXML

        def fake_get_domain(instance):
            return mock_domain

        def fake_baselineCPU(cpu, flag):
            return """<cpu mode='custom' match='exact'>
                        <model fallback='allow'>Westmere</model>
                        <vendor>Intel</vendor>
                        <feature policy='require' name='aes'/>
                      </cpu>
                   """

        network_info = _fake_network_info(self, 1)
        self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                      getCapabilities=fake_getCapabilities,
                                      getVersion=lambda: 1005001,
                                      listDefinedDomains=lambda: [],
                                      numOfDomains=lambda: 0,
                                      baselineCPU=fake_baselineCPU)
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456  # we send an int to test sha1 call
        instance = objects.Instance(**instance_ref)

        self.mox.ReplayAll()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr,
                       '_get_guest_xml',
                       fake_to_xml)
        self.stubs.Set(host.Host,
                       'get_domain',
                       fake_get_domain)
        bdm = objects.BlockDeviceMapping(
            self.context,
            **fake_block_device.FakeDbBlockDeviceDict(
                {'id': 1, 'guest_format': None,
                 'boot_index': 0,
                 'source_type': 'volume',
                 'destination_type': 'volume',
                 'device_name': '/dev/vda',
                 'disk_bus': 'virtio',
                 'device_type': 'disk',
                 'delete_on_termination': False}))
        block_device_info = {'block_device_mapping':
                driver_block_device.convert_volumes([bdm])}
        block_device_info['block_device_mapping'][0]['connection_info'] = (
                {'driver_volume_type': 'iscsi'})
        with test.nested(
                mock.patch.object(
                    driver_block_device.DriverVolumeBlockDevice, 'save'),
                mock.patch.object(objects.Instance, 'save')
        ) as (mock_volume_save, mock_instance_save):
            drvr.post_live_migration_at_destination(
                    self.context, instance, network_info, True,
                    block_device_info=block_device_info)
            self.assertIn('fake', self.resultXML)
            mock_volume_save.assert_called_once_with()

    def test_create_propagates_exceptions(self):
        # Failures in _create_domain must propagate out of
        # _create_domain_and_network.
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(id=1, uuid=uuids.instance,
                                    image_ref='my_fake_image')

        with test.nested(
              mock.patch.object(drvr, '_create_domain_setup_lxc'),
              mock.patch.object(drvr, '_create_domain_cleanup_lxc'),
              mock.patch.object(drvr, '_is_booted_from_volume',
                                return_value=False),
              mock.patch.object(drvr, 'plug_vifs'),
              mock.patch.object(drvr, 'firewall_driver'),
              mock.patch.object(drvr, '_create_domain',
                                side_effect=exception.NovaException),
              mock.patch.object(drvr, 'cleanup')):
            self.assertRaises(exception.NovaException,
                              drvr._create_domain_and_network,
                              self.context,
                              'xml',
                              instance, None, None)

    def test_create_without_pause(self):
        # LXC path with no VIF events: the guest is created unpaused and
        # never resumed.
        self.flags(virt_type='lxc', group='libvirt')

        @contextlib.contextmanager
        def fake_lxc_disk_handler(*args, **kwargs):
            yield

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)

        with test.nested(
              mock.patch.object(drvr, '_lxc_disk_handler',
                                side_effect=fake_lxc_disk_handler),
              mock.patch.object(drvr, 'plug_vifs'),
              mock.patch.object(drvr, 'firewall_driver'),
              mock.patch.object(drvr, '_create_domain'),
              mock.patch.object(drvr, 'cleanup')) as (
              _handler, cleanup, firewall_driver, create, plug_vifs):
            domain = drvr._create_domain_and_network(self.context, 'xml',
                                                     instance, None, None)
            self.assertEqual(0, create.call_args_list[0][1]['pause'])
            self.assertEqual(0, domain.resume.call_count)

    def _test_create_with_network_events(self, neutron_failure=None,
                                         power_on=True):
        # Helper exercising the network-vif-plugged event wait during
        # domain creation; neutron_failure in (None, 'timeout', 'error').
        generated_events = []

        def wait_timeout():
            event = mock.MagicMock()
            if neutron_failure == 'timeout':
                raise eventlet.timeout.Timeout()
            elif neutron_failure == 'error':
                event.status = 'failed'
            else:
                event.status = 'completed'
            return event

        def fake_prepare(instance, event_name):
            m = mock.MagicMock()
            m.instance = instance
            m.event_name = event_name
            m.wait.side_effect = wait_timeout
            generated_events.append(m)
            return m

        virtapi = manager.ComputeVirtAPI(mock.MagicMock())
        prepare = virtapi._compute.instance_events.prepare_for_instance_event
        prepare.side_effect = fake_prepare
        drvr = libvirt_driver.LibvirtDriver(virtapi, False)

        instance = objects.Instance(**self.test_instance)
        vifs = [{'id': 'vif1', 'active': False},
                {'id': 'vif2', 'active': False}]

        @mock.patch.object(drvr, 'plug_vifs')
        @mock.patch.object(drvr, 'firewall_driver')
        @mock.patch.object(drvr, '_create_domain')
        @mock.patch.object(drvr, 'cleanup')
        def test_create(cleanup, create, fw_driver, plug_vifs):
            domain = drvr._create_domain_and_network(self.context, 'xml',
                                                     instance, vifs, None,
                                                     power_on=power_on)
            plug_vifs.assert_called_with(instance, vifs)

            pause = self._get_pause_flag(drvr, vifs, power_on=power_on)
            self.assertEqual(pause,
                             create.call_args_list[0][1]['pause'])
            if pause:
                domain.resume.assert_called_once_with()
            if neutron_failure and CONF.vif_plugging_is_fatal:
                cleanup.assert_called_once_with(self.context,
                                                instance, network_info=vifs,
                                                block_device_info=None)

        test_create()

        if utils.is_neutron() and CONF.vif_plugging_timeout and power_on:
            prepare.assert_has_calls([
                mock.call(instance, 'network-vif-plugged-vif1'),
                mock.call(instance, 'network-vif-plugged-vif2')])
            for event in generated_events:
                if neutron_failure and generated_events.index(event) != 0:
                    # All other events should be unused when the first fails.
                    self.assertEqual(0, event.call_count)
                elif (neutron_failure == 'error' and
                          not CONF.vif_plugging_is_fatal):
                    event.wait.assert_called_once_with()
        else:
            self.assertEqual(0, prepare.call_count)

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron(self, is_neutron):
        self._test_create_with_network_events()

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_power_off(self,
                                                          is_neutron):
        # Tests that we don't wait for events if we don't start the instance.
        self._test_create_with_network_events(power_on=False)

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_nowait(self, is_neutron):
        self.flags(vif_plugging_timeout=0)
        self._test_create_with_network_events()

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_nonfatal_timeout(
            self, is_neutron):
        self.flags(vif_plugging_is_fatal=False)
        self._test_create_with_network_events(neutron_failure='timeout')

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_fatal_timeout(
            self, is_neutron):
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          self._test_create_with_network_events,
                          neutron_failure='timeout')

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_nonfatal_error(
            self, is_neutron):
        self.flags(vif_plugging_is_fatal=False)
        self._test_create_with_network_events(neutron_failure='error')

    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_fatal_error(
            self, is_neutron):
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          self._test_create_with_network_events,
                          neutron_failure='error')

    @mock.patch('nova.utils.is_neutron', return_value=False)
    def test_create_with_network_events_non_neutron(self, is_neutron):
        self._test_create_with_network_events()

    @mock.patch('nova.volume.encryptors.get_encryption_metadata')
    @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
    def test_create_with_bdm(self, get_info_from_bdm,
                             get_encryption_metadata):
        # Encrypted-volume BDM path: encryption metadata is fetched and an
        # encryptor obtained before the domain is created.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_dom = mock.MagicMock()
        mock_encryption_meta = mock.MagicMock()
        get_encryption_metadata.return_value = mock_encryption_meta

        fake_xml = """
            <domain>
                <name>instance-00000001</name>
                <memory>1048576</memory>
                <vcpu>1</vcpu>
                <devices>
                    <disk type='file' device='disk'>
                        <driver name='qemu' type='raw' cache='none'/>
                        <source file='/path/fake-volume1'/>
                        <target dev='vda' bus='virtio'/>
                    </disk>
                </devices>
            </domain>
        """
        fake_volume_id = "fake-volume-id"
        connection_info = {"driver_volume_type": "fake",
                           "data": {"access_mode": "rw",
                                    "volume_id": fake_volume_id}}

        def fake_getitem(*args, **kwargs):
            fake_bdm = {'connection_info': connection_info,
                        'mount_device': '/dev/vda'}
            return fake_bdm.get(args[0])

        mock_volume = mock.MagicMock()
        mock_volume.__getitem__.side_effect = fake_getitem
        block_device_info = {'block_device_mapping': [mock_volume]}
        network_info = [network_model.VIF(id='1'),
                        network_model.VIF(id='2', active=True)]

        with test.nested(
            mock.patch.object(drvr, '_get_volume_encryptor'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver,
                              'prepare_instance_filter'),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'),
        ) as (get_volume_encryptor, plug_vifs, setup_basic_filtering,
              prepare_instance_filter, create_domain, apply_instance_filter):
            create_domain.return_value = libvirt_guest.Guest(mock_dom)

            guest = drvr._create_domain_and_network(
                    self.context, fake_xml, instance, network_info, None,
                    block_device_info=block_device_info)

            get_encryption_metadata.assert_called_once_with(self.context,
                drvr._volume_api, fake_volume_id, connection_info)
            get_volume_encryptor.assert_called_once_with(connection_info,
                                                         mock_encryption_meta)
            plug_vifs.assert_called_once_with(instance, network_info)
            setup_basic_filtering.assert_called_once_with(instance,
                                                          network_info)
            prepare_instance_filter.assert_called_once_with(instance,
                                                          network_info)
            pause = self._get_pause_flag(drvr, network_info)
            create_domain.assert_called_once_with(
                fake_xml, pause=pause, power_on=True, post_xml_callback=None)
            self.assertEqual(mock_dom, guest._domain)

    def test_get_guest_storage_config(self):
        # Root + ephemeral + attached volume yield three devices; default
        # ephemeral/swap device names are recorded on the instance.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        test_instance = copy.deepcopy(self.test_instance)
        test_instance["default_swap_device"] = None
        instance = objects.Instance(**test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)
        flavor = instance.get_flavor()
        conn_info = {'driver_volume_type': 'fake', 'data': {}}
        bdm = objects.BlockDeviceMapping(
            self.context,
            **fake_block_device.FakeDbBlockDeviceDict({
                   'id': 1,
                   'source_type': 'volume',
                   'destination_type': 'volume',
                   'device_name': '/dev/vdc'}))
        bdi = {'block_device_mapping':
               driver_block_device.convert_volumes([bdm])}
        bdm = bdi['block_device_mapping'][0]
        bdm['connection_info'] = conn_info
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta,
                                            bdi)
        mock_conf = mock.MagicMock(source_path='fake')

        with test.nested(
            mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
                              'save'),
            mock.patch.object(drvr, '_connect_volume'),
            mock.patch.object(drvr, '_get_volume_config',
                              return_value=mock_conf),
            mock.patch.object(drvr, '_set_cache_mode')
        ) as (volume_save, connect_volume, get_volume_config,
              set_cache_mode):
            devices = drvr._get_guest_storage_config(instance, image_meta,
                disk_info, False, bdi, flavor, "hvm")

            self.assertEqual(3, len(devices))
            self.assertEqual('/dev/vdb', instance.default_ephemeral_device)
            self.assertIsNone(instance.default_swap_device)
            connect_volume.assert_called_with(bdm['connection_info'],
                                              {'bus': 'virtio',
                                               'type': 'disk',
                                               'dev': 'vdc'})
            get_volume_config.assert_called_with(bdm['connection_info'],
                                                 {'bus': 'virtio',
                                                  'type': 'disk',
                                                  'dev': 'vdc'})
            volume_save.assert_called_once_with()
            self.assertEqual(3, set_cache_mode.call_count)

    def test_get_neutron_events(self):
        # Only inactive VIFs generate a network-vif-plugged event to wait on.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        network_info = [network_model.VIF(id='1'),
                        network_model.VIF(id='2', active=True)]
        events = drvr._get_neutron_events(network_info)
        self.assertEqual([('network-vif-plugged', '1')], events)

    def test_unplug_vifs_ignores_errors(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        with mock.patch.object(drvr, 'vif_driver') as vif_driver:
            vif_driver.unplug.side_effect = exception.AgentError(
                method='unplug')
            # ignore_errors=True swallows the AgentError.
            drvr._unplug_vifs('inst', [1], ignore_errors=True)
            vif_driver.unplug.assert_called_once_with('inst', 1)

    def test_unplug_vifs_reports_errors(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        with mock.patch.object(drvr, 'vif_driver') as vif_driver:
            vif_driver.unplug.side_effect = exception.AgentError(
                method='unplug')
            # The public unplug_vifs does not suppress errors.
            self.assertRaises(exception.AgentError,
                              drvr.unplug_vifs, 'inst', [1])
            vif_driver.unplug.assert_called_once_with('inst', 1)

    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
    def test_cleanup_pass_with_no_mount_device(self, undefine, unplug):
        # A BDM without a mount_device must still be disconnected.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        drvr.firewall_driver = mock.Mock()
        drvr._disconnect_volume = mock.Mock()
        fake_inst = {'name': 'foo'}
        fake_bdms = [{'connection_info': 'foo',
                     'mount_device': None}]
        with mock.patch('nova.virt.driver'
                        '.block_device_info_get_mapping',
                        return_value=fake_bdms):
            drvr.cleanup('ctxt', fake_inst, 'netinfo', destroy_disks=False)
        self.assertTrue(drvr._disconnect_volume.called)

    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
    def test_cleanup_wants_vif_errors_ignored(self, undefine, unplug):
        # cleanup() must request error-tolerant VIF unplugging.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        fake_inst = {'name': 'foo'}
        with mock.patch.object(drvr._conn, 'lookupByName') as lookup:
            lookup.return_value = fake_inst
            # NOTE(danms): Make unplug cause us to bail early, since
            # we only care about how it was called
            unplug.side_effect = test.TestingException
            self.assertRaises(test.TestingException,
                              drvr.cleanup, 'ctxt', fake_inst, 'netinfo')
            unplug.assert_called_once_with(fake_inst, 'netinfo', True)
# Cleanup with shared block storage, and the low-level volume swap path.
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'unfilter_instance')
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files',
                       return_value=True)
    @mock.patch.object(objects.Instance, 'save')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
    def test_cleanup_migrate_data_shared_block_storage(self,
                                                       _undefine_domain,
                                                       save,
                                                       delete_instance_files,
                                                       unfilter_instance):
        # Tests the cleanup method when migrate_data has
        # is_shared_block_storage=True and destroy_disks=False.
        # Instance files must still be deleted, the attempt counted in
        # system_metadata, and the instance marked cleaned and saved.
        instance = objects.Instance(self.context, **self.test_instance)
        migrate_data = objects.LibvirtLiveMigrateData(
            is_shared_block_storage=True)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        drvr.cleanup(
            self.context, instance, network_info={}, destroy_disks=False,
            migrate_data=migrate_data, destroy_vifs=False)
        delete_instance_files.assert_called_once_with(instance)
        self.assertEqual(1, int(instance.system_metadata['clean_attempts']))
        self.assertTrue(instance.cleaned)
        save.assert_called_once_with()

    def test_swap_volume(self):
        # _swap_volume must snapshot the persistent XML, blockRebase with
        # COPY|REUSE_EXT, resize to the requested size (GiB -> KiB) and
        # re-define the domain afterwards.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())

        mock_dom = mock.MagicMock()
        guest = libvirt_guest.Guest(mock_dom)

        with mock.patch.object(drvr._conn, 'defineXML',
                               create=True) as mock_define:
            xmldoc = "<domain/>"
            srcfile = "/first/path"
            dstfile = "/second/path"

            mock_dom.XMLDesc.return_value = xmldoc
            mock_dom.isPersistent.return_value = True
            # Report the block job as already complete so the copy
            # finishes immediately.
            mock_dom.blockJobInfo.return_value = {
                'type': 0,
                'bandwidth': 0,
                'cur': 100,
                'end': 100
            }

            drvr._swap_volume(guest, srcfile, dstfile, 1)

            mock_dom.XMLDesc.assert_called_once_with(
                flags=(fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
                       fakelibvirt.VIR_DOMAIN_XML_SECURE))
            mock_dom.blockRebase.assert_called_once_with(
                srcfile, dstfile, 0, flags=(
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT))
            mock_dom.blockResize.assert_called_once_with(
                srcfile, 1 * units.Gi / units.Ki)
            mock_define.assert_called_once_with(xmldoc)
# swap_volume BDM bookkeeping, live snapshot, live migration and parallels
# version/config tests.
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._swap_volume')
    @mock.patch('nova.objects.block_device.BlockDeviceMapping.'
                'get_by_volume_and_instance')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_config')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume')
    @mock.patch('nova.virt.libvirt.host.Host.get_guest')
    def _test_swap_volume_driver_bdm_save(self, get_guest,
                                          connect_volume, get_volume_config,
                                          get_by_volume_and_instance,
                                          swap_volume,
                                          disconnect_volume,
                                          volume_save,
                                          source_type):
        # Common driver for the three source_type variants below:
        # swap_volume must connect the new volume, swap, disconnect the
        # old one and save the driver BDM.
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        instance = objects.Instance(**self.test_instance)
        old_connection_info = {'driver_volume_type': 'fake',
                               'serial': 'old-volume-id',
                               'data': {'device_path': '/fake-old-volume',
                                        'access_mode': 'rw'}}
        new_connection_info = {'driver_volume_type': 'fake',
                               'serial': 'new-volume-id',
                               'data': {'device_path': '/fake-new-volume',
                                        'access_mode': 'rw'}}
        mock_dom = mock.MagicMock()
        guest = libvirt_guest.Guest(mock_dom)
        mock_dom.XMLDesc.return_value = """<domain>
          <devices>
            <disk type='file'>
                <source file='/fake-old-volume'/>
                <target dev='vdb' bus='virtio'/>
            </disk>
          </devices>
        </domain>
        """
        mock_dom.name.return_value = 'inst'
        mock_dom.UUIDString.return_value = 'uuid'
        get_guest.return_value = guest
        disk_info = {'bus': 'virtio', 'type': 'disk', 'dev': 'vdb'}
        get_volume_config.return_value = mock.MagicMock(
            source_path='/fake-new-volume')

        bdm = objects.BlockDeviceMapping(
            self.context,
            **fake_block_device.FakeDbBlockDeviceDict(
                {'id': 2, 'instance_uuid': uuids.instance,
                 'device_name': '/dev/vdb',
                 'source_type': source_type,
                 'destination_type': 'volume',
                 'volume_id': 'fake-volume-id-2',
                 'boot_index': 0}))
        get_by_volume_and_instance.return_value = bdm

        conn.swap_volume(old_connection_info, new_connection_info, instance,
                         '/dev/vdb', 1)

        get_guest.assert_called_once_with(instance)
        connect_volume.assert_called_once_with(new_connection_info,
                                               disk_info)

        swap_volume.assert_called_once_with(guest, 'vdb',
                                            '/fake-new-volume', 1)
        disconnect_volume.assert_called_once_with(old_connection_info, 'vdb')
        volume_save.assert_called_once_with()

    @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save')
    def test_swap_volume_driver_bdm_save_source_is_volume(self, volume_save):
        self._test_swap_volume_driver_bdm_save(volume_save=volume_save,
                                               source_type='volume')

    @mock.patch('nova.virt.block_device.DriverImageBlockDevice.save')
    def test_swap_volume_driver_bdm_save_source_is_image(self, volume_save):
        self._test_swap_volume_driver_bdm_save(volume_save=volume_save,
                                               source_type='image')

    @mock.patch('nova.virt.block_device.DriverSnapshotBlockDevice.save')
    def test_swap_volume_driver_bdm_save_source_is_snapshot(self,
                                                            volume_save):
        self._test_swap_volume_driver_bdm_save(volume_save=volume_save,
                                               source_type='snapshot')

    def _test_live_snapshot(self, can_quiesce=False, require_quiesce=False):
        # Common driver for the live snapshot tests: verify the shallow
        # block rebase into a delta file, the snapshot extraction, domain
        # re-definition and quiesce/unquiesce calls.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        mock_dom = mock.MagicMock()
        test_image_meta = self.test_image_meta.copy()
        if require_quiesce:
            test_image_meta = {'properties': {'os_require_quiesce': 'yes'}}

        with test.nested(
                mock.patch.object(drvr._conn, 'defineXML', create=True),
                mock.patch.object(fake_libvirt_utils, 'get_disk_size'),
                mock.patch.object(fake_libvirt_utils,
                                  'get_disk_backing_file'),
                mock.patch.object(fake_libvirt_utils, 'create_cow_image'),
                mock.patch.object(fake_libvirt_utils, 'chown'),
                mock.patch.object(fake_libvirt_utils, 'extract_snapshot'),
                mock.patch.object(drvr, '_set_quiesced')
        ) as (mock_define, mock_size, mock_backing, mock_create_cow,
              mock_chown, mock_snapshot, mock_quiesce):

            xmldoc = "<domain/>"
            srcfile = "/first/path"
            dstfile = "/second/path"
            bckfile = "/other/path"
            dltfile = dstfile + ".delta"

            mock_dom.XMLDesc.return_value = xmldoc
            mock_dom.isPersistent.return_value = True
            mock_size.return_value = 1004009
            mock_backing.return_value = bckfile
            guest = libvirt_guest.Guest(mock_dom)

            if not can_quiesce:
                mock_quiesce.side_effect = (
                    exception.InstanceQuiesceNotSupported(
                        instance_id=self.test_instance['id'], reason='test'))

            image_meta = objects.ImageMeta.from_dict(test_image_meta)
            drvr._live_snapshot(self.context, self.test_instance, guest,
                                srcfile, dstfile, "qcow2", "qcow2",
                                image_meta)

            mock_dom.XMLDesc.assert_called_once_with(flags=(
                fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
                fakelibvirt.VIR_DOMAIN_XML_SECURE))
            mock_dom.blockRebase.assert_called_once_with(
                srcfile, dltfile, 0, flags=(
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
                    fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW))

            mock_size.assert_called_once_with(srcfile, format="qcow2")
            mock_backing.assert_called_once_with(srcfile, basename=False,
                                                 format="qcow2")
            mock_create_cow.assert_called_once_with(bckfile, dltfile,
                                                    1004009)
            mock_chown.assert_called_once_with(dltfile, os.getuid())
            mock_snapshot.assert_called_once_with(dltfile, "qcow2",
                                                  dstfile, "qcow2")
            mock_define.assert_called_once_with(xmldoc)
            # Quiesce is always attempted before the snapshot ...
            mock_quiesce.assert_any_call(mock.ANY, self.test_instance,
                                         mock.ANY, True)
            if can_quiesce:
                # ... and undone afterwards only when it succeeded.
                mock_quiesce.assert_any_call(mock.ANY, self.test_instance,
                                             mock.ANY, False)

    def test_live_snapshot(self):
        self._test_live_snapshot()

    def test_live_snapshot_with_quiesce(self):
        self._test_live_snapshot(can_quiesce=True)

    def test_live_snapshot_with_require_quiesce(self):
        self._test_live_snapshot(can_quiesce=True, require_quiesce=True)

    def test_live_snapshot_with_require_quiesce_fails(self):
        # os_require_quiesce + unsupported quiesce must fail the snapshot.
        self.assertRaises(exception.InstanceQuiesceNotSupported,
                          self._test_live_snapshot,
                          can_quiesce=False, require_quiesce=True)

    @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration")
    def test_live_migration_hostname_valid(self, mock_lm):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.live_migration(self.context, self.test_instance,
                            "host1.example.com",
                            lambda x: x,
                            lambda x: x)
        self.assertEqual(1, mock_lm.call_count)

    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_live_migration")
    @mock.patch.object(fake_libvirt_utils, "is_valid_hostname")
    def test_live_migration_hostname_invalid(self, mock_hostname, mock_lm):
        # An invalid destination hostname is rejected before migrating.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_hostname.return_value = False
        self.assertRaises(exception.InvalidHostname,
                          drvr.live_migration,
                          self.context, self.test_instance,
                          "foo/?com=/bin/sh",
                          lambda x: x,
                          lambda x: x)

    def test_live_migration_force_complete(self):
        # Forcing completion queues a "force-complete" task for the
        # instance's active migration.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = fake_instance.fake_instance_obj(
            None, name='instancename', id=1,
            uuid='c83a75d4-4d53-4be5-9a40-04d9c0389ff8')
        drvr.active_migrations[instance.uuid] = deque()
        drvr.live_migration_force_complete(instance)
        self.assertEqual(
            1,
            drvr.active_migrations[instance.uuid].count("force-complete"))

    @mock.patch.object(host.Host, "get_connection")
    @mock.patch.object(fakelibvirt.virDomain, "abortJob")
    def test_live_migration_abort(self, mock_abort, mock_conn):
        # Aborting a live migration aborts the underlying libvirt job.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", False)
        guest = libvirt_guest.Guest(dom)
        with mock.patch.object(nova.virt.libvirt.host.Host, 'get_guest',
                               return_value=guest):
            drvr.live_migration_abort(self.test_instance)
            self.assertTrue(mock_abort.called)

    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('tempfile.mkstemp')
    @mock.patch('os.close', return_value=None)
    def test_check_instance_shared_storage_local_raw(self,
                                                     mock_close,
                                                     mock_mkstemp,
                                                     mock_exists):
        # With file-backed (raw) images a probe temp file is created and
        # its path returned for the shared-storage check.
        instance_uuid = str(uuid.uuid4())
        self.flags(images_type='raw', group='libvirt')
        self.flags(instances_path='/tmp')
        mock_mkstemp.return_value = (-1,
                                     '/tmp/{0}/file'.format(instance_uuid))
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        temp_file = driver.check_instance_shared_storage_local(self.context,
                                                               instance)
        self.assertEqual('/tmp/{0}/file'.format(instance_uuid),
                         temp_file['filename'])

    def test_check_instance_shared_storage_local_rbd(self):
        # rbd-backed instances have no local storage to probe.
        self.flags(images_type='rbd', group='libvirt')
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        self.assertIsNone(driver.
                          check_instance_shared_storage_local(self.context,
                                                              instance))

    def test_version_to_string(self):
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        string_ver = driver._version_to_string((4, 33, 173))
        self.assertEqual("4.33.173", string_ver)

    def test_parallels_min_version_fail(self):
        # Below-minimum parallels libvirt version must fail init_host.
        self.flags(virt_type='parallels', group='libvirt')
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with mock.patch.object(driver._conn, 'getLibVersion',
                               return_value=1002011):
            self.assertRaises(exception.NovaException,
                              driver.init_host, 'wibble')

    def test_parallels_min_version_ok(self):
        self.flags(virt_type='parallels', group='libvirt')
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with mock.patch.object(driver._conn, 'getLibVersion',
                               return_value=1002012):
            driver.init_host('wibble')

    def test_get_guest_config_parallels_vm(self):
        # Guest config for a parallels VM with ploop images: 6 devices in
        # a fixed order (disk, disk, interface, input, graphics, video).
        self.flags(virt_type='parallels', group='libvirt')
        self.flags(images_type='ploop', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)

        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, disk_info)
        self.assertEqual("parallels", cfg.virt_type)
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(instance_ref.flavor.memory_mb * units.Ki,
                         cfg.memory)
        self.assertEqual(instance_ref.flavor.vcpus, cfg.vcpus)
        self.assertEqual(vm_mode.HVM, cfg.os_type)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(6, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
# Continuation of test_get_guest_config_parallels_vm, then the parallels
# container/volume config tests and snapshot-preparation tests.
        self.assertEqual(cfg.devices[0].driver_format, "ploop")
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)

    def test_get_guest_config_parallels_ct_rescue(self):
        self._test_get_guest_config_parallels_ct(rescue=True)

    def test_get_guest_config_parallels_ct(self):
        self._test_get_guest_config_parallels_ct(rescue=False)

    def _test_get_guest_config_parallels_ct(self, rescue=False):
        # Guest config for a parallels container (EXE mode): a ploop
        # filesystem at "/" (plus a rescue fs at /mnt/rescue when
        # rescuing), then interface/graphics/video devices.
        self.flags(virt_type='parallels', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        ct_instance = self.test_instance.copy()
        ct_instance["vm_mode"] = vm_mode.EXE
        instance_ref = objects.Instance(**ct_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        if rescue:
            rescue_data = ct_instance
        else:
            rescue_data = None

        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self, 1),
                                     image_meta, {'mapping': {'disk': {}}},
                                     rescue_data)
        self.assertEqual("parallels", cfg.virt_type)
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(instance_ref.flavor.memory_mb * units.Ki,
                         cfg.memory)
        self.assertEqual(instance_ref.flavor.vcpus, cfg.vcpus)
        self.assertEqual(vm_mode.EXE, cfg.os_type)
        self.assertEqual("/sbin/init", cfg.os_init_path)
        self.assertIsNone(cfg.os_root)
        if rescue:
            self.assertEqual(5, len(cfg.devices))
        else:
            self.assertEqual(4, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestFilesys)

        device_index = 0
        fs = cfg.devices[device_index]
        self.assertEqual(fs.source_type, "file")
        self.assertEqual(fs.driver_type, "ploop")
        self.assertEqual(fs.target_dir, "/")

        if rescue:
            device_index = 1
            fs = cfg.devices[device_index]
            self.assertEqual(fs.source_type, "file")
            self.assertEqual(fs.driver_type, "ploop")
            self.assertEqual(fs.target_dir, "/mnt/rescue")

        self.assertIsInstance(cfg.devices[device_index + 1],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[device_index + 2],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[device_index + 3],
                              vconfig.LibvirtConfigGuestVideo)

    def _test_get_guest_config_parallels_volume(self, vmmode, devices):
        # Volume-backed parallels guests must expose the volume as a disk
        # device (never a filesystem), and the driver BDM must be saved.
        self.flags(virt_type='parallels', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        ct_instance = self.test_instance.copy()
        ct_instance["vm_mode"] = vmmode
        instance_ref = objects.Instance(**ct_instance)
        image_meta = objects.ImageMeta.from_dict(self.test_image_meta)

        conn_info = {'driver_volume_type': 'fake'}
        bdm = objects.BlockDeviceMapping(
            self.context,
            **fake_block_device.FakeDbBlockDeviceDict(
                {'id': 0,
                 'source_type': 'volume', 'destination_type': 'volume',
                 'device_name': '/dev/sda'}))
        info = {'block_device_mapping': driver_block_device.convert_volumes(
            [bdm])}
        info['block_device_mapping'][0]['connection_info'] = conn_info

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            info)

        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            cfg = drvr._get_guest_config(instance_ref,
                                         _fake_network_info(self, 1),
                                         image_meta, disk_info, None, info)
            mock_save.assert_called_once_with()

        self.assertEqual("parallels", cfg.virt_type)
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(instance_ref.flavor.memory_mb * units.Ki,
                         cfg.memory)
        self.assertEqual(instance_ref.flavor.vcpus, cfg.vcpus)
        self.assertEqual(vmmode, cfg.os_type)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(devices, len(cfg.devices))

        disk_found = False

        for dev in cfg.devices:
            result = isinstance(dev, vconfig.LibvirtConfigGuestFilesys)
            self.assertFalse(result)
            if (isinstance(dev, vconfig.LibvirtConfigGuestDisk) and
                    (dev.source_path is None or
                     'disk.local' not in dev.source_path)):
                self.assertEqual("disk", dev.source_device)
                self.assertEqual("sda", dev.target_dev)
                disk_found = True

        self.assertTrue(disk_found)

    def test_get_guest_config_parallels_volume(self):
        # EXE (container) guests get 4 devices, HVM (VM) guests get 6.
        self._test_get_guest_config_parallels_volume(vm_mode.EXE, 4)
        self._test_get_guest_config_parallels_volume(vm_mode.HVM, 6)

    def test_get_guest_disk_config_rbd_older_config_drive_fall_back(self):
        # New config drives are stored in rbd but existing instances have
        # config drives in the old location under the instances path.
        # Test that the driver falls back to 'flat' for config drive if it
        # doesn't exist in rbd.
        self.flags(images_type='rbd', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.image_backend = mock.Mock()
        mock_rbd_image = mock.Mock()
        mock_flat_image = mock.Mock()
        mock_flat_image.libvirt_info.return_value = mock.sentinel.diskconfig
        drvr.image_backend.image.side_effect = [mock_rbd_image,
                                                mock_flat_image]
        mock_rbd_image.exists.return_value = False
        instance = objects.Instance()
        disk_mapping = {'disk.config': {'bus': 'ide',
                                        'dev': 'hdd',
                                        'type': 'file'}}
        flavor = objects.Flavor(extra_specs={})

        diskconfig = drvr._get_guest_disk_config(
            instance, 'disk.config', disk_mapping, flavor,
            drvr._get_disk_config_image_type())

        # First lookup tries rbd, second falls back to flat.
        self.assertEqual(2, drvr.image_backend.image.call_count)
        call1 = mock.call(instance, 'disk.config', 'rbd')
        call2 = mock.call(instance, 'disk.config', 'flat')
        drvr.image_backend.image.assert_has_calls([call1, call2])
        self.assertEqual(mock.sentinel.diskconfig, diskconfig)

    def _test_prepare_domain_for_snapshot(self, live_snapshot, state):
        # Returns True iff _prepare_domain_for_snapshot suspended the
        # instance for the given power state / live-snapshot combination.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance_ref = objects.Instance(**self.test_instance)
        with mock.patch.object(drvr, "suspend") as mock_suspend:
            drvr._prepare_domain_for_snapshot(
                self.context, live_snapshot, state, instance_ref)
            return mock_suspend.called

    def test_prepare_domain_for_snapshot(self):
        # Ensure that suspend() is only called on RUNNING or PAUSED instances
        for test_power_state in power_state.STATE_MAP.keys():
            if test_power_state in (power_state.RUNNING,
                                    power_state.PAUSED):
                self.assertTrue(self._test_prepare_domain_for_snapshot(
                    False, test_power_state))
            else:
                self.assertFalse(self._test_prepare_domain_for_snapshot(
                    False, test_power_state))

    def test_prepare_domain_for_snapshot_lxc(self):
        self.flags(virt_type='lxc', group='libvirt')
        # Ensure that suspend() is never called with LXC
        for test_power_state in power_state.STATE_MAP.keys():
            self.assertFalse(self._test_prepare_domain_for_snapshot(
                False, test_power_state))

    def test_prepare_domain_for_snapshot_live_snapshots(self):
        # Ensure that suspend() is never called for live snapshots
        for test_power_state in power_state.STATE_MAP.keys():
            self.assertFalse(self._test_prepare_domain_for_snapshot(
                True, test_power_state))

    @mock.patch('os.walk')
    @mock.patch('os.path.exists')
    @mock.patch('os.path.getsize')
    @mock.patch('os.path.isdir')
    @mock.patch('nova.utils.execute')
    @mock.patch.object(host.Host, "get_domain")
    def test_get_instance_disk_info_parallels_ct(self, mock_get_domain,
                                                 mock_execute,
                                                 mock_isdir,
                                                 mock_getsize,
                                                 mock_exists,
                                                 mock_walk):
        # Disk info for a parallels container backed by a ploop filesystem:
        # sizes are aggregated from the files found under the source dir.
        dummyxml = ("<domain type='parallels'><name>instance-0000000a</name>"
                    "<os><type>exe</type></os>"
                    "<devices>"
                    "<filesystem type='file'>"
                    "<driver format='ploop' type='ploop'/>"
                    "<source file='/test/disk'/>"
                    "<target dir='/'/></filesystem>"
                    "</devices></domain>")
        ret = ("image: /test/disk/root.hds\n"
               "file format: parallels\n"
               "virtual size: 20G (21474836480 bytes)\n"
               "disk size: 789M\n")
        self.flags(virt_type='parallels', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        instance.vm_mode = vm_mode.EXE
        fake_dom = FakeVirtDomain(fake_xml=dummyxml)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_get_domain.return_value = fake_dom
        mock_walk.return_value = [('/test/disk', [],
                                   ['DiskDescriptor.xml', 'root.hds'])]

        def getsize_sideeffect(*args, **kwargs):
            # NOTE(review): implicitly returns None for any other path —
            # acceptable here since only these two files are walked.
            if args[0] == '/test/disk/DiskDescriptor.xml':
                return 790
            if args[0] == '/test/disk/root.hds':
                return 827326464
# Tail of test_get_instance_disk_info_parallels_ct, then HostStateTestCase
# and the start of LibvirtDriverTestCase.
        mock_getsize.side_effect = getsize_sideeffect
        mock_exists.return_value = True
        mock_isdir.return_value = True
        mock_execute.return_value = (ret, '')

        info = drvr.get_instance_disk_info(instance)
        info = jsonutils.loads(info)
        self.assertEqual(info[0]['type'], 'ploop')
        self.assertEqual(info[0]['path'], '/test/disk')
        # disk_size = DiskDescriptor.xml (790) + root.hds (827326464)
        self.assertEqual(info[0]['disk_size'], 827327254)
        self.assertEqual(info[0]['over_committed_disk_size'], 20647509226)
        self.assertEqual(info[0]['virt_disk_size'], 21474836480)


class HostStateTestCase(test.NoDBTestCase):
    """Verify host statistics reported by get_available_resource()."""

    # Canned host facts returned by the FakeConnection below.
    cpu_info = {"vendor": "Intel", "model": "pentium",
                "arch": "i686",
                "features": ["ssse3", "monitor", "pni", "sse2", "sse",
                             "fxsr", "clflush", "pse36", "pat", "cmov",
                             "mca", "pge", "mtrr", "sep", "apic"],
                "topology": {"cores": "1", "threads": "1", "sockets": "1"}}
    instance_caps = [(arch.X86_64, "kvm", "hvm"),
                     (arch.I686, "kvm", "hvm")]
    pci_devices = [{
        "dev_id": "pci_0000_04_00_3",
        "address": "0000:04:10.3",
        "product_id": '1521',
        "vendor_id": '8086',
        "dev_type": fields.PciDeviceType.SRIOV_PF,
        "phys_function": None}]
    numa_topology = objects.NUMATopology(
        cells=[objects.NUMACell(
            id=1, cpuset=set([1, 2]), memory=1024,
            cpu_usage=0, memory_usage=0,
            mempages=[], siblings=[],
            pinned_cpus=set([])),
               objects.NUMACell(
            id=2, cpuset=set([3, 4]), memory=1024,
            cpu_usage=0, memory_usage=0,
            mempages=[], siblings=[],
            pinned_cpus=set([]))])

    class FakeConnection(libvirt_driver.LibvirtDriver):
        """Fake connection object."""
        def __init__(self):
            super(HostStateTestCase.FakeConnection,
                  self).__init__(fake.FakeVirtAPI(), True)
            self._host = host.Host("qemu:///system")

            # Override the Host memory getters with fixed values.
            def _get_memory_mb_total():
                return 497

            def _get_memory_mb_used():
                return 88

            self._host.get_memory_mb_total = _get_memory_mb_total
            self._host.get_memory_mb_used = _get_memory_mb_used

        def _get_vcpu_total(self):
            return 1

        def _get_vcpu_used(self):
            return 0

        def _get_cpu_info(self):
            return HostStateTestCase.cpu_info

        def _get_disk_over_committed_size_total(self):
            return 0

        def _get_local_gb_info(self):
            return {'total': 100, 'used': 20, 'free': 80}

        def get_host_uptime(self):
            return ('10:01:16 up 1:36, 6 users, '
                    'load average: 0.21, 0.16, 0.19')

        def _get_disk_available_least(self):
            return 13091

        def _get_instance_capabilities(self):
            return HostStateTestCase.instance_caps

        def _get_pci_passthrough_devices(self):
            return jsonutils.dumps(HostStateTestCase.pci_devices)

        def _get_host_numa_topology(self):
            return HostStateTestCase.numa_topology

    @mock.patch.object(fakelibvirt, "openAuth")
    def test_update_status(self, mock_open):
        # All stats must round-trip through get_available_resource().
        mock_open.return_value = fakelibvirt.Connection("qemu:///system")

        drvr = HostStateTestCase.FakeConnection()

        stats = drvr.get_available_resource("compute1")
        self.assertEqual(stats["vcpus"], 1)
        self.assertEqual(stats["memory_mb"], 497)
        self.assertEqual(stats["local_gb"], 100)
        self.assertEqual(stats["vcpus_used"], 0)
        self.assertEqual(stats["memory_mb_used"], 88)
        self.assertEqual(stats["local_gb_used"], 20)
        self.assertEqual(stats["hypervisor_type"], 'QEMU')
        self.assertEqual(stats["hypervisor_version"],
                         fakelibvirt.FAKE_QEMU_VERSION)
        self.assertEqual(stats["hypervisor_hostname"], 'compute1')
        cpu_info = jsonutils.loads(stats["cpu_info"])
        self.assertEqual(cpu_info,
                {"vendor": "Intel",
                 "model": "pentium",
                 "arch": arch.I686,
                 "features": ["ssse3", "monitor", "pni", "sse2", "sse",
                              "fxsr", "clflush", "pse36", "pat", "cmov",
                              "mca", "pge", "mtrr", "sep", "apic"],
                 "topology": {"cores": "1", "threads": "1", "sockets": "1"}
                })
        self.assertEqual(stats["disk_available_least"], 80)
        self.assertEqual(jsonutils.loads(stats["pci_passthrough_devices"]),
                         HostStateTestCase.pci_devices)
        self.assertThat(objects.NUMATopology.obj_from_db_obj(
                        stats['numa_topology'])._to_dict(),
                    matchers.DictMatches(
                        HostStateTestCase.numa_topology._to_dict()))


class LibvirtDriverTestCase(test.NoDBTestCase):
    """Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
    def setUp(self):
        super(LibvirtDriverTestCase, self).setUp()
        os_vif.initialize()
        self.drvr = libvirt_driver.LibvirtDriver(
            fake.FakeVirtAPI(), read_only=True)
        self.context = context.get_admin_context()
        self.test_image_meta = {
            "disk_format": "raw",
        }

    def _create_instance(self, params=None):
        """Create a test instance."""
        if not params:
            params = {}

        flavor = objects.Flavor(memory_mb=512,
                                swap=0,
                                vcpu_weight=None,
                                root_gb=10,
                                id=2,
                                name=u'm1.tiny',
                                ephemeral_gb=20,
                                rxtx_factor=1.0,
                                flavorid=u'1',
                                vcpus=1,
                                extra_specs={})
        flavor.update(params.pop('flavor', {}))

        inst = {}
        inst['id'] = 1
        inst['uuid'] = '52d3b512-1152-431f-a8f7-28f0288a622b'
        inst['os_type'] = 'linux'
        inst['image_ref'] = uuids.fake_image_ref
        inst['reservation_id'] = 'r-fakeres'
        inst['user_id'] = 'fake'
        inst['project_id'] = 'fake'
        inst['instance_type_id'] = 2
        inst['ami_launch_index'] = 0
        inst['host'] = 'host1'
        inst['root_gb'] = flavor.root_gb
        inst['ephemeral_gb'] = flavor.ephemeral_gb
        inst['config_drive'] = True
        inst['kernel_id'] = 2
        inst['ramdisk_id'] = 3
        inst['key_data'] = 'ABCDEFG'
        inst['system_metadata'] = {}
        inst['metadata'] = {}
        inst['task_state'] = None

        inst.update(params)

        instance = fake_instance.fake_instance_obj(
            self.context, expected_attrs=['metadata', 'system_metadata',
                                          'pci_devices'],
            flavor=flavor, **inst)

        # Attributes which we need to be set so they don't touch the db,
        # but it's not worth the effort to fake properly
        for field in ['numa_topology', 'vcpu_model']:
            setattr(instance, field, None)

        return instance

    def test_migrate_disk_and_power_off_exception(self):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off.
        """
        self.counter = 0
        self.checked_shared_storage = False

        def fake_get_instance_disk_info(instance,
                                        block_device_info=None):
            return '[]'

        def fake_destroy(instance):
            pass

        def fake_get_host_ip_addr():
            return '10.0.0.1'

        def fake_execute(*args, **kwargs):
            # Fail the very first execute() call to exercise the error path.
            self.counter += 1
            if self.counter == 1:
                assert False, "intentional failure"

        def fake_os_path_exists(path):
            return True

        def fake_is_storage_shared(dest, inst_base):
            self.checked_shared_storage = True
            return False

        self.stubs.Set(self.drvr, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.drvr, '_destroy', fake_destroy)
        self.stubs.Set(self.drvr, 'get_host_ip_addr',
                       fake_get_host_ip_addr)
        self.stubs.Set(self.drvr, '_is_storage_shared_with',
                       fake_is_storage_shared)
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stub_out('os.path.exists', fake_os_path_exists)

        ins_ref = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 20}
        flavor_obj = objects.Flavor(**flavor)

        self.assertRaises(AssertionError,
                          self.drvr.migrate_disk_and_power_off,
                          context.get_admin_context(), ins_ref, '10.0.0.2',
                          flavor_obj, None)

    def _test_migrate_disk_and_power_off(self, flavor_obj,
                                         block_device_info=None,
                                         params_for_instance=None):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off.
        """
        instance = self._create_instance(params=params_for_instance)
        disk_info = fake_disk_info_json(instance)

        def fake_get_instance_disk_info(instance, block_device_info=None):
            return disk_info

        def fake_destroy(instance):
            pass

        def fake_get_host_ip_addr():
            return '10.0.0.1'

        def fake_execute(*args, **kwargs):
            pass

        def fake_copy_image(src, dest, host=None, receive=False,
                            on_execute=None, on_completion=None,
                            compression=True):
            # Copy callbacks must always be provided.
            self.assertIsNotNone(on_execute)
            self.assertIsNotNone(on_completion)

        self.stubs.Set(self.drvr, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.drvr, '_destroy', fake_destroy)
        self.stubs.Set(self.drvr, 'get_host_ip_addr',
                       fake_get_host_ip_addr)
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(libvirt_utils, 'copy_image', fake_copy_image)

        # dest is different host case
        out = self.drvr.migrate_disk_and_power_off(
               context.get_admin_context(), instance, '10.0.0.2',
               flavor_obj, None, block_device_info=block_device_info)
        self.assertEqual(out, disk_info)

        # dest is same host case
        out = self.drvr.migrate_disk_and_power_off(
               context.get_admin_context(), instance, '10.0.0.1',
               flavor_obj, None, block_device_info=block_device_info)
        self.assertEqual(out, disk_info)

    def test_migrate_disk_and_power_off(self):
        flavor = {'root_gb': 10, 'ephemeral_gb': 20}
        flavor_obj = objects.Flavor(**flavor)

        self._test_migrate_disk_and_power_off(flavor_obj)

    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
    def test_migrate_disk_and_power_off_boot_from_volume(self,
                                                         disconnect_volume):
        # Boot-from-volume: the root volume must be disconnected, not
        # copied as a local disk.
        info = {'block_device_mapping': [{'boot_index': None,
                                          'mount_device': '/dev/vdd',
                                          'connection_info': None},
                                         {'boot_index': 0,
                                          'mount_device': '/dev/vda',
                                          'connection_info': None}]}
        flavor = {'root_gb': 1, 'ephemeral_gb': 0}
        flavor_obj = objects.Flavor(**flavor)
        # Note(Mike_D): The size of instance's ephemeral_gb is 0 gb.
self._test_migrate_disk_and_power_off( flavor_obj, block_device_info=info, params_for_instance={'image_ref': None, 'flavor': {'root_gb': 1, 'ephemeral_gb': 0}}) disconnect_volume.assert_called_with( info['block_device_mapping'][1]['connection_info'], 'vda') @mock.patch('nova.utils.execute') @mock.patch('nova.virt.libvirt.utils.copy_image') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '.get_instance_disk_info') def test_migrate_disk_and_power_off_swap(self, mock_get_disk_info, get_host_ip_addr, mock_destroy, mock_copy_image, mock_execute): """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection .migrate_disk_and_power_off. """ self.copy_or_move_swap_called = False # Original instance config instance = self._create_instance({'flavor': {'root_gb': 10, 'ephemeral_gb': 0}}) disk_info = fake_disk_info_json(instance) mock_get_disk_info.return_value = disk_info get_host_ip_addr.return_value = '10.0.0.1' def fake_copy_image(*args, **kwargs): # disk.swap should not be touched since it is skipped over if '/test/disk.swap' in list(args): self.copy_or_move_swap_called = True def fake_execute(*args, **kwargs): # disk.swap should not be touched since it is skipped over if set(['mv', '/test/disk.swap']).issubset(list(args)): self.copy_or_move_swap_called = True mock_copy_image.side_effect = fake_copy_image mock_execute.side_effect = fake_execute drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # Re-size fake instance to 20G root and 1024M swap disk flavor = {'root_gb': 20, 'ephemeral_gb': 0, 'swap': 1024} flavor_obj = objects.Flavor(**flavor) # Destination is same host out = drvr.migrate_disk_and_power_off(context.get_admin_context(), instance, '10.0.0.1', flavor_obj, None) mock_get_disk_info.assert_called_once_with(instance, block_device_info=None) self.assertTrue(get_host_ip_addr.called) 
mock_destroy.assert_called_once_with(instance) self.assertFalse(self.copy_or_move_swap_called) self.assertEqual(disk_info, out) def _test_migrate_disk_and_power_off_resize_check(self, expected_exc): """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection .migrate_disk_and_power_off. """ instance = self._create_instance() disk_info = fake_disk_info_json(instance) def fake_get_instance_disk_info(instance, xml=None, block_device_info=None): return disk_info def fake_destroy(instance): pass def fake_get_host_ip_addr(): return '10.0.0.1' self.stubs.Set(self.drvr, 'get_instance_disk_info', fake_get_instance_disk_info) self.stubs.Set(self.drvr, '_destroy', fake_destroy) self.stubs.Set(self.drvr, 'get_host_ip_addr', fake_get_host_ip_addr) flavor = {'root_gb': 10, 'ephemeral_gb': 20} flavor_obj = objects.Flavor(**flavor) # Migration is not implemented for LVM backed instances self.assertRaises(expected_exc, self.drvr.migrate_disk_and_power_off, None, instance, '10.0.0.1', flavor_obj, None) @mock.patch('nova.utils.execute') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '.get_instance_disk_info') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '._is_storage_shared_with') def _test_migrate_disk_and_power_off_backing_file(self, shared_storage, mock_is_shared_storage, mock_get_disk_info, mock_destroy, mock_execute): self.convert_file_called = False flavor = {'root_gb': 20, 'ephemeral_gb': 30, 'swap': 0} flavor_obj = objects.Flavor(**flavor) disk_info = [{'type': 'qcow2', 'path': '/test/disk', 'virt_disk_size': '10737418240', 'backing_file': '/base/disk', 'disk_size': '83886080'}] disk_info_text = jsonutils.dumps(disk_info) mock_get_disk_info.return_value = disk_info_text mock_is_shared_storage.return_value = shared_storage def fake_execute(*args, **kwargs): self.assertNotEqual(args[0:2], ['qemu-img', 'convert']) mock_execute.side_effect = fake_execute instance = self._create_instance() out = 
self.drvr.migrate_disk_and_power_off( context.get_admin_context(), instance, '10.0.0.2', flavor_obj, None) self.assertTrue(mock_is_shared_storage.called) mock_destroy.assert_called_once_with(instance) self.assertEqual(out, disk_info_text) def test_migrate_disk_and_power_off_shared_storage(self): self._test_migrate_disk_and_power_off_backing_file(True) def test_migrate_disk_and_power_off_non_shared_storage(self): self._test_migrate_disk_and_power_off_backing_file(False) def test_migrate_disk_and_power_off_lvm(self): self.flags(images_type='lvm', group='libvirt') def fake_execute(*args, **kwargs): pass self.stubs.Set(utils, 'execute', fake_execute) expected_exc = exception.InstanceFaultRollback self._test_migrate_disk_and_power_off_resize_check(expected_exc) def test_migrate_disk_and_power_off_resize_cannot_ssh(self): def fake_execute(*args, **kwargs): raise processutils.ProcessExecutionError() def fake_is_storage_shared(dest, inst_base): self.checked_shared_storage = True return False self.stubs.Set(self.drvr, '_is_storage_shared_with', fake_is_storage_shared) self.stubs.Set(utils, 'execute', fake_execute) expected_exc = exception.InstanceFaultRollback self._test_migrate_disk_and_power_off_resize_check(expected_exc) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '.get_instance_disk_info') def test_migrate_disk_and_power_off_resize_error(self, mock_get_disk_info): instance = self._create_instance() flavor = {'root_gb': 5, 'ephemeral_gb': 10} flavor_obj = objects.Flavor(**flavor) mock_get_disk_info.return_value = fake_disk_info_json(instance) self.assertRaises( exception.InstanceFaultRollback, self.drvr.migrate_disk_and_power_off, 'ctx', instance, '10.0.0.1', flavor_obj, None) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '.get_instance_disk_info') def test_migrate_disk_and_power_off_resize_error_default_ephemeral( self, mock_get_disk_info): # Note(Mike_D): The size of this instance's ephemeral_gb is 20 gb. 
instance = self._create_instance() flavor = {'root_gb': 10, 'ephemeral_gb': 0} flavor_obj = objects.Flavor(**flavor) mock_get_disk_info.return_value = fake_disk_info_json(instance) self.assertRaises(exception.InstanceFaultRollback, self.drvr.migrate_disk_and_power_off, 'ctx', instance, '10.0.0.1', flavor_obj, None) @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '.get_instance_disk_info') @mock.patch('nova.virt.driver.block_device_info_get_ephemerals') def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get, mock_get_disk_info): mappings = [ { 'device_name': '/dev/sdb4', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'guest_format': 'swap', 'boot_index': -1, 'volume_size': 1 }, { 'device_name': '/dev/sda1', 'source_type': 'volume', 'destination_type': 'volume', 'device_type': 'disk', 'volume_id': 1, 'guest_format': None, 'boot_index': 1, 'volume_size': 6 }, { 'device_name': '/dev/sda2', 'source_type': 'snapshot', 'destination_type': 'volume', 'snapshot_id': 1, 'device_type': 'disk', 'guest_format': None, 'boot_index': 0, 'volume_size': 4 }, { 'device_name': '/dev/sda3', 'source_type': 'blank', 'destination_type': 'local', 'device_type': 'disk', 'guest_format': None, 'boot_index': -1, 'volume_size': 3 } ] mock_get.return_value = mappings instance = self._create_instance() # Old flavor, eph is 20, real disk is 3, target is 2, fail flavor = {'root_gb': 10, 'ephemeral_gb': 2} flavor_obj = objects.Flavor(**flavor) mock_get_disk_info.return_value = fake_disk_info_json(instance) self.assertRaises( exception.InstanceFaultRollback, self.drvr.migrate_disk_and_power_off, 'ctx', instance, '10.0.0.1', flavor_obj, None) # Old flavor, eph is 20, real disk is 3, target is 4 flavor = {'root_gb': 10, 'ephemeral_gb': 4} flavor_obj = objects.Flavor(**flavor) self._test_migrate_disk_and_power_off(flavor_obj) @mock.patch('nova.utils.execute') @mock.patch('nova.virt.libvirt.utils.copy_image') 
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy') @mock.patch('nova.virt.libvirt.utils.get_instance_path') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '._is_storage_shared_with') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver' '.get_instance_disk_info') def test_migrate_disk_and_power_off_resize_copy_disk_info(self, mock_disk_info, mock_shared, mock_path, mock_destroy, mock_copy, mock_execuate): instance = self._create_instance() disk_info = fake_disk_info_json(instance) disk_info_text = jsonutils.loads(disk_info) instance_base = os.path.dirname(disk_info_text[0]['path']) flavor = {'root_gb': 10, 'ephemeral_gb': 25} flavor_obj = objects.Flavor(**flavor) mock_disk_info.return_value = disk_info mock_path.return_value = instance_base mock_shared.return_value = False src_disk_info_path = os.path.join(instance_base + '_resize', 'disk.info') with mock.patch.object(os.path, 'exists', autospec=True) \ as mock_exists: # disk.info exists on the source mock_exists.side_effect = \ lambda path: path == src_disk_info_path self.drvr.migrate_disk_and_power_off(context.get_admin_context(), instance, mock.sentinel, flavor_obj, None) self.assertTrue(mock_exists.called) dst_disk_info_path = os.path.join(instance_base, 'disk.info') mock_copy.assert_any_call(src_disk_info_path, dst_disk_info_path, host=mock.sentinel, on_execute=mock.ANY, on_completion=mock.ANY) def test_wait_for_running(self): def fake_get_info(instance): if instance['name'] == "not_found": raise exception.InstanceNotFound(instance_id=instance['uuid']) elif instance['name'] == "running": return hardware.InstanceInfo(state=power_state.RUNNING) else: return hardware.InstanceInfo(state=power_state.SHUTDOWN) self.stubs.Set(self.drvr, 'get_info', fake_get_info) # instance not found case self.assertRaises(exception.InstanceNotFound, self.drvr._wait_for_running, {'name': 'not_found', 'uuid': 'not_found_uuid'}) # instance is running case self.assertRaises(loopingcall.LoopingCallDone, 
self.drvr._wait_for_running, {'name': 'running', 'uuid': 'running_uuid'}) # else case self.drvr._wait_for_running({'name': 'else', 'uuid': 'other_uuid'}) def test_disk_size_from_instance_disk_info(self): flavor_data = {'root_gb': 10, 'ephemeral_gb': 20, 'swap_gb': 30} inst = objects.Instance(flavor=objects.Flavor(**flavor_data)) self.assertEqual(10 * units.Gi, self.drvr._disk_size_from_instance(inst, 'disk')) self.assertEqual(20 * units.Gi, self.drvr._disk_size_from_instance(inst, 'disk.local')) self.assertEqual(0, self.drvr._disk_size_from_instance(inst, 'disk.swap')) @mock.patch('nova.utils.execute') def test_disk_raw_to_qcow2(self, mock_execute): path = '/test/disk' _path_qcow = path + '_qcow' self.drvr._disk_raw_to_qcow2(path) mock_execute.assert_has_calls([ mock.call('qemu-img', 'convert', '-f', 'raw', '-O', 'qcow2', path, _path_qcow), mock.call('mv', _path_qcow, path)]) @mock.patch('nova.utils.execute') def test_disk_qcow2_to_raw(self, mock_execute): path = '/test/disk' _path_raw = path + '_raw' self.drvr._disk_qcow2_to_raw(path) mock_execute.assert_has_calls([ mock.call('qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw', path, _path_raw), mock.call('mv', _path_raw, path)]) @mock.patch('nova.virt.disk.api.extend') def test_disk_resize_raw(self, mock_extend): image = imgmodel.LocalFileImage("/test/disk", imgmodel.FORMAT_RAW) self.drvr._disk_resize(image, 50) mock_extend.assert_called_once_with(image, 50) @mock.patch('nova.virt.disk.api.can_resize_image') @mock.patch('nova.virt.disk.api.is_image_extendable') @mock.patch('nova.virt.disk.api.extend') def test_disk_resize_qcow2( self, mock_extend, mock_can_resize, mock_is_image_extendable): with test.nested( mock.patch.object( self.drvr, '_disk_qcow2_to_raw'), mock.patch.object( self.drvr, '_disk_raw_to_qcow2'))\ as (mock_disk_qcow2_to_raw, mock_disk_raw_to_qcow2): mock_can_resize.return_value = True mock_is_image_extendable.return_value = True imageqcow2 = imgmodel.LocalFileImage("/test/disk", 
imgmodel.FORMAT_QCOW2) imageraw = imgmodel.LocalFileImage("/test/disk", imgmodel.FORMAT_RAW) self.drvr._disk_resize(imageqcow2, 50) mock_disk_qcow2_to_raw.assert_called_once_with(imageqcow2.path) mock_extend.assert_called_once_with(imageraw, 50) mock_disk_raw_to_qcow2.assert_called_once_with(imageqcow2.path) def _test_finish_migration(self, power_on, resize_instance=False): """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection .finish_migration. """ powered_on = power_on self.fake_create_domain_called = False self.fake_disk_resize_called = False create_image_called = [False] def fake_to_xml(context, instance, network_info, disk_info, image_meta=None, rescue=None, block_device_info=None, write_to_disk=False): return "" def fake_plug_vifs(instance, network_info): pass def fake_create_image(context, inst, disk_mapping, suffix='', disk_images=None, network_info=None, block_device_info=None, inject_files=True, fallback_from_host=None): self.assertFalse(inject_files) create_image_called[0] = True def fake_create_domain_and_network( context, xml, instance, network_info, disk_info, block_device_info=None, power_on=True, reboot=False, vifs_already_plugged=False, post_xml_callback=None): self.fake_create_domain_called = True self.assertEqual(powered_on, power_on) self.assertTrue(vifs_already_plugged) def fake_enable_hairpin(): pass def fake_execute(*args, **kwargs): pass def fake_get_info(instance): if powered_on: return hardware.InstanceInfo(state=power_state.RUNNING) else: return hardware.InstanceInfo(state=power_state.SHUTDOWN) def fake_disk_resize(image, size): # Assert that _create_image is called before disk resize, # otherwise we might be trying to resize a disk whose backing # file hasn't been fetched, yet. 
self.assertTrue(create_image_called[0]) self.fake_disk_resize_called = True self.flags(use_cow_images=True) self.stubs.Set(self.drvr, '_disk_resize', fake_disk_resize) self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml) self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs) self.stubs.Set(self.drvr, '_create_image', fake_create_image) self.stubs.Set(self.drvr, '_create_domain_and_network', fake_create_domain_and_network) self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin', fake_enable_hairpin) self.stubs.Set(utils, 'execute', fake_execute) fw = base_firewall.NoopFirewallDriver() self.stubs.Set(self.drvr, 'firewall_driver', fw) self.stubs.Set(self.drvr, 'get_info', fake_get_info) instance = self._create_instance({'config_drive': str(True)}) migration = objects.Migration() migration.source_compute = 'fake-source-compute' migration.dest_compute = 'fake-dest-compute' migration.source_node = 'fake-source-node' migration.dest_node = 'fake-dest-node' image_meta = objects.ImageMeta.from_dict(self.test_image_meta) # Source disks are raw to test conversion disk_info = fake_disk_info_json(instance, type='raw') with test.nested( mock.patch.object(self.drvr, '_disk_raw_to_qcow2', autospec=True), mock.patch.object(self.drvr, '_ensure_console_log_for_instance') ) as (mock_raw_to_qcow2, mock_ensure_console_log): self.drvr.finish_migration( context.get_admin_context(), migration, instance, disk_info, [], image_meta, resize_instance, None, power_on) mock_ensure_console_log.assert_called_once_with(instance) # Assert that we converted the root and ephemeral disks instance_path = libvirt_utils.get_instance_path(instance) convert_calls = [mock.call(os.path.join(instance_path, name)) for name in ('disk', 'disk.local')] mock_raw_to_qcow2.assert_has_calls(convert_calls, any_order=True) # Implicitly assert that we did not convert the config disk self.assertEqual(len(convert_calls), mock_raw_to_qcow2.call_count) self.assertTrue(self.fake_create_domain_called) 
self.assertEqual( resize_instance, self.fake_disk_resize_called) def test_finish_migration_resize(self): self._test_finish_migration(True, resize_instance=True) def test_finish_migration_power_on(self): self._test_finish_migration(True) def test_finish_migration_power_off(self): self._test_finish_migration(False) def _test_finish_revert_migration(self, power_on): """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection .finish_revert_migration. """ powered_on = power_on self.fake_create_domain_called = False def fake_execute(*args, **kwargs): pass def fake_plug_vifs(instance, network_info): pass def fake_create_domain(context, xml, instance, network_info, disk_info, block_device_info=None, power_on=None, vifs_already_plugged=None): self.fake_create_domain_called = True self.assertEqual(powered_on, power_on) self.assertTrue(vifs_already_plugged) return mock.MagicMock() def fake_enable_hairpin(): pass def fake_get_info(instance): if powered_on: return hardware.InstanceInfo(state=power_state.RUNNING) else: return hardware.InstanceInfo(state=power_state.SHUTDOWN) def fake_to_xml(context, instance, network_info, disk_info, image_meta=None, rescue=None, block_device_info=None): return "" self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml) self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs) self.stubs.Set(utils, 'execute', fake_execute) fw = base_firewall.NoopFirewallDriver() self.stubs.Set(self.drvr, 'firewall_driver', fw) self.stubs.Set(self.drvr, '_create_domain_and_network', fake_create_domain) self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin', fake_enable_hairpin) self.stubs.Set(self.drvr, 'get_info', fake_get_info) self.stubs.Set(utils, 'get_image_from_system_metadata', lambda *a: self.test_image_meta) with utils.tempdir() as tmpdir: self.flags(instances_path=tmpdir) ins_ref = self._create_instance() os.mkdir(os.path.join(tmpdir, ins_ref['name'])) libvirt_xml_path = os.path.join(tmpdir, ins_ref['name'], 'libvirt.xml') f = 
open(libvirt_xml_path, 'w') f.close() self.drvr.finish_revert_migration( context.get_admin_context(), ins_ref, [], None, power_on) self.assertTrue(self.fake_create_domain_called) def test_finish_revert_migration_power_on(self): self._test_finish_revert_migration(True) def test_finish_revert_migration_power_off(self): self._test_finish_revert_migration(False) def _test_finish_revert_migration_after_crash(self, backup_made=True, del_inst_failed=False): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.image_backend = mock.Mock() drvr.image_backend.image.return_value = drvr.image_backend context = 'fake_context' ins_ref = self._create_instance() with test.nested( mock.patch.object(os.path, 'exists', return_value=backup_made), mock.patch.object(libvirt_utils, 'get_instance_path'), mock.patch.object(utils, 'execute'), mock.patch.object(drvr, '_create_domain_and_network'), mock.patch.object(drvr, '_get_guest_xml'), mock.patch.object(shutil, 'rmtree'), mock.patch.object(loopingcall, 'FixedIntervalLoopingCall'), ) as (mock_stat, mock_path, mock_exec, mock_cdn, mock_ggx, mock_rmtree, mock_looping_call): mock_path.return_value = '/fake/foo' if del_inst_failed: mock_rmtree.side_effect = OSError(errno.ENOENT, 'test exception') drvr.finish_revert_migration(context, ins_ref, []) if backup_made: mock_exec.assert_called_once_with('mv', '/fake/foo_resize', '/fake/foo') else: self.assertFalse(mock_exec.called) def test_finish_revert_migration_after_crash(self): self._test_finish_revert_migration_after_crash(backup_made=True) def test_finish_revert_migration_after_crash_before_new(self): self._test_finish_revert_migration_after_crash(backup_made=True) def test_finish_revert_migration_after_crash_before_backup(self): self._test_finish_revert_migration_after_crash(backup_made=False) def test_finish_revert_migration_after_crash_delete_failed(self): self._test_finish_revert_migration_after_crash(backup_made=True, del_inst_failed=True) def 
test_finish_revert_migration_preserves_disk_bus(self): def fake_get_guest_xml(context, instance, network_info, disk_info, image_meta, block_device_info=None): self.assertEqual('ide', disk_info['disk_bus']) image_meta = {"disk_format": "raw", "properties": {"hw_disk_bus": "ide"}} instance = self._create_instance() drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) with test.nested( mock.patch.object(drvr, '_create_domain_and_network'), mock.patch.object(utils, 'get_image_from_system_metadata', return_value=image_meta), mock.patch.object(drvr, '_get_guest_xml', side_effect=fake_get_guest_xml)): drvr.finish_revert_migration('', instance, None, power_on=False) def test_finish_revert_migration_snap_backend(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.image_backend = mock.Mock() drvr.image_backend.image.return_value = drvr.image_backend ins_ref = self._create_instance() with test.nested( mock.patch.object(utils, 'get_image_from_system_metadata'), mock.patch.object(drvr, '_create_domain_and_network'), mock.patch.object(drvr, '_get_guest_xml')) as ( mock_image, mock_cdn, mock_ggx): mock_image.return_value = {'disk_format': 'raw'} drvr.finish_revert_migration('', ins_ref, None, power_on=False) drvr.image_backend.rollback_to_snap.assert_called_once_with( libvirt_utils.RESIZE_SNAPSHOT_NAME) drvr.image_backend.remove_snap.assert_called_once_with( libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True) def test_finish_revert_migration_snap_backend_snapshot_not_found(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.image_backend = mock.Mock() drvr.image_backend.image.return_value = drvr.image_backend ins_ref = self._create_instance() with test.nested( mock.patch.object(rbd_utils, 'RBDDriver'), mock.patch.object(utils, 'get_image_from_system_metadata'), mock.patch.object(drvr, '_create_domain_and_network'), mock.patch.object(drvr, '_get_guest_xml')) as ( mock_rbd, mock_image, mock_cdn, mock_ggx): 
mock_image.return_value = {'disk_format': 'raw'} mock_rbd.rollback_to_snap.side_effect = exception.SnapshotNotFound( snapshot_id='testing') drvr.finish_revert_migration('', ins_ref, None, power_on=False) drvr.image_backend.remove_snap.assert_called_once_with( libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True) def test_finish_revert_migration_snap_backend_image_does_not_exist(self): drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.image_backend = mock.Mock() drvr.image_backend.image.return_value = drvr.image_backend drvr.image_backend.exists.return_value = False ins_ref = self._create_instance() with test.nested( mock.patch.object(rbd_utils, 'RBDDriver'), mock.patch.object(utils, 'get_image_from_system_metadata'), mock.patch.object(drvr, '_create_domain_and_network'), mock.patch.object(drvr, '_get_guest_xml')) as ( mock_rbd, mock_image, mock_cdn, mock_ggx): mock_image.return_value = {'disk_format': 'raw'} drvr.finish_revert_migration('', ins_ref, None, power_on=False) self.assertFalse(drvr.image_backend.rollback_to_snap.called) self.assertFalse(drvr.image_backend.remove_snap.called) def test_cleanup_failed_migration(self): self.mox.StubOutWithMock(shutil, 'rmtree') shutil.rmtree('/fake/inst') self.mox.ReplayAll() self.drvr._cleanup_failed_migration('/fake/inst') def test_confirm_migration(self): ins_ref = self._create_instance() self.mox.StubOutWithMock(self.drvr, "_cleanup_resize") self.drvr._cleanup_resize(ins_ref, _fake_network_info(self, 1)) self.mox.ReplayAll() self.drvr.confirm_migration("migration_ref", ins_ref, _fake_network_info(self, 1)) def test_cleanup_resize_same_host(self): CONF.set_override('policy_dirs', [], group='oslo_policy') ins_ref = self._create_instance({'host': CONF.host}) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.image_backend = mock.Mock() drvr.image_backend.image.return_value = drvr.image_backend with test.nested( mock.patch.object(os.path, 'exists'), mock.patch.object(libvirt_utils, 
'get_instance_path'), mock.patch.object(utils, 'execute')) as ( mock_exists, mock_get_path, mock_exec): mock_exists.return_value = True mock_get_path.return_value = '/fake/inst' drvr._cleanup_resize(ins_ref, _fake_network_info(self, 1)) mock_get_path.assert_called_once_with(ins_ref) mock_exec.assert_called_once_with('rm', '-rf', '/fake/inst_resize', delay_on_retry=True, attempts=5) def test_cleanup_resize_not_same_host(self): CONF.set_override('policy_dirs', [], group='oslo_policy') host = 'not' + CONF.host ins_ref = self._create_instance({'host': host}) fake_net = _fake_network_info(self, 1) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.image_backend = mock.Mock() drvr.image_backend.image.return_value = drvr.image_backend with test.nested( mock.patch.object(os.path, 'exists'), mock.patch.object(libvirt_utils, 'get_instance_path'), mock.patch.object(utils, 'execute'), mock.patch.object(drvr, '_undefine_domain'), mock.patch.object(drvr, 'unplug_vifs'), mock.patch.object(drvr, 'unfilter_instance') ) as (mock_exists, mock_get_path, mock_exec, mock_undef, mock_unplug, mock_unfilter): mock_exists.return_value = True mock_get_path.return_value = '/fake/inst' drvr._cleanup_resize(ins_ref, fake_net) mock_get_path.assert_called_once_with(ins_ref) mock_exec.assert_called_once_with('rm', '-rf', '/fake/inst_resize', delay_on_retry=True, attempts=5) mock_undef.assert_called_once_with(ins_ref) mock_unplug.assert_called_once_with(ins_ref, fake_net) mock_unfilter.assert_called_once_with(ins_ref, fake_net) def test_cleanup_resize_snap_backend(self): CONF.set_override('policy_dirs', [], group='oslo_policy') ins_ref = self._create_instance({'host': CONF.host}) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.image_backend = mock.Mock() drvr.image_backend.image.return_value = drvr.image_backend with test.nested( mock.patch.object(os.path, 'exists'), mock.patch.object(libvirt_utils, 'get_instance_path'), mock.patch.object(utils, 'execute'), 
mock.patch.object(drvr.image_backend, 'remove_snap')) as ( mock_exists, mock_get_path, mock_exec, mock_remove): mock_exists.return_value = True mock_get_path.return_value = '/fake/inst' drvr._cleanup_resize(ins_ref, _fake_network_info(self, 1)) mock_get_path.assert_called_once_with(ins_ref) mock_exec.assert_called_once_with('rm', '-rf', '/fake/inst_resize', delay_on_retry=True, attempts=5) mock_remove.assert_called_once_with( libvirt_utils.RESIZE_SNAPSHOT_NAME, ignore_errors=True) def test_cleanup_resize_snap_backend_image_does_not_exist(self): CONF.set_override('policy_dirs', [], group='oslo_policy') ins_ref = self._create_instance({'host': CONF.host}) drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) drvr.image_backend = mock.Mock() drvr.image_backend.image.return_value = drvr.image_backend drvr.image_backend.exists.return_value = False with test.nested( mock.patch.object(os.path, 'exists'), mock.patch.object(libvirt_utils, 'get_instance_path'), mock.patch.object(utils, 'execute'), mock.patch.object(drvr.image_backend, 'remove_snap')) as ( mock_exists, mock_get_path, mock_exec, mock_remove): mock_exists.return_value = True mock_get_path.return_value = '/fake/inst' drvr._cleanup_resize(ins_ref, _fake_network_info(self, 1)) mock_get_path.assert_called_once_with(ins_ref) mock_exec.assert_called_once_with('rm', '-rf', '/fake/inst_resize', delay_on_retry=True, attempts=5) self.assertFalse(mock_remove.called) def test_get_instance_disk_info_exception(self): instance = self._create_instance() class FakeExceptionDomain(FakeVirtDomain): def __init__(self): super(FakeExceptionDomain, self).__init__() def XMLDesc(self, flags): raise fakelibvirt.libvirtError("Libvirt error") def fake_get_domain(self, instance): return FakeExceptionDomain() self.stubs.Set(host.Host, 'get_domain', fake_get_domain) self.assertRaises(exception.InstanceNotFound, self.drvr.get_instance_disk_info, instance) @mock.patch('os.path.exists') @mock.patch.object(lvm, 'list_volumes') def 
test_lvm_disks(self, listlvs, exists): instance = objects.Instance(uuid=uuids.instance, id=1) self.flags(images_volume_group='vols', group='libvirt') exists.return_value = True listlvs.return_value = ['%s_foo' % uuids.instance, 'other-uuid_foo'] disks = self.drvr._lvm_disks(instance) self.assertEqual(['/dev/vols/%s_foo' % uuids.instance], disks) def test_is_booted_from_volume(self): func = libvirt_driver.LibvirtDriver._is_booted_from_volume instance, disk_mapping = {}, {} self.assertTrue(func(instance, disk_mapping)) disk_mapping['disk'] = 'map' self.assertTrue(func(instance, disk_mapping)) instance['image_ref'] = 'uuid' self.assertFalse(func(instance, disk_mapping)) @mock.patch( 'nova.virt.libvirt.driver.LibvirtDriver._try_fetch_image_cache') @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._inject_data') @mock.patch('nova.virt.libvirt.driver.imagecache') def test_data_not_injects_with_configdrive(self, mock_image, mock_inject, mock_fetch): self.flags(inject_partition=-1, group='libvirt') drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) # config_drive is True by default, configdrive.required_by() # returns True instance_ref = self._create_instance() disk_images = {'image_id': None} drvr._create_and_inject_local_root(self.context, instance_ref, False, '', disk_images, [], None, [], True, None) self.assertFalse(mock_inject.called) @mock.patch('nova.virt.netutils.get_injected_network_template') @mock.patch('nova.virt.disk.api.inject_data') @mock.patch.object(libvirt_driver.LibvirtDriver, "_conn") def _test_inject_data(self, driver_params, path, disk_params, mock_conn, disk_inject_data, inj_network, called=True): class ImageBackend(object): path = '/path' def get_model(self, connection): return imgmodel.LocalFileImage(self.path, imgmodel.FORMAT_RAW) def fake_inj_network(*args, **kwds): return args[0] or None inj_network.side_effect = fake_inj_network image_backend = ImageBackend() image_backend.path = path with mock.patch.object( 
self.drvr.image_backend, 'image', return_value=image_backend): self.flags(inject_partition=0, group='libvirt') self.drvr._inject_data(image_backend, **driver_params) if called: disk_inject_data.assert_called_once_with( mock.ANY, *disk_params, partition=None, mandatory=('files',)) self.assertEqual(disk_inject_data.called, called) def _test_inject_data_default_driver_params(self, **params): return { 'instance': self._create_instance(params=params), 'network_info': None, 'admin_pass': None, 'files': None } def test_inject_data_adminpass(self): self.flags(inject_password=True, group='libvirt') driver_params = self._test_inject_data_default_driver_params() driver_params['admin_pass'] = 'foobar' disk_params = [ None, # key None, # net {}, # metadata 'foobar', # admin_pass None, # files ] self._test_inject_data(driver_params, "/path", disk_params) # Test with the configuration setted to false. self.flags(inject_password=False, group='libvirt') self._test_inject_data(driver_params, "/path", disk_params, called=False) def test_inject_data_key(self): driver_params = self._test_inject_data_default_driver_params() driver_params['instance']['key_data'] = 'key-content' self.flags(inject_key=True, group='libvirt') disk_params = [ 'key-content', # key None, # net {}, # metadata None, # admin_pass None, # files ] self._test_inject_data(driver_params, "/path", disk_params) # Test with the configuration setted to false. 
        self.flags(inject_key=False, group='libvirt')
        self._test_inject_data(driver_params, "/path", disk_params,
                               called=False)

    def test_inject_data_metadata(self):
        """Instance metadata is always passed through to the injector."""
        instance_metadata = {'metadata': {'data': 'foo'}}
        driver_params = self._test_inject_data_default_driver_params(
            **instance_metadata
        )
        disk_params = [
            None,  # key
            None,  # net
            {'data': 'foo'},  # metadata
            None,  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)

    def test_inject_data_files(self):
        """Personality files supplied by the caller are injected."""
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['files'] = ['file1', 'file2']
        disk_params = [
            None,  # key
            None,  # net
            {},  # metadata
            None,  # admin_pass
            ['file1', 'file2'],  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)

    def test_inject_data_net(self):
        """Network configuration is injected when network_info is set."""
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['network_info'] = {'net': 'eno1'}
        disk_params = [
            None,  # key
            {'net': 'eno1'},  # net
            {},  # metadata
            None,  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)

    def test_inject_not_exist_image(self):
        """No injection is attempted when the backing image is missing."""
        driver_params = self._test_inject_data_default_driver_params()
        disk_params = [
            'key-content',  # key
            None,  # net
            None,  # metadata
            None,  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/fail/path",
                               disk_params, called=False)

    def _test_attach_detach_interface(self, method, power_state,
                                      expected_flags):
        """Drive attach_interface/detach_interface through mox.

        Records the expected vif-driver and domain calls (including the
        libvirt affect flags appropriate for the given power state), then
        replays and invokes the driver method under test.
        """
        instance = self._create_instance()
        network_info = _fake_network_info(self, 1)
        domain = FakeVirtDomain()
        self.mox.StubOutWithMock(host.Host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr.firewall_driver,
                                 'setup_basic_filtering')
        self.mox.StubOutWithMock(domain, 'attachDeviceFlags')
        self.mox.StubOutWithMock(domain, 'info')

        host.Host.get_domain(instance).AndReturn(domain)
        # Basic filtering is only set up on attach, not on detach.
        if method == 'attach_interface':
            self.drvr.firewall_driver.setup_basic_filtering(
                instance, [network_info[0]])

        fake_image_meta = objects.ImageMeta.from_dict(
            {'id': instance.image_ref})

        expected = self.drvr.vif_driver.get_config(
            instance, network_info[0], fake_image_meta, instance.flavor,
            CONF.libvirt.virt_type, self.drvr._host)
        self.mox.StubOutWithMock(self.drvr.vif_driver, 'get_config')
        self.drvr.vif_driver.get_config(
            instance, network_info[0],
            mox.IsA(objects.ImageMeta),
            mox.IsA(objects.Flavor),
            CONF.libvirt.virt_type,
            self.drvr._host).AndReturn(expected)
        domain.info().AndReturn([power_state, 1, 2, 3, 4])

        if method == 'attach_interface':
            domain.attachDeviceFlags(expected.to_xml(), flags=expected_flags)
        elif method == 'detach_interface':
            domain.detachDeviceFlags(expected.to_xml(), expected_flags)

        self.mox.ReplayAll()
        if method == 'attach_interface':
            self.drvr.attach_interface(
                instance, fake_image_meta, network_info[0])
        elif method == 'detach_interface':
            self.drvr.detach_interface(
                instance, network_info[0])
        self.mox.VerifyAll()

    def test_attach_interface_with_running_instance(self):
        self._test_attach_detach_interface(
            'attach_interface', power_state.RUNNING,
            expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                            fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))

    def test_attach_interface_with_pause_instance(self):
        self._test_attach_detach_interface(
            'attach_interface', power_state.PAUSED,
            expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                            fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))

    def test_attach_interface_with_shutdown_instance(self):
        # A shut-down domain only takes the CONFIG flag (no live update).
        self._test_attach_detach_interface(
            'attach_interface', power_state.SHUTDOWN,
            expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG))

    def test_detach_interface_with_running_instance(self):
        self._test_attach_detach_interface(
            'detach_interface', power_state.RUNNING,
            expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                            fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))

    def test_detach_interface_with_pause_instance(self):
        self._test_attach_detach_interface(
            'detach_interface', power_state.PAUSED,
            expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                            fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))

    def test_detach_interface_with_shutdown_instance(self):
        self._test_attach_detach_interface(
            'detach_interface', power_state.SHUTDOWN,
            expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG))

    @mock.patch('nova.virt.libvirt.driver.LOG')
    def test_detach_interface_device_not_found(self, mock_log):
        # Asserts that we don't log an error when the interface device is not
        # found on the guest after a libvirt error during detach.
        instance = self._create_instance()
        vif = _fake_network_info(self, 1)[0]
        guest = mock.Mock(spec='nova.virt.libvirt.guest.Guest')
        guest.get_power_state = mock.Mock()
        self.drvr._host.get_guest = mock.Mock(return_value=guest)
        self.drvr.vif_driver = mock.Mock()
        # Simulate libvirt failing the detach because the device is gone.
        error = fakelibvirt.libvirtError(
            'no matching network device was found')
        error.err = (fakelibvirt.VIR_ERR_OPERATION_FAILED,)
        guest.detach_device = mock.Mock(side_effect=error)
        # mock out that get_interface_by_mac doesn't find the interface
        guest.get_interface_by_mac = mock.Mock(return_value=None)
        self.drvr.detach_interface(instance, vif)
        guest.get_interface_by_mac.assert_called_once_with(vif['address'])
        # an error shouldn't be logged, but a warning should be logged
        self.assertFalse(mock_log.error.called)
        self.assertEqual(1, mock_log.warning.call_count)
        self.assertIn('the device is no longer found on the guest',
                      six.text_type(mock_log.warning.call_args[0]))

    @mock.patch('nova.virt.libvirt.utils.write_to_file')
    # NOTE(mdbooth): The following 4 mocks are required to execute
    #                get_guest_xml().
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_build_device_metadata')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_supports_direct_io')
    @mock.patch('nova.api.metadata.base.InstanceMetadata')
    def _test_rescue(self, instance,
                     mock_instance_metadata, mock_supports_direct_io,
                     mock_build_device_metadata, mock_set_host_enabled,
                     mock_write_to_file,
                     exists=None):
        """Run driver.rescue() and capture its effects.

        Returns the fake image backend fixture (to inspect which disks were
        created) and the parsed domain XML that was passed to
        _create_domain.
        """
        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
        mock_build_device_metadata.return_value = None
        mock_supports_direct_io.return_value = True

        backend = self.useFixture(
            fake_imagebackend.ImageBackendFixture(exists=exists))

        image_meta = objects.ImageMeta.from_dict(
            {'id': uuids.image_id, 'name': 'fake'})
        network_info = _fake_network_info(self, 1)
        rescue_password = 'fake_password'

        # Captured by fake_create_domain below; a one-element list is used
        # as a mutable cell so the closure can write to it.
        domain_xml = [None]

        def fake_create_domain(xml=None, domain=None, power_on=True,
                               pause=False, post_xml_callback=None):
            domain_xml[0] = xml
            if post_xml_callback is not None:
                post_xml_callback()

        with mock.patch.object(
                self.drvr, '_create_domain',
                side_effect=fake_create_domain) as mock_create_domain:
            self.drvr.rescue(self.context, instance,
                             network_info, image_meta, rescue_password)

            self.assertTrue(mock_create_domain.called)

            return backend, etree.fromstring(domain_xml[0])

    def test_rescue(self):
        """Rescue without a config drive creates rescue disks + kernel."""
        instance = self._create_instance({'config_drive': None})
        backend, doc = self._test_rescue(instance)

        # Assert that we created the expected set of disks, and no others
        self.assertEqual(['disk.rescue', 'kernel.rescue', 'ramdisk.rescue'],
                         sorted(backend.created_disks.keys()))

        disks = backend.disks

        kernel_ramdisk = [disks[name + '.rescue']
                          for name in ('kernel', 'ramdisk')]

        # Assert that kernel and ramdisk were both created as raw
        for disk in kernel_ramdisk:
            self.assertEqual('raw', disk.image_type)

        # Assert that the root rescue disk was created as the default type
        self.assertIsNone(disks['disk.rescue'].image_type)

        # We expect the generated domain to contain
        # disk.rescue and disk, in that order
        expected_domain_disk_paths = map(
            lambda name: disks[name].path, ('disk.rescue', 'disk'))
        domain_disk_paths = doc.xpath('devices/disk/source/@file')
        self.assertEqual(expected_domain_disk_paths, domain_disk_paths)

        # The generated domain xml should contain the rescue kernel
        # and ramdisk
        expected_kernel_ramdisk_paths = map(
            lambda disk: os.path.join(CONF.instances_path, disk.path),
            kernel_ramdisk)
        kernel_ramdisk_paths = \
            doc.xpath('os/*[self::initrd|self::kernel]/text()')
        self.assertEqual(expected_kernel_ramdisk_paths,
                         kernel_ramdisk_paths)

    def test_rescue_config_drive(self):
        """Rescue with a config drive also regenerates disk.config.rescue."""
        instance = self._create_instance({'config_drive': str(True)})
        backend, doc = self._test_rescue(
            instance, exists=lambda name: name != 'disk.config.rescue')

        # Assert that we created the expected set of disks, and no others
        self.assertEqual(['disk.config.rescue', 'disk.rescue',
                          'kernel.rescue', 'ramdisk.rescue'],
                         sorted(backend.created_disks.keys()))

        disks = backend.disks

        config_disk = disks['disk.config.rescue']
        kernel_ramdisk = [disks[name + '.rescue']
                          for name in ('kernel', 'ramdisk')]

        # Assert that we imported the config disk
        self.assertTrue(config_disk.import_file.called)

        # Assert that the config disk, kernel and ramdisk were created as raw
        for disk in [config_disk] + kernel_ramdisk:
            self.assertEqual('raw', disk.image_type)

        # Assert that the root rescue disk was created as the default type
        self.assertIsNone(disks['disk.rescue'].image_type)

        # We expect the generated domain to contain disk.rescue, disk, and
        # disk.config.rescue in that order
        expected_domain_disk_paths = map(
            lambda name: disks[name].path, ('disk.rescue', 'disk',
                                            'disk.config.rescue'))
        domain_disk_paths = doc.xpath('devices/disk/source/@file')
        self.assertEqual(expected_domain_disk_paths, domain_disk_paths)

        # The generated domain xml should contain the rescue kernel
        # and ramdisk
        expected_kernel_ramdisk_paths = map(
            lambda disk: os.path.join(CONF.instances_path, disk.path),
            kernel_ramdisk)
        kernel_ramdisk_paths = \
            doc.xpath('os/*[self::initrd|self::kernel]/text()')
        self.assertEqual(expected_kernel_ramdisk_paths,
                         kernel_ramdisk_paths)

    @mock.patch.object(libvirt_utils, 'get_instance_path')
    @mock.patch.object(libvirt_utils, 'load_file')
    @mock.patch.object(host.Host, "get_domain")
    def test_unrescue(self, mock_get_domain, mock_load_file,
                      mock_get_instance_path):
        """unrescue restores the saved XML and removes rescue artifacts."""
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='block' device='disk'>"
                    "<source dev='/dev/some-vg/some-lv'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "</devices></domain>")

        mock_get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)
        fake_dom = FakeVirtDomain(fake_xml=dummyxml)
        mock_get_domain.return_value = fake_dom
        mock_load_file.return_value = "fake_unrescue_xml"
        unrescue_xml_path = os.path.join('/path', 'unrescue.xml')
        xml_path = os.path.join('/path', 'libvirt.xml')
        rescue_file = os.path.join('/path', 'rescue.file')
        rescue_dir = os.path.join('/path', 'rescue.dir')

        # os.path.isdir stub: rescue.dir is a directory, rescue.file is not,
        # so unrescue must rmtree one and file_delete the other.
        def isdir_sideeffect(*args, **kwargs):
            if args[0] == '/path/rescue.file':
                return False
            if args[0] == '/path/rescue.dir':
                return True

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with test.nested(
                mock.patch.object(libvirt_utils, 'write_to_file'),
                mock.patch.object(drvr, '_destroy'),
                mock.patch.object(drvr, '_create_domain'),
                mock.patch.object(libvirt_utils, 'file_delete'),
                mock.patch.object(shutil, 'rmtree'),
                mock.patch.object(os.path, "isdir",
                                  side_effect=isdir_sideeffect),
                mock.patch.object(drvr, '_lvm_disks',
                                  return_value=['lvm.rescue']),
                mock.patch.object(lvm, 'remove_volumes'),
                mock.patch.object(glob, 'iglob',
                                  return_value=[rescue_file, rescue_dir])
                ) as (mock_write, mock_destroy, mock_create, mock_del,
                      mock_rmtree, mock_isdir, mock_lvm_disks,
                      mock_remove_volumes, mock_glob):
            drvr.unrescue(instance, None)
            mock_write.assert_called_once_with(xml_path, "fake_unrescue_xml")
            mock_destroy.assert_called_once_with(instance)
            mock_create.assert_called_once_with("fake_unrescue_xml",
                                                fake_dom)
            # unrescue.xml and rescue.file are deleted; rescue.dir is
            # removed recursively; rescue LVM volumes are cleaned up.
            self.assertEqual(2, mock_del.call_count)
            self.assertEqual(unrescue_xml_path,
                             mock_del.call_args_list[0][0][0])
            self.assertEqual(1, mock_rmtree.call_count)
            self.assertEqual(rescue_dir, mock_rmtree.call_args_list[0][0][0])
            self.assertEqual(rescue_file, mock_del.call_args_list[1][0][0])
            mock_remove_volumes.assert_called_once_with(['lvm.rescue'])

    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files(self, get_instance_path, exists,
                                   exe, shutil):
        """Happy path: instance dir is renamed to *_del then removed."""
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        exists.side_effect = [False, False, True, False]

        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        exe.assert_called_with('mv', '/path', '/path_del')
        shutil.assert_called_with('/path_del')
        self.assertTrue(result)

    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('os.kill')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_kill_running(
            self, get_instance_path, kill, exists, exe, shutil):
        """Outstanding jobs tracked for the instance are SIGKILLed first."""
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)
        self.drvr.job_tracker.jobs[instance.uuid] = [3, 4]

        exists.side_effect = [False, False, True, False]

        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        exe.assert_called_with('mv', '/path', '/path_del')
        # Each tracked pid gets SIGKILL followed by a liveness probe (sig 0).
        kill.assert_has_calls(
            [mock.call(3, signal.SIGKILL), mock.call(3, 0),
             mock.call(4, signal.SIGKILL), mock.call(4, 0)])
        shutil.assert_called_with('/path_del')
        self.assertTrue(result)
        self.assertNotIn(instance.uuid, self.drvr.job_tracker.jobs)

    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_resize(self, get_instance_path, exists,
                                          exe, shutil):
        """If moving /path fails, the _resize directory is tried next."""
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        # First mv raises (no /path), second mv of /path_resize succeeds.
        nova.utils.execute.side_effect = [Exception(), None]
        exists.side_effect = [False, False, True, False]

        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        expected = [mock.call('mv', '/path', '/path_del'),
                    mock.call('mv', '/path_resize', '/path_del')]
        self.assertEqual(expected, exe.mock_calls)
        shutil.assert_called_with('/path_del')
        self.assertTrue(result)

    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_failed(self, get_instance_path, exists,
                                          exe, shutil):
        """Returns False when the _del directory still exists afterwards."""
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        exists.side_effect = [False, False, True, True]

        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        exe.assert_called_with('mv', '/path', '/path_del')
        shutil.assert_called_with('/path_del')
        self.assertFalse(result)

    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_mv_failed(self, get_instance_path,
                                             exists, exe, shutil):
        """Both mv attempts fail and the dirs remain: result is False."""
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        nova.utils.execute.side_effect = Exception()
        exists.side_effect = [True, True]

        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        # Both mv targets are retried once each (hence * 2).
        expected = [mock.call('mv', '/path', '/path_del'),
                    mock.call('mv', '/path_resize', '/path_del')] * 2
        self.assertEqual(expected, exe.mock_calls)
        self.assertFalse(result)

    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_resume(self, get_instance_path, exists,
                                          exe, shutil):
        """mv fails but the _del dir exists: cleanup proceeds, True."""
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        nova.utils.execute.side_effect = Exception()
        exists.side_effect = [False, False, True, False]

        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        expected = [mock.call('mv', '/path', '/path_del'),
                    mock.call('mv', '/path_resize', '/path_del')] * 2
        self.assertEqual(expected, exe.mock_calls)
        self.assertTrue(result)

    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_none(self, get_instance_path, exists,
                                        exe, shutil):
        """Nothing exists to delete: rmtree is never called, True."""
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        nova.utils.execute.side_effect = Exception()
        exists.side_effect = [False, False, False, False]

        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        expected = [mock.call('mv', '/path', '/path_del'),
                    mock.call('mv', '/path_resize', '/path_del')] * 2
        self.assertEqual(expected, exe.mock_calls)
        self.assertEqual(0, len(shutil.mock_calls))
        self.assertTrue(result)

    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_concurrent(self, get_instance_path,
                                              exists, exe, shutil):
        """Concurrent deletion: first two mv calls fail, retry succeeds."""
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid=uuids.instance, id=1)

        nova.utils.execute.side_effect = [Exception(), Exception(), None]
        exists.side_effect = [False, False, True, False]

        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        expected = [mock.call('mv', '/path', '/path_del'),
                    mock.call('mv', '/path_resize', '/path_del')]
        expected.append(expected[0])
        self.assertEqual(expected, exe.mock_calls)
        shutil.assert_called_with('/path_del')
        self.assertTrue(result)

    def _assert_on_id_map(self, idmap, klass, start, target, count):
        """Assert one generated idmap has the expected class and fields."""
        self.assertIsInstance(idmap, klass)
        self.assertEqual(start, idmap.start)
        self.assertEqual(target, idmap.target)
        self.assertEqual(count, idmap.count)

    def test_get_id_maps(self):
        """LXC with both uid and gid maps yields 2 UID + 2 GID entries."""
        self.flags(virt_type="lxc", group="libvirt")
        CONF.libvirt.virt_type = "lxc"
        CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
        CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]

        idmaps = self.drvr._get_guest_idmaps()

        self.assertEqual(len(idmaps), 4)
        self._assert_on_id_map(idmaps[0],
                               vconfig.LibvirtConfigGuestUIDMap,
                               0, 10000, 1)
        self._assert_on_id_map(idmaps[1],
                               vconfig.LibvirtConfigGuestUIDMap,
                               1, 20000, 10)
        self._assert_on_id_map(idmaps[2],
                               vconfig.LibvirtConfigGuestGIDMap,
                               0, 10000, 1)
        self._assert_on_id_map(idmaps[3],
                               vconfig.LibvirtConfigGuestGIDMap,
                               1, 20000, 10)

    def test_get_id_maps_not_lxc(self):
        """Non-LXC virt types never get idmaps regardless of config."""
        CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
        CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]

        idmaps = self.drvr._get_guest_idmaps()

        self.assertEqual(0, len(idmaps))

    def test_get_id_maps_only_uid(self):
        self.flags(virt_type="lxc", group="libvirt")
        CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
        CONF.libvirt.gid_maps = []

        idmaps = self.drvr._get_guest_idmaps()

        self.assertEqual(2, len(idmaps))
        self._assert_on_id_map(idmaps[0],
                               vconfig.LibvirtConfigGuestUIDMap,
                               0, 10000, 1)
        self._assert_on_id_map(idmaps[1],
                               vconfig.LibvirtConfigGuestUIDMap,
                               1, 20000, 10)

    def test_get_id_maps_only_gid(self):
        self.flags(virt_type="lxc", group="libvirt")
        CONF.libvirt.uid_maps = []
        CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]

        idmaps = self.drvr._get_guest_idmaps()

        self.assertEqual(2, len(idmaps))
        self._assert_on_id_map(idmaps[0],
                               vconfig.LibvirtConfigGuestGIDMap,
                               0, 10000, 1)
        self._assert_on_id_map(idmaps[1],
                               vconfig.LibvirtConfigGuestGIDMap,
                               1, 20000, 10)

    def test_instance_on_disk(self):
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(uuid=uuids.instance, id=1)
        self.assertFalse(drvr.instance_on_disk(instance))

    def test_instance_on_disk_rbd(self):
        # With an rbd (shared-storage) backend the disk is always reachable.
        self.flags(images_type='rbd', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(uuid=uuids.instance, id=1)
        self.assertTrue(drvr.instance_on_disk(instance))

    def test_get_disk_xml(self):
        """Guest.get_disk returns per-device XML, or None if absent."""
        dom_xml = """
              <domain type="kvm">
                <devices>
                  <disk type="file">
                     <source file="disk1_file"/>
                     <target dev="vda" bus="virtio"/>
                     <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                  <disk type="block">
                    <source dev="/path/to/dev/1"/>
                    <target dev="vdb" bus="virtio" serial="1234"/>
                  </disk>
                </devices>
              </domain>
              """

        diska_xml = """<disk type="file" device="disk">
  <source file="disk1_file"/>
  <target bus="virtio" dev="vda"/>
  <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>"""

        diskb_xml = """<disk type="block" device="disk">
  <source dev="/path/to/dev/1"/>
  <target bus="virtio" dev="vdb"/>
</disk>"""

        dom = mock.MagicMock()
        dom.XMLDesc.return_value = dom_xml
        guest = libvirt_guest.Guest(dom)

        # NOTE(gcb): etree.tostring(node) returns an extra line with
        # some white spaces, need to strip it.
        actual_diska_xml = guest.get_disk('vda').to_xml()
        self.assertEqual(diska_xml.strip(), actual_diska_xml.strip())
        actual_diskb_xml = guest.get_disk('vdb').to_xml()
        self.assertEqual(diskb_xml.strip(), actual_diskb_xml.strip())
        self.assertIsNone(guest.get_disk('vdc'))

    def test_vcpu_model_from_config(self):
        """_cpu_config_to_vcpu_model maps guest CPU config to the object."""
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        # A None config yields a None model.
        vcpu_model = drv._cpu_config_to_vcpu_model(None, None)
        self.assertIsNone(vcpu_model)

        cpu = vconfig.LibvirtConfigGuestCPU()
        feature1 = vconfig.LibvirtConfigGuestCPUFeature()
        feature2 = vconfig.LibvirtConfigGuestCPUFeature()
        feature1.name = 'sse'
        feature1.policy = cpumodel.POLICY_REQUIRE
        feature2.name = 'aes'
        feature2.policy = cpumodel.POLICY_REQUIRE

        cpu.features = set([feature1, feature2])
        cpu.mode = cpumodel.MODE_CUSTOM
        cpu.sockets = 1
        cpu.cores = 2
        cpu.threads = 4
        vcpu_model = drv._cpu_config_to_vcpu_model(cpu, None)
        self.assertEqual(cpumodel.MATCH_EXACT, vcpu_model.match)
        self.assertEqual(cpumodel.MODE_CUSTOM, vcpu_model.mode)
        self.assertEqual(4, vcpu_model.topology.threads)
        self.assertEqual(set(['sse', 'aes']),
                         set([f.name for f in vcpu_model.features]))

        # Passing an existing model updates it in place and returns it.
        cpu.mode = cpumodel.MODE_HOST_MODEL
        vcpu_model_1 = drv._cpu_config_to_vcpu_model(cpu, vcpu_model)
        self.assertEqual(cpumodel.MODE_HOST_MODEL, vcpu_model.mode)
        self.assertEqual(vcpu_model, vcpu_model_1)

    @mock.patch.object(lvm, 'get_volume_size', return_value=10)
    @mock.patch.object(host.Host, "get_guest")
    @mock.patch.object(dmcrypt, 'delete_volume')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.unfilter_instance')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
    @mock.patch.object(objects.Instance, 'save')
    def test_cleanup_lvm_encrypted(self, mock_save, mock_undefine_domain,
                                   mock_unfilter, mock_delete_volume,
                                   mock_get_guest, mock_get_size):
        """cleanup() removes the dm-crypt mapping of an encrypted disk."""
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance = objects.Instance(
            uuid=uuids.instance, id=1,
            ephemeral_key_uuid=uuids.ephemeral_key_uuid)
        instance.system_metadata = {}
        block_device_info = {'root_device_name': '/dev/vda',
                             'ephemerals': [],
                             'block_device_mapping': []}
        self.flags(images_type="lvm",
                   group='libvirt')
        dom_xml = """
              <domain type="kvm">
                <devices>
                  <disk type="block">
                    <driver name='qemu' type='raw' cache='none'/>
                    <source dev="/dev/mapper/fake-dmcrypt"/>
                    <target dev="vda" bus="virtio" serial="1234"/>
                  </disk>
                </devices>
              </domain>
              """
        dom = mock.MagicMock()
        dom.XMLDesc.return_value = dom_xml
        guest = libvirt_guest.Guest(dom)
        mock_get_guest.return_value = guest
        drv.cleanup(self.context, instance, 'fake_network',
                    destroy_vifs=False,
                    block_device_info=block_device_info)

        mock_delete_volume.assert_called_once_with('/dev/mapper/fake-dmcrypt')

    @mock.patch.object(lvm, 'get_volume_size', return_value=10)
    @mock.patch.object(host.Host, "get_guest")
    @mock.patch.object(dmcrypt, 'delete_volume')
    def _test_cleanup_lvm(self, mock_delete_volume, mock_get_guest,
                          mock_size, encrypted=False):
        """_cleanup_lvm deletes the dm-crypt mapping only when encrypted."""
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance = objects.Instance(
            uuid=uuids.instance, id=1,
            ephemeral_key_uuid=uuids.ephemeral_key_uuid)
        block_device_info = {'root_device_name': '/dev/vda',
                             'ephemerals': [],
                             'block_device_mapping': []}

        # The '-dmcrypt' suffix on the device name marks it as encrypted.
        dev_name = 'fake-dmcrypt' if encrypted else 'fake'
        dom_xml = """
              <domain type="kvm">
                <devices>
                  <disk type="block">
                    <driver name='qemu' type='raw' cache='none'/>
                    <source dev="/dev/mapper/%s"/>
                    <target dev="vda" bus="virtio" serial="1234"/>
                  </disk>
                </devices>
              </domain>
              """ % dev_name
        dom = mock.MagicMock()
        dom.XMLDesc.return_value = dom_xml
        guest = libvirt_guest.Guest(dom)
        mock_get_guest.return_value = guest
        drv._cleanup_lvm(instance, block_device_info)

        if encrypted:
            mock_delete_volume.assert_called_once_with(
                '/dev/mapper/fake-dmcrypt')
        else:
            self.assertFalse(mock_delete_volume.called)

    def test_cleanup_lvm(self):
        self._test_cleanup_lvm()

    def test_cleanup_encrypted_lvm(self):
        self._test_cleanup_lvm(encrypted=True)

    def test_vcpu_model_to_config(self):
        """_vcpu_model_to_cpu_config maps the object back to guest config."""
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        feature = objects.VirtCPUFeature(policy=cpumodel.POLICY_REQUIRE,
                                         name='sse')
        feature_1 = objects.VirtCPUFeature(policy=cpumodel.POLICY_FORBID,
                                           name='aes')
        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=4)
        vcpu_model = objects.VirtCPUModel(mode=cpumodel.MODE_HOST_MODEL,
                                          features=[feature, feature_1],
                                          topology=topo)

        cpu = drv._vcpu_model_to_cpu_config(vcpu_model)
        self.assertEqual(cpumodel.MODE_HOST_MODEL, cpu.mode)
        self.assertEqual(1, cpu.sockets)
        self.assertEqual(4, cpu.threads)
        self.assertEqual(2, len(cpu.features))
        self.assertEqual(set(['sse', 'aes']),
                         set([f.name for f in cpu.features]))
        self.assertEqual(set([cpumodel.POLICY_REQUIRE,
                              cpumodel.POLICY_FORBID]),
                         set([f.policy for f in cpu.features]))

    def test_trigger_crash_dump(self):
        mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
        instance = objects.Instance(uuid=uuids.instance, id=1)

        with mock.patch.object(self.drvr._host, 'get_guest',
                               return_value=mock_guest):
            self.drvr.trigger_crash_dump(instance)

    def test_trigger_crash_dump_not_running(self):
        # VIR_ERR_OPERATION_INVALID from inject_nmi is translated to
        # InstanceNotRunning.
        ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'Requested operation is not valid: domain is not running',
            error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID)

        mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
        mock_guest.inject_nmi = mock.Mock(side_effect=ex)
        instance = objects.Instance(uuid=uuids.instance, id=1)

        with mock.patch.object(self.drvr._host, 'get_guest',
                               return_value=mock_guest):
            self.assertRaises(exception.InstanceNotRunning,
                              self.drvr.trigger_crash_dump, instance)

    def test_trigger_crash_dump_not_supported(self):
        # VIR_ERR_NO_SUPPORT is translated to TriggerCrashDumpNotSupported.
        ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            '',
            error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)

        mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
        mock_guest.inject_nmi = mock.Mock(side_effect=ex)
        instance = objects.Instance(uuid=uuids.instance, id=1)

        with mock.patch.object(self.drvr._host, 'get_guest',
                               return_value=mock_guest):
            self.assertRaises(exception.TriggerCrashDumpNotSupported,
                              self.drvr.trigger_crash_dump, instance)

    def test_trigger_crash_dump_unexpected_error(self):
        # Any other libvirt error propagates unchanged.
        ex = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'UnexpectedError',
            error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR)

        mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
        mock_guest.inject_nmi = mock.Mock(side_effect=ex)
        instance = objects.Instance(uuid=uuids.instance, id=1)

        with mock.patch.object(self.drvr._host, 'get_guest',
                               return_value=mock_guest):
            self.assertRaises(fakelibvirt.libvirtError,
                              self.drvr.trigger_crash_dump, instance)


class LibvirtVolumeUsageTestCase(test.NoDBTestCase):
    """Test for LibvirtDriver.get_all_volume_usage."""

    def setUp(self):
        super(LibvirtVolumeUsageTestCase, self).setUp()
        self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()
        self.ins_ref = objects.Instance(
            id=1729,
            uuid='875a8070-d0b9-4949-8b31-104d125c9a64'
        )

        # verify bootable volume device path also
        self.bdms = [{'volume_id': 1,
                      'device_name': '/dev/vde'},
                     {'volume_id': 2,
                      'device_name': 'vda'}]

    def test_get_all_volume_usage(self):
        """Per-volume stats are gathered from block_stats for each bdm."""
        def fake_block_stats(instance_name, disk):
            # (rd_req, rd_bytes, wr_req, wr_bytes, errs)
            return (169, 688640, 0, 0, -1)

        self.stubs.Set(self.drvr, 'block_stats', fake_block_stats)
        vol_usage = self.drvr.get_all_volume_usage(
            self.c,
            [dict(instance=self.ins_ref, instance_bdms=self.bdms)])

        expected_usage = [{'volume': 1,
                           'instance': self.ins_ref,
                           'rd_bytes': 688640, 'wr_req': 0,
                           'rd_req': 169, 'wr_bytes': 0},
                          {'volume': 2,
                           'instance': self.ins_ref,
                           'rd_bytes': 688640, 'wr_req': 0,
                           'rd_req': 169, 'wr_bytes': 0}]
        self.assertEqual(vol_usage, expected_usage)

    def test_get_all_volume_usage_device_not_found(self):
        """A missing domain results in an empty usage list, not an error."""
        def fake_get_domain(self, instance):
            raise exception.InstanceNotFound(instance_id="fakedom")

        self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
        vol_usage = self.drvr.get_all_volume_usage(
            self.c,
            [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
        self.assertEqual(vol_usage, [])


class LibvirtNonblockingTestCase(test.NoDBTestCase):
    """Test libvirtd calls are nonblocking."""

    def setUp(self):
        super(LibvirtNonblockingTestCase, self).setUp()
        self.flags(connection_uri="test:///default",
                   group='libvirt')

    def test_connection_to_primitive(self):
        # Test bug 962840.
        import nova.virt.libvirt.driver as libvirt_driver
        drvr = libvirt_driver.LibvirtDriver('')
        drvr.set_host_enabled = mock.Mock()
        jsonutils.to_primitive(drvr._conn, convert_instances=True)

    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_tpool_execute_calls_libvirt(self, mock_svc):
        """Libvirt calls must be routed through eventlet's tpool."""
        conn = fakelibvirt.virConnect()
        conn.is_expected = True

        self.mox.StubOutWithMock(eventlet.tpool, 'execute')
        eventlet.tpool.execute(
            fakelibvirt.openAuth,
            'test:///default',
            mox.IgnoreArg(),
            mox.IgnoreArg()).AndReturn(conn)
        eventlet.tpool.execute(
            conn.domainEventRegisterAny,
            None,
            fakelibvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
            mox.IgnoreArg(),
            mox.IgnoreArg())
        if hasattr(fakelibvirt.virConnect, 'registerCloseCallback'):
            eventlet.tpool.execute(
                conn.registerCloseCallback,
                mox.IgnoreArg(),
                mox.IgnoreArg())
        self.mox.ReplayAll()

        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        c = driver._get_connection()
        self.assertTrue(c.is_expected)


class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
    """Tests for libvirtDriver.volume_snapshot_create/delete."""

    def setUp(self):
        super(LibvirtVolumeSnapshotTestCase, self).setUp()

        self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()

        self.flags(instance_name_template='instance-%s')
        self.flags(qemu_allowed_storage_drivers=[], group='libvirt')

        # creating instance
        self.inst = {}
        self.inst['uuid'] = uuidutils.generate_uuid()
        self.inst['id'] = '1'

        # create domain info
        self.dom_xml = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                     <source file='disk1_file'/>
                     <target dev='vda' bus='virtio'/>
                     <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                  <disk type='block'>
                    <source dev='/path/to/dev/1'/>
                    <target dev='vdb' bus='virtio' serial='1234'/>
                  </disk>
                </devices>
              </domain>"""

        # alternate domain info with network-backed snapshot chain
        self.dom_netdisk_xml = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                    <source file='disk1_file'/>
                    <target dev='vda' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
                  </disk>
                  <disk type='network' device='disk'>
                    <driver name='qemu' type='qcow2'/>
                    <source protocol='gluster' name='vol1/root.img'>
                      <host name='server1' port='24007'/>
                    </source>
                    <backingStore type='network' index='1'>
                      <driver name='qemu' type='qcow2'/>
                      <source protocol='gluster' name='vol1/snap.img'>
                        <host name='server1' port='24007'/>
                      </source>
                      <backingStore type='network' index='2'>
                        <driver name='qemu' type='qcow2'/>
                        <source protocol='gluster' name='vol1/snap-b.img'>
                          <host name='server1' port='24007'/>
                        </source>
                        <backingStore/>
                      </backingStore>
                    </backingStore>
                    <target dev='vdb' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                </devices>
              </domain>
              """

        # XML with netdisk attached, and 1 snapshot taken
        self.dom_netdisk_xml_2 = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                    <source file='disk1_file'/>
                    <target dev='vda' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
                  </disk>
                  <disk type='network' device='disk'>
                    <driver name='qemu' type='qcow2'/>
                    <source protocol='gluster' name='vol1/snap.img'>
                      <host name='server1' port='24007'/>
                    </source>
                    <backingStore type='network' index='1'>
                      <driver name='qemu' type='qcow2'/>
                      <source protocol='gluster' name='vol1/root.img'>
                        <host name='server1' port='24007'/>
                      </source>
                      <backingStore/>
                    </backingStore>
                    <target dev='vdb' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                </devices>
              </domain>
              """

        self.create_info = {'type': 'qcow2',
                            'snapshot_id': '1234-5678',
                            'new_file': 'new-file'}

        self.volume_uuid = '0e38683e-f0af-418f-a3f1-6b67ea0f919d'
        self.snapshot_id = '9c3ca9f4-9f4e-4dba-bedd-5c5e4b52b162'

        self.delete_info_1 = {'type': 'qcow2',
                              'file_to_merge': 'snap.img',
                              'merge_target_file': None}
        self.delete_info_2 = {'type': 'qcow2',
                              'file_to_merge': 'snap.img',
                              'merge_target_file': 'other-snap.img'}

        self.delete_info_3 = {'type': 'qcow2',
                              'file_to_merge': None,
                              'merge_target_file': None}

        self.delete_info_netdisk = {'type': 'qcow2',
                                    'file_to_merge': 'snap.img',
                                    'merge_target_file': 'root.img'}

        self.delete_info_invalid_type = {'type': 'made_up_type',
                                         'file_to_merge': 'some_file',
                                         'merge_target_file':
                                             'some_other_file'}

    def tearDown(self):
        super(LibvirtVolumeSnapshotTestCase, self).tearDown()

    @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.'
                'refresh_connection_info')
    @mock.patch('nova.objects.block_device.BlockDeviceMapping.'
                'get_by_volume_and_instance')
    def test_volume_refresh_connection_info(
            self, mock_get_by_volume_and_instance,
            mock_refresh_connection_info):
        """The bdm is looked up by volume/instance and then refreshed."""
        instance = objects.Instance(**self.inst)
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
            'id': 123,
            'instance_uuid': uuids.instance,
            'device_name': '/dev/sdb',
            'source_type': 'volume',
            'destination_type': 'volume',
            'volume_id': 'fake-volume-id-1',
            'connection_info': '{"fake": "connection_info"}'})
        fake_bdm = objects.BlockDeviceMapping(self.c, **fake_bdm)
        mock_get_by_volume_and_instance.return_value = fake_bdm

        self.drvr._volume_refresh_connection_info(self.c, instance,
                                                  self.volume_uuid)

        mock_get_by_volume_and_instance.assert_called_once_with(
            self.c, self.volume_uuid, instance.uuid)
        mock_refresh_connection_info.assert_called_once_with(
            self.c, instance, self.drvr._volume_api, self.drvr)

    def test_volume_snapshot_create(self, quiesce=True):
        """Test snapshot creation with file-based disk."""
        self.flags(instance_name_template='instance-%s')
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')

        instance = objects.Instance(**self.inst)

        new_file = 'new-file'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
domain.XMLDesc(flags=0).AndReturn(self.dom_xml) snap_xml_src = ( '<domainsnapshot>\n' ' <disks>\n' ' <disk name="disk1_file" snapshot="external" type="file">\n' ' <source file="new-file"/>\n' ' </disk>\n' ' <disk name="vdb" snapshot="no"/>\n' ' </disks>\n' '</domainsnapshot>\n') # Older versions of libvirt may be missing these. fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32 fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64 snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT) snap_flags_q = (snap_flags | fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE) if quiesce: domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q) else: domain.snapshotCreateXML(snap_xml_src, flags=snap_flags_q).\ AndRaise(fakelibvirt.libvirtError( 'quiescing failed, no qemu-ga')) domain.snapshotCreateXML(snap_xml_src, flags=snap_flags) self.mox.ReplayAll() guest = libvirt_guest.Guest(domain) self.drvr._volume_snapshot_create(self.c, instance, guest, self.volume_uuid, new_file) self.mox.VerifyAll() def test_volume_snapshot_create_libgfapi(self, quiesce=True): """Test snapshot creation with libgfapi network disk.""" self.flags(instance_name_template = 'instance-%s') self.flags(qemu_allowed_storage_drivers = ['gluster'], group='libvirt') self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.dom_xml = """ <domain type='kvm'> <devices> <disk type='file'> <source file='disk1_file'/> <target dev='vda' bus='virtio'/> <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial> </disk> <disk type='block'> <source protocol='gluster' name='gluster1/volume-1234'> <host name='127.3.4.5' port='24007'/> </source> <target dev='vdb' bus='virtio' serial='1234'/> </disk> </devices> </domain>""" instance = objects.Instance(**self.inst) new_file = 'new-file' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 
    def test_volume_snapshot_create_noquiesce(self):
        """Exercise the create path where quiescing fails and is retried.

        Re-runs test_volume_snapshot_create with quiesce=False, which makes
        the first snapshotCreateXML call (with the QUIESCE flag) raise and
        expects a second call without the flag.
        """
        self.test_volume_snapshot_create(quiesce=False)

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_can_quiesce(self, ver):
        """_can_quiesce returns None (success) for kvm + guest agent."""
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.inst)
        # Image advertises a qemu guest agent, which is required to quiesce.
        image_meta = objects.ImageMeta.from_dict(
            {"properties": {
                "hw_qemu_guest_agent": "yes"}})
        self.assertIsNone(self.drvr._can_quiesce(instance, image_meta))

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_can_quiesce_bad_hyp(self, ver):
        """_can_quiesce rejects hypervisors other than kvm/qemu (here lxc)."""
        self.flags(virt_type='lxc', group='libvirt')
        instance = objects.Instance(**self.inst)
        image_meta = objects.ImageMeta.from_dict(
            {"properties": {
                "hw_qemu_guest_agent": "yes"}})
        self.assertRaises(exception.InstanceQuiesceNotSupported,
                          self.drvr._can_quiesce, instance, image_meta)

    @mock.patch.object(host.Host, 'has_min_version', return_value=False)
    def test_can_quiesce_bad_ver(self, ver):
        """_can_quiesce rejects hosts whose libvirt/qemu is too old."""
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.inst)
        # NOTE(review): plain dict rather than objects.ImageMeta like the
        # sibling tests; presumably never inspected because the version
        # check fails first -- confirm against _can_quiesce's check order.
        image_meta = {"properties": {
            "hw_qemu_guest_agent": "yes"}}
        self.assertRaises(exception.InstanceQuiesceNotSupported,
                          self.drvr._can_quiesce, instance, image_meta)

    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_can_quiesce_agent_not_enable(self, ver):
        """_can_quiesce raises when the image does not enable the agent."""
        self.flags(virt_type='kvm', group='libvirt')
        instance = objects.Instance(**self.inst)
        # Empty image metadata: no hw_qemu_guest_agent property at all.
        image_meta = objects.ImageMeta.from_dict({})
        self.assertRaises(exception.QemuGuestAgentNotEnabled,
                          self.drvr._can_quiesce, instance, image_meta)
self.create_info['snapshot_id'], 'creating') mock_vol_api.get_snapshot.assert_called_once_with( self.c, self.create_info['snapshot_id']) mock_refresh.assert_called_once_with( self.c, instance, self.volume_uuid) _test() def test_volume_snapshot_create_outer_failure(self): instance = objects.Instance(**self.inst) domain = FakeVirtDomain(fake_xml=self.dom_xml, id=1) guest = libvirt_guest.Guest(domain) self.mox.StubOutWithMock(self.drvr._host, 'get_guest') self.mox.StubOutWithMock(self.drvr, '_volume_api') self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_create') self.drvr._host.get_guest(instance).AndReturn(guest) self.drvr._volume_snapshot_create(self.c, instance, guest, self.volume_uuid, self.create_info['new_file']).\ AndRaise(exception.NovaException('oops')) self.drvr._volume_api.update_snapshot_status( self.c, self.create_info['snapshot_id'], 'error') self.mox.ReplayAll() self.assertRaises(exception.NovaException, self.drvr.volume_snapshot_create, self.c, instance, self.volume_uuid, self.create_info) def test_volume_snapshot_delete_1(self): """Deleting newest snapshot -- blockRebase.""" # libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE flag fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE') self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) domain.blockRebase('vda', 'snap.img', 0, flags=0) domain.blockJobInfo('vda', flags=0).AndReturn({ 'type': 0, 'bandwidth': 0, 'cur': 1, 'end': 1000}) domain.blockJobInfo('vda', flags=0).AndReturn({ 'type': 0, 'bandwidth': 0, 'cur': 1000, 
'end': 1000}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8}) def test_volume_snapshot_delete_relative_1(self): """Deleting newest snapshot -- blockRebase using relative flag""" self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) guest = libvirt_guest.Guest(domain) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_guest') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_guest(instance).AndReturn(guest) domain.blockRebase('vda', 'snap.img', 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE) domain.blockJobInfo('vda', flags=0).AndReturn({ 'type': 0, 'bandwidth': 0, 'cur': 1, 'end': 1000}) domain.blockJobInfo('vda', flags=0).AndReturn({ 'type': 0, 'bandwidth': 0, 'cur': 1000, 'end': 1000}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() def _setup_block_rebase_domain_and_guest_mocks(self, dom_xml): mock_domain = mock.Mock(spec=fakelibvirt.virDomain) mock_domain.XMLDesc.return_value = dom_xml guest = libvirt_guest.Guest(mock_domain) exc = fakelibvirt.make_libvirtError( fakelibvirt.libvirtError, 'virDomainBlockRebase() failed', error_code=fakelibvirt.VIR_ERR_OPERATION_INVALID) mock_domain.blockRebase.side_effect = exc return mock_domain, guest @mock.patch.object(host.Host, "has_min_version", mock.Mock(return_value=True)) @mock.patch("nova.virt.libvirt.guest.Guest.is_active", mock.Mock(return_value=False)) @mock.patch('nova.virt.images.qemu_img_info', 
return_value=mock.Mock(file_format="fake_fmt")) @mock.patch('nova.utils.execute') def test_volume_snapshot_delete_when_dom_not_running(self, mock_execute, mock_qemu_img_info): """Deleting newest snapshot of a file-based image when the domain is not running should trigger a blockRebase using qemu-img not libvirt. In this test, we rebase the image with another image as backing file. """ mock_domain, guest = self._setup_block_rebase_domain_and_guest_mocks( self.dom_xml) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' with mock.patch.object(self.drvr._host, 'get_guest', return_value=guest): self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) mock_qemu_img_info.assert_called_once_with("snap.img") mock_execute.assert_called_once_with('qemu-img', 'rebase', '-b', 'snap.img', '-F', 'fake_fmt', 'disk1_file') @mock.patch.object(host.Host, "has_min_version", mock.Mock(return_value=True)) @mock.patch("nova.virt.libvirt.guest.Guest.is_active", mock.Mock(return_value=False)) @mock.patch('nova.virt.images.qemu_img_info', return_value=mock.Mock(file_format="fake_fmt")) @mock.patch('nova.utils.execute') def test_volume_snapshot_delete_when_dom_not_running_and_no_rebase_base( self, mock_execute, mock_qemu_img_info): """Deleting newest snapshot of a file-based image when the domain is not running should trigger a blockRebase using qemu-img not libvirt. In this test, the image is rebased onto no backing file (i.e. 
it will exist independently of any backing file) """ mock_domain, mock_guest = ( self._setup_block_rebase_domain_and_guest_mocks(self.dom_xml)) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' with mock.patch.object(self.drvr._host, 'get_guest', return_value=mock_guest): self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_3) self.assertEqual(0, mock_qemu_img_info.call_count) mock_execute.assert_called_once_with('qemu-img', 'rebase', '-b', '', 'disk1_file') @mock.patch.object(host.Host, "has_min_version", mock.Mock(return_value=True)) @mock.patch("nova.virt.libvirt.guest.Guest.is_active", mock.Mock(return_value=False)) def test_volume_snapshot_delete_when_dom_with_nw_disk_not_running(self): """Deleting newest snapshot of a network disk when the domain is not running should raise a NovaException. """ mock_domain, mock_guest = ( self._setup_block_rebase_domain_and_guest_mocks( self.dom_netdisk_xml)) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' with mock.patch.object(self.drvr._host, 'get_guest', return_value=mock_guest): ex = self.assertRaises(exception.NovaException, self.drvr._volume_snapshot_delete, self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.assertIn('has not been fully tested', six.text_type(ex)) def test_volume_snapshot_delete_2(self): """Deleting older snapshot -- blockCommit.""" # libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE') self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeVirtDomain(fake_xml=self.dom_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') 
    def test_volume_snapshot_delete_relative_2(self):
        """Deleting older snapshot -- blockCommit using relative flag"""

        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)

        # delete_info_2 names a merge_target_file, so the driver must use
        # blockCommit (merge into an older image) with the RELATIVE flag.
        domain.blockCommit('vda', 'other-snap.img', 'snap.img', 0,
                           flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)
        # Two polls: job in progress (cur < end), then job complete.
        domain.blockJobInfo('vda', flags=0).AndReturn({
            'type': 0, 'bandwidth': 0, 'cur': 1, 'end': 1000})
        domain.blockJobInfo('vda', flags=0).AndReturn({
            'type': 0, 'bandwidth': 0, 'cur': 1000, 'end': 1000})

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id,
                                          self.delete_info_2)

        self.mox.VerifyAll()

    def test_volume_snapshot_delete_nonrelative_null_base(self):
        # Deleting newest and last snapshot of a volume
        # with blockRebase. So base of the new image will be null.

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        guest = libvirt_guest.Guest(domain)

        with test.nested(
            mock.patch.object(domain, 'XMLDesc', return_value=self.dom_xml),
            mock.patch.object(self.drvr._host, 'get_guest',
                              return_value=guest),
            mock.patch.object(domain, 'blockRebase'),
            # Single poll reporting a finished job.
            mock.patch.object(domain, 'blockJobInfo',
                              return_value={
                                  'type': 4,  # See virDomainBlockJobType enum
                                  'bandwidth': 0,
                                  'cur': 1000,
                                  'end': 1000})
        ) as (mock_xmldesc, mock_get_guest, mock_rebase, mock_job_info):
            self.drvr._volume_snapshot_delete(self.c, instance,
                                              self.volume_uuid, snapshot_id,
                                              self.delete_info_3)

            mock_xmldesc.assert_called_once_with(flags=0)
            mock_get_guest.assert_called_once_with(instance)
            # delete_info_3 has no file_to_merge, so rebase onto a null
            # base (image becomes standalone) without the relative flag.
            mock_rebase.assert_called_once_with('vda', None, 0, flags=0)
            mock_job_info.assert_called_once_with('vda', flags=0)
    def test_volume_snapshot_delete_outer_success(self):
        """volume_snapshot_delete: happy path updates Cinder then refreshes.

        Records (mox) that the inner _volume_snapshot_delete runs, Cinder's
        snapshot status moves to 'deleting', and the BDM connection info is
        refreshed afterwards.
        """
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete')

        self.drvr._volume_snapshot_delete(self.c,
                                          instance,
                                          self.volume_uuid,
                                          snapshot_id,
                                          delete_info=self.delete_info_1)
        self.drvr._volume_api.update_snapshot_status(
            self.c, snapshot_id, 'deleting')

        self.mox.StubOutWithMock(self.drvr, '_volume_refresh_connection_info')
        self.drvr._volume_refresh_connection_info(self.c, instance,
                                                  self.volume_uuid)

        self.mox.ReplayAll()

        self.drvr.volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                         snapshot_id,
                                         self.delete_info_1)

        self.mox.VerifyAll()

    def test_volume_snapshot_delete_outer_failure(self):
        """volume_snapshot_delete: inner failure marks snapshot error_deleting.

        The inner _volume_snapshot_delete raises; the exception must
        propagate and Cinder's snapshot status be set to 'error_deleting'.
        """
        instance = objects.Instance(**self.inst)
        snapshot_id = '1234-9876'

        FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete')

        self.drvr._volume_snapshot_delete(self.c,
                                          instance,
                                          self.volume_uuid,
                                          snapshot_id,
                                          delete_info=self.delete_info_1).\
            AndRaise(exception.NovaException('oops'))
        self.drvr._volume_api.update_snapshot_status(
            self.c, snapshot_id, 'error_deleting')

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.drvr.volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          snapshot_id,
                          self.delete_info_1)

        self.mox.VerifyAll()

    def test_volume_snapshot_delete_invalid_type(self):
        """An unrecognized delete_info type raises and flags error_deleting."""
        instance = objects.Instance(**self.inst)

        FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')

        self.drvr._volume_api.update_snapshot_status(
            self.c, self.snapshot_id, 'error_deleting')

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.drvr.volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          self.snapshot_id,
                          self.delete_info_invalid_type)
domain.blockJobInfo('vdb', flags=0).AndReturn({ 'type': 0, 'bandwidth': 0, 'cur': 1, 'end': 1000}) domain.blockJobInfo('vdb', flags=0).AndReturn({ 'type': 0, 'bandwidth': 0, 'cur': 1000, 'end': 1000}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8}) def test_volume_snapshot_delete_netdisk_relative_1(self): """Delete newest snapshot -- blockRebase for libgfapi/network disk.""" class FakeNetdiskDomain(FakeVirtDomain): def __init__(self, *args, **kwargs): super(FakeNetdiskDomain, self).__init__(*args, **kwargs) def XMLDesc(self, flags): return self.dom_netdisk_xml self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) domain.blockRebase('vdb', 'vdb[1]', 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE) domain.blockJobInfo('vdb', flags=0).AndReturn({ 'type': 0, 'bandwidth': 0, 'cur': 1, 'end': 1000}) domain.blockJobInfo('vdb', flags=0).AndReturn({ 'type': 0, 'bandwidth': 0, 'cur': 1000, 'end': 1000}) self.mox.ReplayAll() self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_1) self.mox.VerifyAll() def test_volume_snapshot_delete_netdisk_2(self): """Delete older snapshot -- blockCommit for libgfapi/network disk.""" class FakeNetdiskDomain(FakeVirtDomain): def __init__(self, *args, **kwargs): super(FakeNetdiskDomain, self).__init__(*args, **kwargs) def XMLDesc(self, flags): return 
self.dom_netdisk_xml # libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE') self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) self.mox.ReplayAll() self.assertRaises(exception.Invalid, self.drvr._volume_snapshot_delete, self.c, instance, self.volume_uuid, snapshot_id, self.delete_info_netdisk) fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_COMMIT_RELATIVE': 4}) def test_volume_snapshot_delete_netdisk_relative_2(self): """Delete older snapshot -- blockCommit for libgfapi/network disk.""" class FakeNetdiskDomain(FakeVirtDomain): def __init__(self, *args, **kwargs): super(FakeNetdiskDomain, self).__init__(*args, **kwargs) def XMLDesc(self, flags): return self.dom_netdisk_xml self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt) instance = objects.Instance(**self.inst) snapshot_id = 'snapshot-1234' domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml) self.mox.StubOutWithMock(domain, 'XMLDesc') domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml) self.mox.StubOutWithMock(self.drvr._host, 'get_domain') self.mox.StubOutWithMock(domain, 'blockRebase') self.mox.StubOutWithMock(domain, 'blockCommit') self.mox.StubOutWithMock(domain, 'blockJobInfo') self.drvr._host.get_domain(instance).AndReturn(domain) domain.blockCommit('vdb', 'vdb[0]', 'vdb[1]', 0, flags=fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE) domain.blockJobInfo('vdb', flags=0).AndReturn({ 'type': 0, 'bandwidth': 0, 'cur': 1, 'end': 1000}) 
def _fake_convert_image(source, dest, in_format, out_format,
                        run_as_root=True):
    # Stand-in for images.convert_image used as a mock side_effect: instead
    # of running qemu-img it just registers the destination path in the
    # fake libvirt_utils in-memory filesystem.
    libvirt_driver.libvirt_utils.files[dest] = ''


class _BaseSnapshotTests(test.NoDBTestCase):
    """Shared fixture/helpers for the driver.snapshot() test classes below."""

    def setUp(self):
        super(_BaseSnapshotTests, self).setUp()
        self.flags(snapshots_directory='./', group='libvirt')
        self.context = context.get_admin_context()

        # Replace the real libvirt_utils in both the driver and the image
        # backend with the in-memory fake for the duration of each test.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

        self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
            self)
        self.mock_update_task_state = mock.Mock()

        test_instance = _create_test_instance()
        self.instance_ref = objects.Instance(**test_instance)
        self.instance_ref.info_cache = objects.InstanceInfoCache(
            network_info=None)

    def _assert_snapshot(self, snapshot, disk_format,
                         expected_properties=None):
        # The snapshot flow must report both upload task states, in order.
        self.mock_update_task_state.assert_has_calls([
            mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
            mock.call(task_state=task_states.IMAGE_UPLOADING,
                      expected_state=task_states.IMAGE_PENDING_UPLOAD)])

        props = snapshot['properties']
        self.assertEqual(props['image_state'], 'available')
        self.assertEqual(snapshot['status'], 'active')
        self.assertEqual(snapshot['disk_format'], disk_format)
        self.assertEqual(snapshot['name'], 'test-snap')

        if expected_properties:
            for expected_key, expected_value in \
                    six.iteritems(expected_properties):
                self.assertEqual(expected_value, props[expected_key])

    def _create_image(self, extra_properties=None):
        # Register a placeholder image with the fake image service; the
        # snapshot operation later updates this same image record.
        properties = {'instance_id': self.instance_ref['id'],
                      'user_id': str(self.context.user_id)}
        if extra_properties:
            properties.update(extra_properties)

        sent_meta = {'name': 'test-snap',
                     'is_public': False,
                     'status': 'creating',
                     'properties': properties}

        # Create new image. It will be updated in snapshot method
        # To work with it from snapshot, the single image_service is needed
        recv_meta = self.image_service.create(self.context, sent_meta)
        return recv_meta

    @mock.patch.object(host.Host, 'has_min_version')
    @mock.patch.object(imagebackend.Image, 'resolve_driver_format')
    @mock.patch.object(host.Host, 'get_domain')
    def _snapshot(self, image_id, mock_get_domain, mock_resolve, mock_version):
        # Run driver.snapshot() against a fake domain and return the
        # resulting image record from the fake image service.
        mock_get_domain.return_value = FakeVirtDomain()
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        driver.snapshot(self.context, self.instance_ref, image_id,
                        self.mock_update_task_state)

        snapshot = self.image_service.show(self.context, image_id)
        return snapshot

    def _test_snapshot(self, disk_format, extra_properties=None):
        # End-to-end helper: create image record, snapshot, assert result.
        recv_meta = self._create_image(extra_properties=extra_properties)
        snapshot = self._snapshot(recv_meta['id'])
        self._assert_snapshot(snapshot, disk_format=disk_format,
                              expected_properties=extra_properties)
self.instance_ref.image_ref = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' self._test_snapshot(disk_format='qcow2') def test_no_original_image(self): self.instance_ref.image_ref = '661122aa-1234-dede-fefe-babababababa' self._test_snapshot(disk_format='qcow2') def test_snapshot_metadata_image(self): # Assign an image with an architecture defined (x86_64) self.instance_ref.image_ref = 'a440c04b-79fa-479c-bed1-0b816eaec379' extra_properties = {'architecture': 'fake_arch', 'key_a': 'value_a', 'key_b': 'value_b', 'os_type': 'linux'} self._test_snapshot(disk_format='qcow2', extra_properties=extra_properties) @mock.patch.object(rbd_utils, 'RBDDriver') @mock.patch.object(rbd_utils, 'rbd') def test_raw_with_rbd_clone(self, mock_rbd, mock_driver): self.flags(images_type='rbd', group='libvirt') rbd = mock_driver.return_value rbd.parent_info = mock.Mock(return_value=['test-pool', '', '']) rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd']) with mock.patch.object(fake_libvirt_utils, 'find_disk', return_value=('rbd://some/fake/rbd/image', 'raw')): with mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'): self._test_snapshot(disk_format='raw') rbd.clone.assert_called_with(mock.ANY, mock.ANY, dest_pool='test-pool') rbd.flatten.assert_called_with(mock.ANY, pool='test-pool') @mock.patch.object(rbd_utils, 'RBDDriver') @mock.patch.object(rbd_utils, 'rbd') def test_raw_with_rbd_clone_graceful_fallback(self, mock_rbd, mock_driver): self.flags(images_type='rbd', group='libvirt') rbd = mock_driver.return_value rbd.parent_info = mock.Mock(side_effect=exception.ImageUnacceptable( image_id='fake_id', reason='rbd testing')) with test.nested( mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image', side_effect=_fake_convert_image), mock.patch.object(fake_libvirt_utils, 'find_disk', return_value=('rbd://some/fake/rbd/image', 'raw')), mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd')): self._test_snapshot(disk_format='raw') 
self.assertFalse(rbd.clone.called) @mock.patch.object(rbd_utils, 'RBDDriver') @mock.patch.object(rbd_utils, 'rbd') def test_raw_with_rbd_clone_eperm(self, mock_rbd, mock_driver): self.flags(images_type='rbd', group='libvirt') rbd = mock_driver.return_value rbd.parent_info = mock.Mock(return_value=['test-pool', '', '']) rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd']) rbd.clone = mock.Mock(side_effect=exception.Forbidden( image_id='fake_id', reason='rbd testing')) with test.nested( mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image', side_effect=_fake_convert_image), mock.patch.object(fake_libvirt_utils, 'find_disk', return_value=('rbd://some/fake/rbd/image', 'raw')), mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd')): self._test_snapshot(disk_format='raw') # Ensure that the direct_snapshot attempt was cleaned up rbd.remove_snap.assert_called_with('c', 'd', ignore_errors=False, pool='b', force=True) @mock.patch.object(rbd_utils, 'RBDDriver') @mock.patch.object(rbd_utils, 'rbd') def test_raw_with_rbd_clone_post_process_fails(self, mock_rbd, mock_driver): self.flags(images_type='rbd', group='libvirt') rbd = mock_driver.return_value rbd.parent_info = mock.Mock(return_value=['test-pool', '', '']) rbd.parse_url = mock.Mock(return_value=['a', 'b', 'c', 'd']) with test.nested( mock.patch.object(fake_libvirt_utils, 'find_disk', return_value=('rbd://some/fake/rbd/image', 'raw')), mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'), mock.patch.object(self.image_service, 'update', side_effect=test.TestingException)): self.assertRaises(test.TestingException, self._test_snapshot, disk_format='raw') rbd.clone.assert_called_with(mock.ANY, mock.ANY, dest_pool='test-pool') rbd.flatten.assert_called_with(mock.ANY, pool='test-pool') # Ensure that the direct_snapshot attempt was cleaned up rbd.remove_snap.assert_called_with('c', 'd', ignore_errors=True, pool='b', force=True) @mock.patch.object(imagebackend.Image, 'direct_snapshot') 
@mock.patch.object(imagebackend.Image, 'resolve_driver_format') @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch.object(host.Host, 'get_guest') def test_raw_with_rbd_clone_is_live_snapshot(self, mock_get_guest, mock_version, mock_resolve, mock_snapshot): self.flags(disable_libvirt_livesnapshot=False, group='workarounds') self.flags(images_type='rbd', group='libvirt') mock_guest = mock.Mock(spec=libvirt_guest.Guest) mock_guest._domain = mock.Mock() mock_get_guest.return_value = mock_guest driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) recv_meta = self._create_image() with mock.patch.object(driver, "suspend") as mock_suspend: driver.snapshot(self.context, self.instance_ref, recv_meta['id'], self.mock_update_task_state) self.assertFalse(mock_suspend.called) @mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image', side_effect=_fake_convert_image) @mock.patch.object(fake_libvirt_utils, 'find_disk') @mock.patch.object(imagebackend.Image, 'resolve_driver_format') @mock.patch.object(host.Host, 'has_min_version', return_value=True) @mock.patch.object(host.Host, 'get_guest') @mock.patch.object(rbd_utils, 'RBDDriver') @mock.patch.object(rbd_utils, 'rbd') def test_raw_with_rbd_clone_failure_does_cold_snapshot(self, mock_rbd, mock_driver, mock_get_guest, mock_version, mock_resolve, mock_find_disk, mock_convert): self.flags(disable_libvirt_livesnapshot=False, group='workarounds') self.flags(images_type='rbd', group='libvirt') rbd = mock_driver.return_value rbd.parent_info = mock.Mock(side_effect=exception.ImageUnacceptable( image_id='fake_id', reason='rbd testing')) mock_find_disk.return_value = ('rbd://some/fake/rbd/image', 'raw') mock_guest = mock.Mock(spec=libvirt_guest.Guest) mock_guest.get_power_state.return_value = power_state.RUNNING mock_guest._domain = mock.Mock() mock_get_guest.return_value = mock_guest driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) recv_meta = self._create_image() with 
mock.patch.object(fake_libvirt_utils, 'disk_type', new='rbd'): with mock.patch.object(driver, "suspend") as mock_suspend: driver.snapshot(self.context, self.instance_ref, recv_meta['id'], self.mock_update_task_state) self.assertTrue(mock_suspend.called) class LXCSnapshotTests(LibvirtSnapshotTests): """Repeat all of the Libvirt snapshot tests, but with LXC enabled""" def setUp(self): super(LXCSnapshotTests, self).setUp() self.flags(virt_type='lxc', group='libvirt') def test_raw_with_rbd_clone_failure_does_cold_snapshot(self): self.skipTest("managedSave is not supported with LXC") class LVMSnapshotTests(_BaseSnapshotTests): @mock.patch.object(fake_libvirt_utils, 'disk_type', new='lvm') @mock.patch.object(libvirt_driver.imagebackend.images, 'convert_image', side_effect=_fake_convert_image) @mock.patch.object(libvirt_driver.imagebackend.lvm, 'volume_info') def _test_lvm_snapshot(self, disk_format, mock_volume_info, mock_convert_image): self.flags(images_type='lvm', images_volume_group='nova-vg', group='libvirt') self._test_snapshot(disk_format=disk_format) mock_volume_info.assert_has_calls([mock.call('/dev/nova-vg/lv')]) mock_convert_image.assert_called_once_with( '/dev/nova-vg/lv', mock.ANY, 'raw', disk_format, run_as_root=True) def test_raw(self): self._test_lvm_snapshot('raw') def test_qcow2(self): self.flags(snapshot_image_format='qcow2', group='libvirt') self._test_lvm_snapshot('qcow2')
apache-2.0
6,406,292,573,906,182,000
44.142063
97
0.549332
false
kaidebrick/piface_thingsboard
piface_thingsboard.py
1
1271
#!/usr/bin/env python3 import paho.mqtt.client as mqtt import configparser import pifacedigitalio as PFIO APPNAME = os.path.splitext(os.path.basename(__file__))[0] INIFILE = os.getenv('INIFILE', APPNAME + '.ini') config = configparser.ConfigParser() config.read(INIFILE) MODULE = 'PFIO' MQTT_HOST = config.get("global", "mqtt_host") MQTT_PORT = config.getint("global", "mqtt_port") MQTT_USERNAME = config.get("global", "mqtt_username") MQTT_PASSWORD = config.get("global", "mqtt_password") MQTT_CERT_PATH = config.get("global", "mqtt_cert_path") MQTT_TLS_INSECURE = config.get("global", "mqtt_tls_insecure") MQTT_TLS_PROTOCOL = config.get("global", "mqtt_tls_protocol") MQTT_CLIENT_ID = config.get("global", "mqtt_client_id") MQTT_TOPIC = config.get("global", "mqtt_topic") MQTT_QOS = config.getint("global", "mqtt_qos") MQTT_RETAIN = config.getboolean("global", "mqtt_retain") MQTT_CLEAN_SESSION = config.getboolean("global", "mqtt_clean_session") MQTT_LWT = config.get("global", "mqtt_lwt") MONITOR_PINS = config.get("global", "monitor_pins", raw=True) MONITOR_POLL = config.getfloat("global", "monitor_poll") PINS = [] if MONITOR_PINS: PINS.extend(list(map(int, MONITOR_PINS.split(",")))) for PIN in PINS: PINS[PINS.index(PIN)] = [PIN, -1]
gpl-3.0
-1,371,690,723,719,318,300
31.589744
70
0.698662
false
anaran/olympia
apps/bandwagon/tasks.py
1
4144
import logging import math from django.core.files.storage import default_storage as storage from django.db.models import Count import elasticutils.contrib.django as elasticutils from celeryutils import task import amo from amo.decorators import set_modified_on from amo.helpers import user_media_path from amo.utils import attach_trans_dict, resize_image from tags.models import Tag from lib.es.utils import index_objects from . import search from .models import (Collection, CollectionAddon, CollectionVote, CollectionWatcher) log = logging.getLogger('z.task') @task def collection_votes(*ids, **kw): log.info('[%s@%s] Updating collection votes.' % (len(ids), collection_votes.rate_limit)) using = kw.get('using') for collection in ids: v = CollectionVote.objects.filter(collection=collection).using(using) votes = dict(v.values_list('vote').annotate(Count('vote'))) c = Collection.objects.get(id=collection) c.upvotes = up = votes.get(1, 0) c.downvotes = down = votes.get(-1, 0) try: # Use log to limit the effect of the multiplier. c.rating = (up - down) * math.log(up + down) except ValueError: c.rating = 0 c.save() @task @set_modified_on def resize_icon(src, dst, locally=False, **kw): """Resizes collection icons to 32x32""" log.info('[1@None] Resizing icon: %s' % dst) try: resize_image(src, dst, (32, 32), locally=locally) return True except Exception, e: log.error("Error saving collection icon: %s" % e) @task def delete_icon(dst, **kw): log.info('[1@None] Deleting icon: %s.' % dst) if not dst.startswith(user_media_path('collection_icons')): log.error("Someone tried deleting something they shouldn't: %s" % dst) return try: storage.delete(dst) except Exception, e: log.error("Error deleting icon: %s" % e) @task def collection_meta(*ids, **kw): log.info('[%s@%s] Updating collection metadata.' 
% (len(ids), collection_meta.rate_limit)) using = kw.get('using') qs = (CollectionAddon.objects.filter(collection__in=ids) .using(using).values_list('collection')) counts = dict(qs.annotate(Count('id'))) persona_counts = dict(qs.filter(addon__type=amo.ADDON_PERSONA) .annotate(Count('id'))) tags = (Tag.objects.not_blacklisted().values_list('id') .annotate(cnt=Count('id')).filter(cnt__gt=1).order_by('-cnt')) for c in Collection.objects.no_cache().filter(id__in=ids): addon_count = counts.get(c.id, 0) all_personas = addon_count == persona_counts.get(c.id, None) addons = list(c.addons.values_list('id', flat=True)) c.top_tags = [t for t, _ in tags.filter(addons__in=addons)[:5]] Collection.objects.filter(id=c.id).update(addon_count=addon_count, all_personas=all_personas) @task def collection_watchers(*ids, **kw): log.info('[%s@%s] Updating collection watchers.' % (len(ids), collection_watchers.rate_limit)) using = kw.get('using') for pk in ids: try: watchers = (CollectionWatcher.objects.filter(collection=pk) .using(using).count()) Collection.objects.filter(pk=pk).update(subscribers=watchers) log.info('Updated collection watchers: %s' % pk) except Exception, e: log.error('Updating collection watchers failed: %s, %s' % (pk, e)) @task def index_collections(ids, **kw): log.debug('Indexing collections %s-%s [%s].' % (ids[0], ids[-1], len(ids))) index = kw.pop('index', None) index_objects(ids, Collection, search, index, [attach_translations]) def attach_translations(collections): """Put all translations into a translations dict.""" attach_trans_dict(Collection, collections) @task def unindex_collections(ids, **kw): for id in ids: log.debug('Removing collection [%s] from search index.' % id) Collection.unindex(id)
bsd-3-clause
8,352,693,703,532,291,000
33.247934
79
0.625
false
FATSLiM/fatslim
docs/sphinx-src/documentation/tutorials/show_apl_map.py
1
1525
#!/usr/bin/env python # -*- coding: utf8 -*- import numpy as np import matplotlib.pyplot as plt from scipy.interpolate import griddata CSV_FILENAME = "bilayer_prot_apl_frame_00000.csv" GRO_FILENAME = "bilayer_prot.gro" PNG_FILENAME = "bilayer_prot_apl_frame_00000.png" # Get Box vectors last_line = "" with open(GRO_FILENAME) as fp: for line in fp: line = line.strip() if len(line) == 0: continue last_line = line box_x, box_y = [float(val) for val in line.split()[:2]] # Get values membrane_property = "Area per lipid" x_values = [] y_values = [] z_values = [] property_values = [] with open(CSV_FILENAME) as fp: for lino, line in enumerate(fp): if lino == 0: membrane_property = line.split(",")[-1].strip() else: line = line.strip() if len(line) == 0: continue resid, leaflet, x, y, z, value = line.split(",") x_values.append(float(x)) y_values.append(float(y)) property_values.append(float(value)) # Building data from plotting grid_x, grid_y = np.mgrid[0:box_x:50j, 0:box_y:50j] points = np.stack((np.array(x_values).T, np.array(y_values).T), axis=-1) values = np.array(property_values) grid = griddata(points, values, (grid_x, grid_y), method='cubic') # Plot map plt.contourf(grid_x, grid_y, grid) cbar = plt.colorbar() plt.title(membrane_property) plt.xlabel("Box X (nm)") plt.ylabel("Box Y (nm)") plt.tight_layout() plt.savefig(PNG_FILENAME)
gpl-3.0
6,198,686,079,688,072,000
22.828125
72
0.61377
false
andree182/podx3
getrawaudio.py
1
1079
#!/usr/bin/env python """ Reads audio data from the device through isochronous transfer. NOTE: Due to alignment or whatever, the data is not correct. Data size of the input endpoint is 170B, but the the actual data size is 6 * (3*2*4), the rest bytes are filled with zero. """ import usb.util import time ID_VENDOR = 0x0e41 ID_PRODUCT = 0x414a emptyData = chr(0) * (7 * 6) d = usb.core.find(idVendor = ID_VENDOR, idProduct = ID_PRODUCT) if d is None: raise ValueError("not connected") d.set_interface_altsetting(0,1) x = [] def chunks(l, n): """Yield successive n-sized chunks from l.""" for i in xrange(0, len(l), n): yield l[i:i+n] for i in range(0, 100): nx = d.read(0x86, 16384, 1000) print len(nx) if len(nx) == 0: # d.write(0x02, emptyData) # attempt to revive device after input stream freezes time.sleep(0.001) continue raw = [] for i in chunks(nx, 170): raw += i[:144] d.write(0x02, nx[:len(raw)/4]) x += [raw] f = file("test.raw", "w") for i in x: f.write(''.join(map(chr,i)))
gpl-2.0
-2,545,959,085,404,143,600
20.58
88
0.624652
false
CPedrini/TateTRES
erapi.py
1
11009
#-*- encoding: utf-8 -*- import csv, math, time, re, threading, sys try: from urllib.request import urlopen except ImportError: from urllib import urlopen class ErAPI(): # Metodo constructor, seteos basicos necesarios de configuracion, instancia objetos utiles def __init__(self): self.data = {} # Data format: {'XXCiro|BNC': {'id': 123456, 'nick': 'XXCiro', 'level': 49, 'strength': 532.5, 'rank_points': 1233354, 'citizenship': 'Argentina'}} # Diccionario de puntos/rango self.rank_required_points = { "Recruit": 0, "Private": 15, "Private*": 45, "Private**": 80, "Private***": 120, "Corporal": 170, "Corporal*": 250, "Corporal**": 350, "Corporal***": 450, "Sergeant": 600, "Sergeant*": 800, "Sergeant**": 1000, "Sergeant***": 1400, "Lieutenant": 1850, "Lieutenant*": 2350, "Lieutenant**": 3000, "Lieutenant***": 3750, "Captain": 5000, "Captain*": 6500, "Captain**": 9000, "Captain***": 12000, "Major": 15500, "Major*": 20000, "Major**": 25000, "Major***": 31000, "Commander": 40000, "Commander*": 52000, "Commander**": 67000, "Commander***": 85000, "Lt Colonel": 110000, "Lt Colonel*": 140000, "Lt Colonel**": 180000, "Lt Colonel***": 225000, "Colonel": 285000, "Colonel*": 355000, "Colonel**": 435000, "Colonel***": 540000, "General": 660000, "General*": 800000, "General**": 950000, "General***": 1140000, "Field Marshal": 1350000, "Field Marshal*": 1600000, "Field Marshal**": 1875000, "Field Marshal***": 2185000, "Supreme Marshal": 2550000, "Supreme Marshal*": 3000000, "Supreme Marshal**": 3500000, "Supreme Marshal***": 4150000, "National Force": 4900000, "National Force*": 5800000, "National Force**": 7000000, "National Force***": 9000000, "World Class Force": 11500000, "World Class Force*": 14500000, "World Class Force**": 18000000, "World Class Force***": 22000000, "Legendary Force": 26500000, "Legendary Force*": 31500000, "Legendary Force**": 37000000, "Legendary Force***": 42000000, "God of War": 50000000, "God of War*": 100000000 , "God of War**": 200000000, "God of War***": 
500000000, "Titan": 1000000000, "Titan*": 2000000000, "Titan**": 4000000000, "Titan***": 10000000000} # Lista ordenada de rangos segun importancia self.rank_to_pos = [ "Recruit", "Private", "Private*", "Private**", "Private***", "Corporal", "Corporal*", "Corporal**", "Corporal***", "Sergeant", "Sergeant*", "Sergeant**", "Sergeant***", "Lieutenant", "Lieutenant*", "Lieutenant**", "Lieutenant***", "Captain", "Captain*", "Captain**", "Captain***", "Major", "Major*", "Major**", "Major***", "Commander", "Commander*", "Commander**", "Commander***", "Lt Colonel", "Lt Colonel*", "Lt Colonel**", "Lt Colonel***", "Colonel", "Colonel*", "Colonel**", "Colonel***", "General", "General*", "General**", "General***", "Field Marshal", "Field Marshal*", "Field Marshal**", "Field Marshal***", "Supreme Marshal", "Supreme Marshal*", "Supreme Marshal**", "Supreme Marshal***", "National Force", "National Force*", "National Force**", "National Force***", "World Class Force", "World Class Force*", "World Class Force**", "World Class Force***", "Legendary Force", "Legendary Force*", "Legendary Force**", "Legendary Force***", "God of War", "God of War*", "God of War**", "God of War***", "Titan", "Titan*", "Titan**", "Titan***",] # Bandera de ejecucion, util en caso de que se decida matar de forma manual los threads para actualizar y guardar los datos self.run = True # Se paraleliza la carga de datos en un hilo nuevo, el cual es demonio del invocador en caso de "muerte prematura" th = threading.Thread(target=self.data_loader) th.daemon = True th.start() # Metodo invocador, carga datos y crea threads para guardar y actualizar informacion, solo llamado desde constructor def data_loader(self): self.load_data() self.data_saver_th = threading.Thread(target=self.data_saver) self.data_saver_th.daemon = True self.data_saver_th.start() self.data_updater_th = threading.Thread(target=self.data_updater) self.data_updater_th.daemon = True self.data_updater_th.start() # Metodo para volcar informacion a 
archivo fisico, solo llamado de metodo data_loader def data_saver(self): while self.run: self.save_data() time.sleep(60) # Metodo para actualizar informacion, solo llamado de metodo data_loader def data_updater(self): while self.run: for irc_nick in self.data: self.update_data(irc_nick) time.sleep(30) time.sleep(600) # ---------------------------------------------------------------------------------- # # @ PUBLIC METHODS # # ---------------------------------------------------------------------------------- # # Metodo para actualizar informacion local del objeto desde archivo def load_data(self): try: f = open('data/er_nick-data.csv', 'rt') reader = csv.reader(f) for nick_irc,id,nick_er,level,strength,rank_points,citizenship in reader: self.data[nick_irc] = {'id': int(id), 'nick': nick_er, 'level': int(level), 'strength': float(strength), 'rank_points': int(rank_points), 'citizenship': citizenship} f.close() except: pass # Metodo para guardar informacion local del objeto en archivo def save_data(self): try: f = open('data/er_nick-data.csv', 'wt') writer = csv.writer(f) for u in self.data: writer.writerow([u, self.data[u]['id'], self.data[u]['nick'], self.data[u]['level'], self.data[u]['strength'], self.data[u]['rank_points'], self.data[u]['citizenship']]) f.close() except: pass # Metodo scraper para actualizar informacion local del objeto del nick de irc especificado def update_data(self, irc_nick): try: id = self.data[irc_nick]['id'] c = urlopen('http://www.erepublik.com/es/citizen/profile/%d' % id) page = c.read() c.close() self.data[irc_nick]['nick'] = re.search('<meta name="title" content="(.+?) 
- Ciudadano del Nuevo Mundo" \/>', page.decode('utf-8')).group(1) self.data[irc_nick]['level'] = int(re.search('<strong class="citizen_level">(.+?)<\/strong>', page.decode('utf-8'), re.DOTALL).group(1)) self.data[irc_nick]['strength'] = float(re.search('<span class="military_box_info mb_bottom">(.+?)</span>', page.decode('utf-8'), re.DOTALL).group(1).strip('\r\n\t ').replace(',','')) self.data[irc_nick]['rank_points'] = int(re.search('<span class="rank_numbers">(.+?) \/', page.decode('utf-8'), re.DOTALL).group(1).replace(',','')) self.data[irc_nick]['citizenship'] = re.search('<a href="http\:\/\/www.erepublik.com\/es\/country\/society\/([^ \t\n\x0B\f\r]+?)">', page.decode('utf-8')).group(1) except: pass # Metodo para actualizar informacion local del objeto con nick de irc e id especificados, fuerza actualizacion del mismo def reg_nick_write(self, nick, id): if(nick.lower() in self.data.keys()): self.data[nick.lower()]['id'] = int(id) else: self.data[nick.lower()] = {'id': int(id), 'nick': nick, 'level': 1, 'strength': 0, 'rank_points': 0, 'citizenship': ''} self.update_data(nick.lower()) # Metodo para obtener ID del nick de irc especificado def get_id(self, nick): return self.data[nick.lower()]['id'] # Metodo para obtener LEVEL del nick de irc especificado def get_level(self, nick): return self.data[nick.lower()]['level'] # Metodo para obtener STRENGTH del nick de irc especificado def get_strength(self, nick): return self.data[nick.lower()]['strength'] # Metodo para obtener RANK POINTS del nick de irc especificado def get_rank_points(self, nick): return self.data[nick.lower()]['rank_points'] # Metodo para obtener CITIZENSHIP del nick de irc especificado def get_citizenship(self, nick): return self.data[nick.lower()]['citizenship'] # Metodo para obtener NICK INGAME del nick de irc especificado def get_nick(self, nick): return self.data[nick.lower()]['nick'] # Metodo para obtener RANK NAME del nick de irc especificado def calculate_rank_name(self, rank_points): index 
= 0 for k in [key for key in self.rank_required_points.keys() if self.rank_required_points[key] < rank_points]: if(self.rank_to_pos.index(k) > index): index = self.rank_to_pos.index(k) return self.rank_to_pos[index] # Metodo para calcular DAÑO del nick de irc especificado segun datos adicionales def calculate_damage(self, rank_points, strength, weapon_power, level, bonus): index = 0 for k in [key for key in self.rank_required_points.keys() if self.rank_required_points[key] < rank_points]: if(self.rank_to_pos.index(k) > index): index = self.rank_to_pos.index(k) return(math.trunc(((index / 20) + 0.3) * ((strength / 10) + 40) * (1 + (weapon_power / 100)) * (1.1 if level > 99 else 1) * bonus))
apache-2.0
-1,751,381,920,954,348,000
36.962069
195
0.511446
false
vivaxy/algorithms
python/problems/sum_of_even_numbers_after_queries.py
1
1068
""" https://leetcode.com/problems/sum-of-even-numbers-after-queries/ https://leetcode.com/submissions/detail/215582833/ """ from typing import List class Solution: def sumEvenAfterQueries(self, A: List[int], queries: List[List[int]]) -> List[int]: ANS = [] ans = 0 for a in A: if a % 2 == 0: ans += a for query in queries: [val, index] = query A[index] += val if A[index] % 2 == 0 and val % 2 == 1: ans += A[index] elif A[index] % 2 == 0 and val % 2 == 0: ans += val elif A[index] % 2 == 1 and val % 2 == 1: ans -= A[index] - val ANS.append(ans) return ANS import unittest class Test(unittest.TestCase): def test(self): solution = Solution() self.assertEqual(solution.sumEvenAfterQueries( [1, 2, 3, 4], [[1, 0], [-3, 1], [-4, 0], [2, 3]]), [8, 6, 2, 4] ) if __name__ == '__main__': unittest.main()
mit
-5,519,847,744,780,625,000
23.272727
87
0.468165
false
heromod/migrid
mig/reST/html_writer.py
1
1966
#!/usr/bin/python # -*- coding: utf-8 -*- # # --- BEGIN_HEADER --- # # html_writer - [insert a few words of module description on this line] # Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter # # This file is part of MiG. # # MiG is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # MiG is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # -- END_HEADER --- # from docutils.writers.html4css1 import Writer, HTMLTranslator from docutils.core import publish_string # Setup a translator writer html_writer = Writer() html_writer.translator_class = HTMLTranslator # Setup a restructured text example reST = \ """ Example of reST: ================ This is a small example of the way reST can be used as a base for generating HTMLformatted text that: - looks nice - is standards compliant - is flexible We *may* decide to start using this as text formatting tool in MiG__ later on. __ http://mig-1.imada.sdu.dk/ We can also use it for creating tables if we want to: ===== ===== ====== Input Output ----- ----- ------ A B A or B ===== ===== ====== False False False True False True False True True True True True ===== ===== ====== Have fun! ---- Cheers, Jonas """ # Translate reST to html html = publish_string(reST, settings_overrides={'output_encoding' : 'unicode'}, writer=html_writer) print html
gpl-2.0
4,386,083,187,276,027,400
23.575
101
0.677009
false
davhenriksen/bringhomethebacon
web/web.py
1
27835
#!/usr/bin/env python # web.py # bring home the bacon Copyright (C) 2012 David Ormbakken Henriksen (davidohenriksen@gmail.com) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. import os import sqlite3 import re import sys import subprocess import tornado.httpserver import tornado.ioloop import tornado.web import tornado.options import tornado.autoreload import simplejson as json from tornado.options import define, options define("port", default=8080, help="run on the given port", type=int) class Application(tornado.web.Application): def __init__(self): handlers = [ (r"/", MainHandler), (r"/rulesets", RulesetsHandler), (r"/rules", RulesHandler), (r"/sensors", SensorsHandler), (r"/get_rulesets", GetRulesetsHandler), (r"/get_rules", GetRulesHandler), (r"/get_sensors", GetSensorsHandler), (r"/add_sensor", AddSensorHandler), (r"/remove_sensor", RemoveSensorHandler), (r"/open_rule", OpenRuleHandler), (r"/getsensorname", GetSensorNameHandler), (r"/tuning_rules", TuningRulesHandler), (r"/tuning_rulesets", TuningRulesetsHandler), (r"/update_sensor", UpdateSensorHandler), (r"/update", UpdateHandler), (r"/atuninghelp", ATuningHelpHandler), (r"/suppress", SuppressHandler), (r"/threshold", ThresholdHandler), (r"/atuning", ATuningHandler), (r"/get_atuning", GetATuningHandler), (r"/remove_atuning", RemoveATuningHandler), (r"/distribute", DistributeHandler), ] settings = dict( #login_url="/auth/login", template_path=os.path.join(os.path.dirname(__file__), "templates"), static_path=os.path.join(os.path.dirname(__file__), "static"), 
autoescape=None) tornado.web.Application.__init__(self, handlers, **settings) class RemoveATuningHandler(tornado.web.RequestHandler): def post(self): syntax = self.request.arguments.get("atuningid") db = sqlite3.connect('../DB.db') cursor = db.cursor() try: cursor.execute('SELECT sname FROM sensors') all_sensors = cursor.fetchall() for hit in all_sensors: table = hit[0]+'_threshold' sql = 'DELETE FROM %s WHERE syntax="%s"' % (table,syntax[0]) cursor.execute(sql) db.commit() except StandardError,e: FILE = open('weberrorlog.txt','a') FILE.write('RemoveATuningHandler ERROR: '+str(e)+'\n') FILE.close() cursor.close() db.close() class GetATuningHandler(tornado.web.RequestHandler): def get(self): db = sqlite3.connect('../DB.db') cursor = db.cursor() atuning = [] try: cursor.execute('SELECT sname FROM sensors') all_sensors = cursor.fetchall() for hit in all_sensors: table = hit[0]+'_threshold' sql = 'SELECT * FROM '+table cursor.execute(sql) for row in cursor: idnr,sid,typ,syntax,comment,sensor = row check = "<center><input type='checkbox' name='atuningid' value='%s'></center>" % (syntax) tmp = (check,sid,typ,syntax,comment,sensor) if tmp not in atuning: atuning.append(tmp) except StandardError,e: FILE = open('weberrorlog.txt','a') FILE.write('GetATuningHandler ERROR: '+str(e)+'\n') FILE.close() cursor.close() db.close() self.write(json.dumps({"aaData":atuning},sort_keys=True,indent=4)) class ThresholdHandler(tornado.web.RequestHandler): def post(self): db = sqlite3.connect('../DB.db') cursor = db.cursor() if 'sigid' not in self.request.arguments: self.write('Input missing. Try again.') elif 'count' not in self.request.arguments: self.write('Input missing. Try again.') elif 'sec' not in self.request.arguments: self.write('Input missing. 
Try again.') else: genid = self.request.arguments.get("genid") sigid = self.request.arguments.get("sigid") typ = self.request.arguments.get("type") track = self.request.arguments.get("track") count = self.request.arguments.get("count") sec = self.request.arguments.get("sec") sensor = self.request.arguments.get("select") comment = '' if 'comment' in self.request.arguments: tmp = self.request.arguments.get("comment") comment = tmp[0] syntax = 'event_filter gen_id '+genid[0]+',sig_id '+sigid[0]+',type '+typ[0]+',track '+track[0]+',count '+count[0]+',seconds '+sec[0] try: def insert_t(table,x): sql = 'INSERT OR IGNORE INTO '+table+' (id,sid,type,syntax,comment,sensor) VALUES (null,'+sigid[0]+',"threshold","'+syntax+'","'+comment+'","'+x+'")' cursor.execute(sql) if not (sensor[0] == "all"): table = sensor[0]+'_threshold' insert_t(table,sensor[0]) else: cursor.execute('SELECT sname FROM sensors') all_sensors = cursor.fetchall() for hit in all_sensors: table = hit[0]+'_threshold' insert_t(table,'ALL') db.commit() self.write('threshold rule for sid: '+sigid[0]+' has been added!') except StandardError,e: FILE = open('weberrorlog.txt','a') FILE.write('ThresholdHandler ERROR: '+str(e)+'\n') FILE.close() self.write(str(e)) cursor.close() db.close() class SuppressHandler(tornado.web.RequestHandler): def post(self): db = sqlite3.connect('../DB.db') cursor = db.cursor() if 'sigid' not in self.request.arguments: self.write('Input missing. Try again.') elif 'ip' not in self.request.arguments: self.write('Input missing. 
Try again.') else: genid = self.request.arguments.get("genid") sigid = self.request.arguments.get("sigid") track = self.request.arguments.get("track") ip = self.request.arguments.get("ip") sensor = self.request.arguments.get("select") comment = '' if 'comment' in self.request.arguments: tmp = self.request.arguments.get("comment") comment = tmp[0] syntax = 'suppress gen_id '+genid[0]+',sig_id '+sigid[0]+',track '+track[0]+',ip '+ip[0] try: def insert_t(table,x): sql = 'INSERT OR IGNORE INTO '+table+' (id,sid,type,syntax,comment,sensor) VALUES (NULL,'+sigid[0]+',"suppress","'+syntax+'","'+comment+'","'+x+'")' cursor.execute(sql) if not (sensor[0] == "all"): table = sensor[0]+'_threshold' insert_t(table,sensor[0]) else: cursor.execute('SELECT sname FROM sensors') all_sensors = cursor.fetchall() for hit in all_sensors: table = hit[0]+'_threshold' insert_t(table,'ALL') db.commit() self.write('suppress rule for sid: '+sigid[0]+' has been added!') except StandardError,e: FILE = open('weberrorlog.txt','a') FILE.write('ThresholdHandler ERROR: '+str(e)+'\n') FILE.close() self.write(str(e)) cursor.close() db.close() class DistributeHandler(tornado.web.RequestHandler): def get(self): self.write('''<html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>Distribute report</title> <link type="text/css" rel="stylesheet" href="../static/css/custom.css"/> <link type="text/css" rel="stylesheet" href="../static/css/demo_page.css"/> </head> <body> &nbsp<b>Distribute report</b></br>''') try: p = subprocess.Popen(["python","../distribute.py"], stdout=subprocess.PIPE) for line in iter(p.stdout.readline, ''): self.write('&nbsp') self.write(line) self.write('</br>') p.stdout.close() except StandardError,e: FILE = open('weberrorlog.txt','a') FILE.write('DistributeHandler ERROR: '+str(e)+'\n') FILE.close() self.write('''</body> </html>''') class UpdateHandler(tornado.web.RequestHandler): def get(self): self.write('''<html xmlns="http://www.w3.org/1999/xhtml"> <head> <title>Update 
report</title> <link type="text/css" rel="stylesheet" href="../static/css/custom.css"/> <link type="text/css" rel="stylesheet" href="../static/css/demo_page.css"/> </head> <body> &nbsp<b>Update Report</b></br>''') try: p = subprocess.Popen(["python","../update.py"], stdout=subprocess.PIPE) for line in iter(p.stdout.readline, ''): self.write('&nbsp') self.write(line) self.write('</br>') p.stdout.close() except StandardError,e: FILE = open('weberrorlog.txt','a') FILE.write('UpdateHandler ERROR: '+str(e)+'\n') FILE.close() self.write('''</body> </html>''') class UpdateSensorHandler(tornado.web.RequestHandler): def post(self): db = sqlite3.connect('../DB.db') cursor = db.cursor() sensor = self.request.arguments.get("select") try: if not (sensor[0] != 'all'): cursor.execute('SELECT sname FROM sensors') all_sensors = cursor.fetchall() def update(f,v,s): sql = 'UPDATE sensors SET '+f+'="'+v+'" WHERE sname="'+s+'"' cursor.execute(sql) if "ip" in self.request.arguments: ip = self.request.arguments.get("ip") if not (sensor[0] == 'all'): update("ip",ip[0],sensor[0]) else: for hit in all_sensors: update("ip",ip[0],hit[0]) if "path" in self.request.arguments: path = self.request.arguments.get("path") if not (sensor[0] == 'all'): update("path",path[0],sensor[0]) else: for hit in all_sensors: update("path",path[0],hit[0]) if "uname" in self.request.arguments: uname = self.request.arguments.get("uname") if not (sensor[0] == 'all'): update("uname",uname[0],sensor[0]) else: for hit in all_sensors: update("uname",uname[0],hit[0]) if "cmd" in self.request.arguments: pw = self.request.arguments.get("cmd") if not (sensor[0] == 'all'): update("cmd",cmd[0],sensor[0]) else: for hit in all_sensors: update("cmd",cmd[0],hit[0]) db.commit() self.write('Sensor updated! 
Refresh page to see changes.') except StandardError,e: FILE = open('weberrorlog.txt','a') FILE.write('UpdateSensorHandler ERROR: '+str(e)+'\n') FILE.close() self.write(str(e)) cursor.close() db.close() class TuningRulesetsHandler(tornado.web.RequestHandler): def post(self): source_ruleset = self.request.arguments.get("rulesetid") sensor = self.request.arguments.get("sensor") action = self.request.arguments.get("action") db = sqlite3.connect('../DB.db') cursor = db.cursor() sids = '' try: def disable_sid(table,sid): value = sid.split(',') for entry in value: sql = 'INSERT OR IGNORE INTO '+table+' (sid) VALUES ('+entry+')' cursor.execute(sql) def enable_sid(table,sid): sql = 'DELETE FROM '+table+' WHERE sid IN ('+sid+')' cursor.execute(sql) length = len(source_ruleset) counter = 1 for hit in source_ruleset: split = hit.split('.') sql = 'SELECT sidnr from rules WHERE source_name="'+split[0]+'" AND ruleset_name="'+split[1]+'"' cursor.execute(sql) tmp = cursor.fetchall() sids = sids+(",".join(str(x[0]) for x in tmp)) if not (counter == length): sids = sids+"," counter += 1 if not (sensor[0] == 'all'): table = sensor[0]+'_disabled' if not (action[0] == "enable"): disable_sid(table,sids) else: enable_sid(table,sids) else: cursor.execute('SELECT sname FROM sensors') all_sensors = cursor.fetchall() for hit in all_sensors: table = hit[0]+'_disabled' if not (action[0] == "enable"): disable_sid(table,sids) else: enable_sid(table,sids) db.commit() except StandardError,e: FILE = open('weberrorlog.txt','a') FILE.write('TuningRulesetsHandler ERROR: '+str(e)+'\n') FILE.close() cursor.close() db.close() class TuningRulesHandler(tornado.web.RequestHandler): def post(self): sids = self.request.arguments.get('sidnr') sensor = self.request.arguments.get('sensor') action = self.request.arguments.get('action') db = sqlite3.connect('../DB.db') cursor = db.cursor() def disable_sid(table,sid): sql = 'INSERT OR IGNORE INTO '+table+' (sid) VALUES ('+sid+')' cursor.execute(sql) def 
enable_sid(table,sid): sql = 'DELETE FROM '+table+' WHERE sid='+sid cursor.execute(sql) try: if not (sensor[0] == "all"): table = sensor[0]+'_disabled' for sid in sids: if not (action[0] == "enable"): disable_sid(table,sid) else: enable_sid(table,sid) else: cursor.execute('SELECT sname FROM sensors') all_sensors = cursor.fetchall() for hit in all_sensors: table = hit[0]+'_disabled' for sid in sids: if not (action[0] == "enable"): disable_sid(table,sid) else: enable_sid(table,sid) db.commit() except StandardError,e: FILE = open('weberrorlog.txt','a') FILE.write('TuningRulesHandler ERROR: '+str(e)+'\n') FILE.close() cursor.close() db.close() class GetSensorNameHandler(tornado.web.RequestHandler): def get(self): db = sqlite3.connect('../DB.db') cursor = db.cursor() try: cursor.execute('SELECT sname FROM sensors') selectbox = '<select name="select" id="select"><option value="all">all sensors</option>' for sensor in cursor: selectbox = selectbox+'<option value="'+sensor[0]+'">'+sensor[0]+'</option>' selectbox = selectbox+'</select>' self.write(selectbox) except StandardError,e: FILE = open("weberrorlog.txt","a") FILE.write("GetSensorNameHandler ERROR: "+str(e)+"\n") FILE.close() self.write('<select><option>ERROR</option></select>') cursor.close() db.close() class OpenRuleHandler(tornado.web.RequestHandler): def get(self): sid = self.get_argument("sid") db = sqlite3.connect('../DB.db') cursor = db.cursor() try: cursor.execute('SELECT rule_syntax FROM rules WHERE sidnr = (?)', [sid]) rulesyntax = cursor.fetchone() self.render("open_rules.html",rulesyntax=rulesyntax[0]) except StandardError,e: FILE = open('weberrorlog.txt','a') FILE.write('OpenRuleHandler ERROR: '+str(e)+'\n') FILE.close() cursor.close() db.close() class RemoveSensorHandler(tornado.web.RequestHandler): def post(self): snames = self.request.arguments.get("sensorid") db = sqlite3.connect('../DB.db') cursor = db.cursor() try: for sensor in snames: sql = 'DELETE FROM sensors WHERE sname="%s"' % (sensor) 
cursor.execute(sql) sql = 'DROP TABLE %s_disabled' % (sensor) cursor.execute(sql) sql = 'DROP TABLE %s_threshold' % (sensor) cursor.execute(sql) db.commit() except StandardError,e: FILE = open('weberrorlog.txt','a') FILE.write('RemoveSensorHandler ERROR: '+str(e)+'\n') FILE.close() cursor.close() db.close() class AddSensorHandler(tornado.web.RequestHandler): def post(self): db = sqlite3.connect('../DB.db') cursor = db.cursor() if 'sname' not in self.request.arguments: self.write('Sensor NOT added. Input missing. Try again.') elif 'ip' not in self.request.arguments: self.write('Sensor NOT added. Input missing. Try again.') elif 'path' not in self.request.arguments: self.write('Sensor NOT added. Input missing. Try again.') elif 'uname' not in self.request.arguments: self.write('Sensor NOT added. Input missing. Try again.') elif 'cmd' not in self.request.arguments: self.write('Sensor NOT added. Input missing. Try again.') else: sname = self.request.arguments.get("sname") sname = sname[0] ip = self.request.arguments.get("ip") ip = ip[0] path = self.request.arguments.get("path") path = path[0] uname = self.request.arguments.get("uname") uname = uname[0] cmd = self.request.arguments.get("cmd") cmd = cmd[0] try: db = sqlite3.connect('../DB.db') cursor = db.cursor() cursor.execute('''INSERT INTO sensors (sname,ip,path,uname,cmd) VALUES(?,?,?,?,?)''',(sname,ip,path,uname,cmd)) sql = 'CREATE TABLE '+sname+'_disabled (sid INTEGER PRIMARY KEY)' cursor.execute(sql) sql = 'CREATE TABLE '+sname+'_threshold (id INTEGER PRIMARY KEY, sid INTEGER, type TEXT, syntax TEXT, comment TEXT, sensor TEXT)' cursor.execute(sql) self.write(sname+' added! 
Refresh page to see changes.') db.commit() except StandardError,e: FILE = open('weberrorlog.txt','a') FILE.write('AddSensorHandler ERROR: '+str(e)+'\n') FILE.close() self.write(str(e)) cursor.close() db.close() class GetSensorsHandler(tornado.web.RequestHandler): def get(self): db = sqlite3.connect('../DB.db') cursor = db.cursor() sensors = [] try: cursor.execute('SELECT * FROM sensors') for row in cursor: sname,ip,path,uname,cmd = row check = "<center><input type='checkbox' name='sensorid' value='%s'></center>" % (sname) sensor = (check,sname,ip,path,uname,cmd) sensors.append(sensor) except StandardError,e: FILE = open('weberrorlog.txt','a') FILE.write('GetSensorsHandler ERROR: '+str(e)+'\n') FILE.close() cursor.close() db.close() self.write(json.dumps({"aaData":sensors},sort_keys=True,indent=4)) class GetRulesHandler(tornado.web.RequestHandler): def get(self): db = sqlite3.connect('../DB.db') cursor = db.cursor() details = '<img class="sig" src="static/images/open.png">' sigs = [] try: cursor.execute('SELECT * FROM rules') all_rules = cursor.fetchall() cursor.execute('SELECT sname FROM sensors') all_sensors = cursor.fetchall() for row in all_rules: sidnr,revnr,source,ruleset,name,ref,date,rule = row status ='' for hit in all_sensors: sql = 'SELECT sid FROM '+hit[0]+'_disabled WHERE sid='+str(sidnr) cursor.execute(sql) res = cursor.fetchone() sql = 'SELECT sid FROM %s_threshold WHERE sid="%s"' % (hit[0],sidnr) cursor.execute(sql) tmp2 = cursor.fetchone() if not (res is None): if not (tmp2 is None): status = status+'<font class="red">'+hit[0]+'</font><font class="yellow"><b>!</b></font>&nbsp;' #red/yellow else: status = status+'<font class="red">'+hit[0]+'</font>&nbsp;' #red else: if not (tmp2 is None): status = status+'<font class="green">'+hit[0]+'</font><font class="yellow"><b>!</b></font>&nbsp;' #green/yellow else: status = status+'<font class="green">'+hit[0]+'</font>&nbsp;' #green check = '<input type="checkbox" name="sidnr" value="%i">' % (sidnr) 
source_ruleset = '%s.%s' % (source,ruleset) sig = (check, sidnr, revnr, date, name, source_ruleset, ref, status, details) sigs.append(sig) except StandardError,e: FILE = open('weberrorlog.txt','a') FILE.write('GetRulesetsHandler ERROR: '+str(e)+'\n') FILE.close() cursor.close() db.close() self.write(json.dumps({"aaData":sigs},sort_keys=True,indent=4)) class GetRulesetsHandler(tornado.web.RequestHandler): def get(self): db = sqlite3.connect('../DB.db') cursor = db.cursor() rulesets = [] try: cursor.execute("SELECT DISTINCT ruleset_name, source_name FROM rules") query = cursor.fetchall() for row in query: ruleset,source = row source_ruleset = '%s.%s' % (source,ruleset) check = '<center><input type="checkbox" name="rulesetid" value="%s"></center>' % (source_ruleset) sql = 'SELECT sidnr from rules WHERE source_name="%s" AND ruleset_name="%s"' % (source,ruleset) cursor.execute(sql) tmp = cursor.fetchall() count = len(tmp) sids = ','.join(str(x[0]) for x in tmp) cursor.execute('SELECT sname FROM sensors') all_sensors = cursor.fetchall() sql = 'SELECT MAX(date) FROM rules WHERE source_name="%s" AND ruleset_name="%s"' % (source,ruleset) cursor.execute(sql) max_date = cursor.fetchone() status = '' for x in all_sensors: sensor = x[0] sql = 'SELECT sid FROM %s_disabled WHERE sid IN ( %s )' % (sensor,sids) cursor.execute(sql) tmp2 = cursor.fetchall() scount = len(tmp2) if not (scount == count): if not (scount == 0): status = status+'<font class="green">%s</font><font class="red">%s</font>&nbsp;' % (sensor,scount) else: status = status+'<font class="green">%s</font>&nbsp;' % sensor else: status = status+'<font class="red">%s</font>&nbsp;' % sensor rset = (check,source_ruleset,max_date,count,status) rulesets.append(rset) except StandardError,e: FILE = open('weberrorlog.txt','a') FILE.write('GetRulesetsHandler ERROR: '+str(e)+'\n') FILE.close() cursor.close() db.close() self.write(json.dumps({"aaData":rulesets},sort_keys=True,indent=4)) class 
ATuningHandler(tornado.web.RequestHandler): def get(self): self.render("atuning.html") class ATuningHelpHandler(tornado.web.RequestHandler): def get(self): self.render("atuninghelp.html") class SensorsHandler(tornado.web.RequestHandler): def get(self): self.render("sensors.html") class RulesHandler(tornado.web.RequestHandler): def get(self): self.render("rules.html") class RulesetsHandler(tornado.web.RequestHandler): def get(self): self.render("rulesets.html") class MainHandler(tornado.web.RequestHandler): def get(self): self.render("index.html") def main(): tornado.options.parse_command_line() http_server = tornado.httpserver.HTTPServer(Application()) http_server.listen(options.port) tornado.autoreload.start() tornado.ioloop.IOLoop.instance().start() if __name__ == "__main__": main()
gpl-3.0
-5,249,010,472,040,336,000
36.870748
188
0.496102
false
elidaian/sudoku
src/server/users.py
1
3651
""" users.py Created on: Aug 17 2013 Author: eli """ class UserPermission(object): """ Describes a permission for an operation for an user. """ PERMISSIONS = [] _curr_permission_bit = 1 def __init__(self, name, description, is_default): """ Construct a permission given its description. """ super(UserPermission, self).__init__() self.name = name self.description = description self.flag = UserPermission._curr_permission_bit self.is_default = is_default UserPermission.PERMISSIONS.append(self) UserPermission._curr_permission_bit <<= 1 @staticmethod def get_mask(permissions): """ Returns a mask containing the given permissions. """ res = 0 for permission in permissions: res |= permission.flag return res @staticmethod def parse_mask(mask): """ Return a list of permissions given the mask. """ res = [] for permission in UserPermission.PERMISSIONS: if permission.flag & mask: res.append(permission) return res def __eq__(self, other): """ Checks the equality of this object to other object. """ return self.flag == other.flag def permissions_to_mask(permissions): """ Create a mask of permissions given the permissions list. """ res = 0 for permission in permissions: res |= permission.flag return res # Define the permissions PERM_CREATE_BOARD = UserPermission("CREATE_BOARD", "Create boards", True) PERM_MANAGE_USERS = UserPermission("MANAGE_USERS", "Manage users", False) PERM_SHOW_OTHER_USER_BOARDS = UserPermission("SHOW_OTHER_USERS_BOARDS", "Show other user\'s boards", False) class User(object): """ Represents a logged in user. """ def __init__(self, id, username, display, permissions): """ Initialize a user given its ID, username, display name and permissions. """ super(User, self).__init__() self.id = id self.username = username if not display: self.display = username else: self.display = display self.permissions = UserPermission.parse_mask(permissions) def has_permission(self, permission): """ Returns True if this user has the requested permission. 
""" return permission in self.permissions def allow_create_board(self): """ Returns True if this user is allowed to create boards. """ return self.has_permission(PERM_CREATE_BOARD) def allow_manage_users(self): """ Returns True if this user is allowed to manage other users. """ return self.has_permission(PERM_MANAGE_USERS) def allow_other_user_boards(self): """ Returns True if this user is allowed to see other users boards. """ return self.has_permission(PERM_SHOW_OTHER_USER_BOARDS) def to_json(self): """ Returns a jsonable object with the same data as this user. """ return {"id" : self.id, "username" : self.username, "display" : self.display, "permisions": permissions_to_mask(self.permissions)} def user_from_json(json): """ Create a User object from its representing json. """ return User(json["id"], json["username"], json["display"], json["permissions"])
gpl-3.0
2,345,246,301,992,049,000
27.748031
83
0.576828
false
tedlaz/pyted
misthodosia/m13/f_fmy_old.py
1
1057
# -*- coding: utf-8 -*- ''' Created on 18 Νοε 2012 @author: tedlaz ''' from PyQt4 import QtGui, QtCore from gui import ui_fmy from utils.fmy_etoys import makeFMYFile class dlg(QtGui.QDialog): def __init__(self, args=None, parent=None): super(dlg, self).__init__(parent) self.ui = ui_fmy.Ui_Dialog() self.ui.setupUi(self) self.makeConnections() if parent: self.db = parent.db else: self.db = '' def makeConnections(self): QtCore.QObject.connect(self.ui.b_makeFile, QtCore.SIGNAL("clicked()"),self.makeFile) def makeFile(self): defaultPdfName = 'JL10' fName = QtGui.QFileDialog.getSaveFileName(self,u"Ονομα αρχείου",defaultPdfName) makeFMYFile(fName,self.ui.t_xrisi.text(),self.db) self.accept() if __name__ == '__main__': import sys app = QtGui.QApplication(sys.argv) form = dlg(sys.argv) form.show() app.exec_()
gpl-3.0
1,785,956,018,139,974,700
24.769231
92
0.56334
false
schoolie/bokeh
bokeh/models/plots.py
1
20026
''' Models for representing top-level plot objects. ''' from __future__ import absolute_import from six import string_types from ..core.enums import Location from ..core.properties import Auto, Bool, Dict, Either, Enum, Include, Instance, Int, List, Override, String from ..core.property_mixins import LineProps, FillProps from ..core.query import find from ..core.validation import error, warning from ..core.validation.errors import REQUIRED_RANGE from ..core.validation.warnings import (MISSING_RENDERERS, NO_DATA_RENDERERS, MALFORMED_CATEGORY_LABEL, SNAPPED_TOOLBAR_ANNOTATIONS) from ..util.plot_utils import _list_attr_splat, _select_helper from ..util.string import nice_join from .annotations import Legend, Title from .axes import Axis from .glyphs import Glyph from .grids import Grid from .layouts import LayoutDOM from .ranges import Range, FactorRange from .renderers import DataRenderer, DynamicImageRenderer, GlyphRenderer, Renderer, TileRenderer from .sources import DataSource, ColumnDataSource from .tools import Tool, Toolbar, ToolEvents class Plot(LayoutDOM): ''' Model representing a plot, containing glyphs, guides, annotations. ''' def __init__(self, **kwargs): if "tool_events" not in kwargs: kwargs["tool_events"] = ToolEvents() if "toolbar" in kwargs and "logo" in kwargs: raise ValueError("Conflicing properties set on plot: toolbar, logo.") if "toolbar" in kwargs and "tools" in kwargs: raise ValueError("Conflicing properties set on plot: toolbar, tools.") if "toolbar" not in kwargs: tools = kwargs.pop('tools', []) logo = kwargs.pop('logo', 'normal') kwargs["toolbar"] = Toolbar(tools=tools, logo=logo) super(LayoutDOM, self).__init__(**kwargs) def select(self, *args, **kwargs): ''' Query this object and all of its references for objects that match the given selector. There are a few different ways to call the ``select`` method. 
The most general is to supply a JSON-like query dictionary as the single argument or as keyword arguments: Args: selector (JSON-like) : some sample text Keyword Arguments: kwargs : query dict key/values as keyword arguments For convenience, queries on just names can be made by supplying the ``name`` string as the single parameter: Args: name (str) : the name to query on Also queries on just type can be made simply by supplying the ``Model`` subclass as the single parameter: Args: type (Model) : the type to query on Returns: seq[Model] Examples: .. code-block:: python # These two are equivalent p.select({"type": HoverTool}) p.select(HoverTool) # These two are also equivalent p.select({"name": "mycircle"}) p.select("mycircle") # Keyword arguments can be supplied in place of selector dict p.select({"name": "foo", "type": HoverTool}) p.select(name="foo", type=HoverTool) ''' selector = _select_helper(args, kwargs) # Want to pass selector that is a dictionary return _list_attr_splat(find(self.references(), selector, {'plot': self})) def row(self, row, gridplot): ''' Return whether this plot is in a given row of a GridPlot. Args: row (int) : index of the row to test gridplot (GridPlot) : the GridPlot to check Returns: bool ''' return self in gridplot.row(row) def column(self, col, gridplot): ''' Return whether this plot is in a given column of a GridPlot. Args: col (int) : index of the column to test gridplot (GridPlot) : the GridPlot to check Returns: bool ''' return self in gridplot.column(col) def _axis(self, *sides): objs = [] for s in sides: objs.extend(getattr(self, s, [])) axis = [obj for obj in objs if isinstance(obj, Axis)] return _list_attr_splat(axis) @property def xaxis(self): ''' Splattable list of :class:`~bokeh.models.axes.Axis` objects for the x dimension. ''' return self._axis("above", "below") @property def yaxis(self): ''' Splattable list of :class:`~bokeh.models.axes.Axis` objects for the y dimension. 
''' return self._axis("left", "right") @property def axis(self): ''' Splattable list of :class:`~bokeh.models.axes.Axis` objects. ''' return _list_attr_splat(self.xaxis + self.yaxis) @property def legend(self): ''' Splattable list of :class:`~bokeh.models.annotations.Legend` objects. ''' legends = [obj for obj in self.renderers if isinstance(obj, Legend)] return _list_attr_splat(legends) def _grid(self, dimension): grid = [obj for obj in self.renderers if isinstance(obj, Grid) and obj.dimension==dimension] return _list_attr_splat(grid) @property def xgrid(self): ''' Splattable list of :class:`~bokeh.models.grids.Grid` objects for the x dimension. ''' return self._grid(0) @property def ygrid(self): ''' Splattable list of :class:`~bokeh.models.grids.Grid` objects for the y dimension. ''' return self._grid(1) @property def grid(self): ''' Splattable list of :class:`~bokeh.models.grids.Grid` objects. ''' return _list_attr_splat(self.xgrid + self.ygrid) @property def tools(self): return self.toolbar.tools @tools.setter def tools(self, tools): self.toolbar.tools = tools def add_layout(self, obj, place='center'): ''' Adds an object to the plot in a specified place. Args: obj (Renderer) : the object to add to the Plot place (str, optional) : where to add the object (default: 'center') Valid places are: 'left', 'right', 'above', 'below', 'center'. Returns: None ''' valid_places = ['left', 'right', 'above', 'below', 'center'] if place not in valid_places: raise ValueError( "Invalid place '%s' specified. Valid place values are: %s" % (place, nice_join(valid_places)) ) if hasattr(obj, 'plot'): if obj.plot is not None: raise ValueError("object to be added already has 'plot' attribute set") obj.plot = self self.renderers.append(obj) if place is not 'center': getattr(self, place).append(obj) def add_tools(self, *tools): ''' Adds tools to the plot. 
Args: *tools (Tool) : the tools to add to the Plot Returns: None ''' if not all(isinstance(tool, Tool) for tool in tools): raise ValueError("All arguments to add_tool must be Tool subclasses.") for tool in tools: if tool.plot is not None: raise ValueError("tool %s to be added already has 'plot' attribute set" % tool) tool.plot = self if hasattr(tool, 'overlay'): self.renderers.append(tool.overlay) self.toolbar.tools.append(tool) def add_glyph(self, source_or_glyph, glyph=None, **kw): ''' Adds a glyph to the plot with associated data sources and ranges. This function will take care of creating and configuring a Glyph object, and then add it to the plot's list of renderers. Args: source (DataSource) : a data source for the glyphs to all use glyph (Glyph) : the glyph to add to the Plot Keyword Arguments: Any additional keyword arguments are passed on as-is to the Glyph initializer. Returns: GlyphRenderer ''' if glyph is not None: source = source_or_glyph else: source, glyph = ColumnDataSource(), source_or_glyph if not isinstance(source, DataSource): raise ValueError("'source' argument to add_glyph() must be DataSource subclass") if not isinstance(glyph, Glyph): raise ValueError("'glyph' argument to add_glyph() must be Glyph subclass") g = GlyphRenderer(data_source=source, glyph=glyph, **kw) self.renderers.append(g) return g def add_tile(self, tile_source, **kw): ''' Adds new TileRenderer into the Plot.renderers Args: tile_source (TileSource) : a tile source instance which contain tileset configuration Keyword Arguments: Additional keyword arguments are passed on as-is to the tile renderer Returns: TileRenderer : TileRenderer ''' tile_renderer = TileRenderer(tile_source=tile_source, **kw) self.renderers.append(tile_renderer) return tile_renderer def add_dynamic_image(self, image_source, **kw): ''' Adds new DynamicImageRenderer into the Plot.renderers Args: image_source (ImageSource) : a image source instance which contain image configuration Keyword Arguments: 
Additional keyword arguments are passed on as-is to the dynamic image renderer Returns: DynamicImageRenderer : DynamicImageRenderer ''' image_renderer = DynamicImageRenderer(image_source=image_source, **kw) self.renderers.append(image_renderer) return image_renderer @error(REQUIRED_RANGE) def _check_required_range(self): missing = [] if not self.x_range: missing.append('x_range') if not self.y_range: missing.append('y_range') if missing: return ", ".join(missing) + " [%s]" % self @warning(MISSING_RENDERERS) def _check_missing_renderers(self): if len(self.renderers) == 0: return str(self) @warning(NO_DATA_RENDERERS) def _check_no_data_renderers(self): if len(self.select(DataRenderer)) == 0: return str(self) @warning(MALFORMED_CATEGORY_LABEL) def _check_colon_in_category_label(self): if not self.x_range: return if not self.y_range: return broken = [] for range_name in ['x_range', 'y_range']: category_range = getattr(self, range_name) if not isinstance(category_range, FactorRange): continue for value in category_range.factors: if not isinstance(value, string_types): break if ':' in value: broken.append((range_name, value)) break if broken: field_msg = ' '.join('[range:%s] [first_value: %s]' % (field, value) for field, value in broken) return '%s [renderer: %s]' % (field_msg, self) @warning(SNAPPED_TOOLBAR_ANNOTATIONS) def _check_snapped_toolbar_and_axis(self): if not self.toolbar_sticky: return if self.toolbar_location is None: return objs = getattr(self, self.toolbar_location) if len(objs) > 0: return str(self) x_range = Instance(Range, help=""" The (default) data range of the horizontal dimension of the plot. """) y_range = Instance(Range, help=""" The (default) data range of the vertical dimension of the plot. """) x_mapper_type = Either(Auto, String, help=""" What kind of mapper to use to convert x-coordinates in data space into x-coordinates in screen space. 
Typically this can be determined automatically, but this property can be useful to, e.g., show datetime values as floating point "seconds since epoch" instead of formatted dates. """) y_mapper_type = Either(Auto, String, help=""" What kind of mapper to use to convert y-coordinates in data space into y-coordinates in screen space. Typically this can be determined automatically, but this property can be useful to, e.g., show datetime values as floating point "seconds since epoch" instead of formatted dates """) extra_x_ranges = Dict(String, Instance(Range), help=""" Additional named ranges to make available for mapping x-coordinates. This is useful for adding additional axes. """) extra_y_ranges = Dict(String, Instance(Range), help=""" Additional named ranges to make available for mapping y-coordinates. This is useful for adding additional axes. """) hidpi = Bool(default=True, help=""" Whether to use HiDPI mode when available. """) title = Instance(Title, default=lambda: Title(text=""), help=""" A title for the plot. Can be a text string or a Title annotation. """) title_location = Enum(Location, default="above", help=""" Where the title will be located. Titles on the left or right side will be rotated. """) outline_props = Include(LineProps, help=""" The %s for the plot border outline. """) outline_line_color = Override(default="#e5e5e5") renderers = List(Instance(Renderer), help=""" A list of all renderers for this plot, including guides and annotations in addition to glyphs and markers. This property can be manipulated by hand, but the ``add_glyph`` and ``add_layout`` methods are recommended to help make sure all necessary setup is performed. """) toolbar = Instance(Toolbar, help=""" The toolbar associated with this plot which holds all the tools. The toolbar is automatically created with the plot. """) toolbar_location = Enum(Location, default="right", help=""" Where the toolbar will be located. If set to None, no toolbar will be attached to the plot. 
""") toolbar_sticky = Bool(default=True, help=""" Stick the toolbar to the edge of the plot. Default: True. If False, the toolbar will be outside of the axes, titles etc. """) tool_events = Instance(ToolEvents, help=""" A ToolEvents object to share and report tool events. """) left = List(Instance(Renderer), help=""" A list of renderers to occupy the area to the left of the plot. """) right = List(Instance(Renderer), help=""" A list of renderers to occupy the area to the right of the plot. """) above = List(Instance(Renderer), help=""" A list of renderers to occupy the area above of the plot. """) below = List(Instance(Renderer), help=""" A list of renderers to occupy the area below of the plot. """) plot_height = Int(600, help=""" Total height of the entire plot (including any axes, titles, border padding, etc.) .. note:: This corresponds directly to the height of the HTML canvas that will be used. """) plot_width = Int(600, help=""" Total width of the entire plot (including any axes, titles, border padding, etc.) .. note:: This corresponds directly to the width of the HTML canvas that will be used. """) inner_width = Int(readonly=True, help=""" This is the exact width of the plotting canvas, i.e. the width of the actual plot, without toolbars etc. Note this is computed in a web browser, so this property will work only in backends capable of bidirectional communication (server, notebook). .. note:: This is an experimental feature and the API may change in near future. """) inner_height = Int(readonly=True, help=""" This is the exact height of the plotting canvas, i.e. the height of the actual plot, without toolbars etc. Note this is computed in a web browser, so this property will work only in backends capable of bidirectional communication (server, notebook). .. note:: This is an experimental feature and the API may change in near future. """) layout_width = Int(readonly=True, help=""" This is the exact width of the layout, i.e. 
the height of the actual plot, with toolbars etc. Note this is computed in a web browser, so this property will work only in backends capable of bidirectional communication (server, notebook). .. note:: This is an experimental feature and the API may change in near future. """) layout_height = Int(readonly=True, help=""" This is the exact height of the layout, i.e. the height of the actual plot, with toolbars etc. Note this is computed in a web browser, so this property will work only in backends capable of bidirectional communication (server, notebook). .. note:: This is an experimental feature and the API may change in near future. """) background_props = Include(FillProps, help=""" The %s for the plot background style. """) background_fill_color = Override(default='#ffffff') border_props = Include(FillProps, help=""" The %s for the plot border style. """) border_fill_color = Override(default='#ffffff') min_border_top = Int(help=""" Minimum size in pixels of the padding region above the top of the central plot region. .. note:: This is a *minimum*. The padding region may expand as needed to accommodate titles or axes, etc. """) min_border_bottom = Int(help=""" Minimum size in pixels of the padding region below the bottom of the central plot region. .. note:: This is a *minimum*. The padding region may expand as needed to accommodate titles or axes, etc. """) min_border_left = Int(help=""" Minimum size in pixels of the padding region to the left of the central plot region. .. note:: This is a *minimum*. The padding region may expand as needed to accommodate titles or axes, etc. """) min_border_right = Int(help=""" Minimum size in pixels of the padding region to the right of the central plot region. .. note:: This is a *minimum*. The padding region may expand as needed to accommodate titles or axes, etc. """) min_border = Int(5, help=""" A convenience property to set all all the ``min_border_X`` properties to the same value. 
If an individual border property is explicitly set, it will override ``min_border``. """) h_symmetry = Bool(True, help=""" Whether the total horizontal padding on both sides of the plot will be made equal (the left or right padding amount, whichever is larger). """) v_symmetry = Bool(False, help=""" Whether the total vertical padding on both sides of the plot will be made equal (the top or bottom padding amount, whichever is larger). """) lod_factor = Int(10, help=""" Decimation factor to use when applying level-of-detail decimation. """) lod_threshold = Int(2000, help=""" A number of data points, above which level-of-detail downsampling may be performed by glyph renderers. Set to ``None`` to disable any level-of-detail downsampling. """) lod_interval = Int(300, help=""" Interval (in ms) during which an interactive tool event will enable level-of-detail downsampling. """) lod_timeout = Int(500, help=""" Timeout (in ms) for checking whether interactive tool events are still occurring. Once level-of-detail mode is enabled, a check is made every ``lod_timeout`` ms. If no interactive tool events have happened, level-of-detail mode is disabled. """) webgl = Bool(False, help=""" Whether WebGL is enabled for this plot. If True, the glyphs that support this will render via WebGL instead of the 2D canvas. """)
bsd-3-clause
-4,859,454,633,804,849,000
31.0416
109
0.624688
false
expertanalytics/fagkveld
worldmap/src/worldmap/model/location.py
1
1192
from typing import Dict, List, Tuple, Set, Optional from abc import abstractmethod import numpy class Location: name: str = "" long_name: str = "" border_x: List[numpy.ndarray] border_hull_x: List[numpy.ndarray] border_y: List[numpy.ndarray] = [] border_hull_y: List[numpy.ndarray] = [] neighbors: "Locations" = {} parent: "Optional[Location]" = None children: "Locations" = {} level: int = 0 alpha3code: str = "" color: str = "" def __init__( self, name: str, long_name: Optional[str] = None, parent: "Optional[Location]" = None, ): self.name = name self.long_name = long_name if long_name else name self.parent = parent self.border_x = [] self.border_hull_x = [] self.border_y = [] self.border_hull_y = [] @property def location_x(self) -> numpy.ndarray: pass # TODO: implement here @property def location_y(self) -> numpy.ndarray: pass # TODO: implement here def __str__(self): return "Location('{}')".format(self.long_name) Locations = Dict[str, Location]
bsd-2-clause
-622,663,362,204,864,100
21.923077
57
0.557886
false
dhp-denero/LibrERP
dt_product_brand/product_brand.py
1
3987
# -*- encoding: utf-8 -*- ################################################################################# # # # product_brand for OpenERP # # Copyright (C) 2009 NetAndCo (<http://www.netandco.net>). # # Authors, Mathieu Lemercier, mathieu@netandco.net, # # Franck Bret, franck@netandco.net # # Copyright (C) 2011 Akretion Benoît Guillot <benoit.guillot@akretion.com> # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU Affero General Public License as # # published by the Free Software Foundation, either version 3 of the # # License, or (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU Affero General Public License for more details. # # # # You should have received a copy of the GNU Affero General Public License # # along with this program. If not, see <http://www.gnu.org/licenses/>. 
# # # ################################################################################# from openerp.osv.orm import Model from openerp.osv import fields class product_brand(Model): _name = 'product.brand' _columns = { 'name': fields.char('Brand Name', size=32), 'description': fields.text('Description', translate=True), 'partner_id': fields.many2one('res.partner', 'partner', help='Select a partner for this brand if it exist'), 'logo': fields.binary('Logo File') } _order = 'name' _sql_constraints = [ ('name_uniq', 'unique (name)', 'The name of the Brand must be unique !') ] class product_template(Model): _name = 'product.template' _inherit = 'product.template' _columns = { 'product_brand_id': fields.many2one('product.brand', 'Brand', help='Select a brand for this product'), } class product_product(Model): _name = 'product.product' _inherit = 'product.product' def onchange_product_brand_id(self, cr, uid, ids, product_brand_id, context=None): """ When category changes, we search for taxes, UOM and product type """ if context is None: context = self.pool['res.users'].context_get(cr, uid, context=context) res = {} if not product_brand_id: res = { 'manufacturer': False, } else: brand_data = self.pool['product.brand'].read(cr, uid, product_brand_id, [], context=context) if brand_data['partner_id']: res['manufacturer'] = brand_data['partner_id'] return {'value': res, } def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False): if context and context.get('product_brand_id', False): product_ids = self.pool['product.product'].search(cr, uid, [('product_brand_id', '=', context['product_brand_id'])]) if product_ids: product_ids = list(set(product_ids)) args.append(['id', 'in', product_ids]) return super(product_product, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)
agpl-3.0
8,626,166,335,862,885,000
47.609756
136
0.493979
false
jithinbp/pslab-desktop-apps
psl_res/GUI/D_PHYSICS/B_physics/B_SpeedOfSound.py
1
3745
#!/usr/bin/python """ :: This experiment is used to study non-inverting amplifiers """ from __future__ import print_function from PSL_Apps.utilitiesClass import utilitiesClass from PSL_Apps.templates import ui_template_graph_nofft import numpy as np from PyQt4 import QtGui,QtCore import pyqtgraph as pg import sys,functools,time params = { 'image' : 'halfwave.png', 'name':'Speed of\nSound', 'hint':''' Measure speed of sound using a 40KHz transmit piezo and receiver.<br> ''' } class AppWindow(QtGui.QMainWindow, ui_template_graph_nofft.Ui_MainWindow,utilitiesClass): def __init__(self, parent=None,**kwargs): super(AppWindow, self).__init__(parent) self.setupUi(self) self.I=kwargs.get('I',None) self.setWindowTitle(self.I.H.version_string+' : '+params.get('name','').replace('\n',' ') ) from PSL.analyticsClass import analyticsClass self.math = analyticsClass() self.prescalerValue=0 self.plot=self.add2DPlot(self.plot_area,enableMenu=False) #self.enableCrossHairs(self.plot,[]) labelStyle = {'color': 'rgb(255,255,255)', 'font-size': '11pt'} self.plot.setLabel('left','V (CH1)', units='V',**labelStyle) self.plot.setLabel('bottom','Time', units='S',**labelStyle) self.plot.setYRange(-8.5,8.5) self.tg=0.5 self.max_samples=10000 self.samples = self.max_samples self.timer = QtCore.QTimer() self.legend = self.plot.addLegend(offset=(-10,30)) self.curveCH1 = self.addCurve(self.plot,'RAMP In(CH1)') self.autoRange() self.WidgetLayout.setAlignment(QtCore.Qt.AlignLeft) self.ControlsLayout.setAlignment(QtCore.Qt.AlignRight) a1={'TITLE':'Acquire Data','FUNC':self.run,'TOOLTIP':'Sets SQR1 to HIGH, and immediately records the ramp'} self.ampGain = self.buttonIcon(**a1) self.WidgetLayout.addWidget(self.ampGain) self.WidgetLayout.addWidget(self.addSQR1(self.I)) #Control widgets a1={'TITLE':'TIMEBASE','MIN':0,'MAX':9,'FUNC':self.set_timebase,'UNITS':'S','TOOLTIP':'Set Timebase of the oscilloscope'} self.ControlsLayout.addWidget(self.dialIcon(**a1)) G = 
self.gainIcon(FUNC=self.I.set_gain,LINK=self.gainChanged) self.ControlsLayout.addWidget(G) G.g1.setCurrentIndex(1);G.g2.setEnabled(False) self.running=True self.fit = False def gainChanged(self,g): self.autoRange() def set_timebase(self,g): timebases = [0.5,1,2,4,8,32,128,256,512,1024] self.prescalerValue=[0,0,0,0,1,1,2,2,3,3,3][g] samplescaling=[1,1,1,1,1,0.5,0.4,0.3,0.2,0.2,0.1] self.tg=timebases[g] self.samples = int(self.max_samples*samplescaling[g]) return self.autoRange() def autoRange(self): xlen = self.tg*self.samples*1e-6 self.plot.autoRange(); chan = self.I.analogInputSources['CH1'] R = [chan.calPoly10(0),chan.calPoly10(1023)] R[0]=R[0]*.9;R[1]=R[1]*.9 self.plot.setLimits(yMax=max(R),yMin=min(R),xMin=0,xMax=xlen) self.plot.setYRange(min(R),max(R)) self.plot.setXRange(0,xlen) return self.samples*self.tg*1e-6 def run(self): try: self.ampGain.value.setText('reading...') x,y = self.I.capture_fullspeed('CH3',self.samples,self.tg,'FIRE_PULSES',interval=50) self.curveCH1.setData(x*1e-6,y) #self.displayCrossHairData(self.plot,False,self.samples,self.I.timebase,[y],[(0,255,0)]) self.I.set_state(SQR1=False) #Set SQR1 to 0 return 'Done' except Exception,e: print (e) return 'Error' def saveData(self): self.saveDataWindow([self.curveCH1],self.plot) def closeEvent(self, event): self.running=False self.timer.stop() self.finished=True def __del__(self): self.timer.stop() print('bye') if __name__ == "__main__": from PSL import sciencelab app = QtGui.QApplication(sys.argv) myapp = AppWindow(I=sciencelab.connect()) myapp.show() sys.exit(app.exec_())
gpl-3.0
8,507,347,280,602,509,000
26.335766
123
0.695861
false
massimovassalli/SingleCellForceSpectroscopy
sifork/qt/qtView.py
1
7581
from PyQt4 import QtCore, QtGui try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: _fromUtf8 = lambda s: s import os as os import sys import pyqtgraph as pg import numpy as np import Ui_qtView as qtView_face from sifork import experiment pg.setConfigOption('background', 'w') pg.setConfigOption('foreground', 'k') htmlpre = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">\n<html><head><meta name="qrichtext" content="1" /><style type="text/css">\np, li { white-space: pre-wrap; }\n</style></head><body style=" font-family:"Ubuntu"; font-size:11pt; font-weight:400; font-style:normal;">\n<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-size:8pt;">' htmlpost = '</span></p></body></html>' class curveWindow ( QtGui.QMainWindow ): iter = 0 prev = 0 cRosso = QtGui.QColor(255,0,0) cVerde = QtGui.QColor(50,255,50) cNero = QtGui.QColor(0,0,0) def __init__ ( self, parent = None ): QtGui.QMainWindow.__init__( self, parent ) self.setWindowTitle( 'qtView' ) self.ui = qtView_face.Ui_facewindow() self.ui.setupUi( self ) self.setConnections() self.exp = experiment.experiment() def addFiles(self, fnames = None): if fnames == None: fnames = QtGui.QFileDialog.getOpenFileNames(self, 'Select files', './') QtCore.QCoreApplication.processEvents() pmax = len(fnames) QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor)) progress = QtGui.QProgressDialog("Opening files...", "Cancel opening", 0, pmax); i=0 for fname in fnames: QtCore.QCoreApplication.processEvents() self.exp.addFiles([str(fname)]) progress.setValue(i) i=i+1 if (progress.wasCanceled()): break progress.setValue(pmax) QtGui.QApplication.restoreOverrideCursor() self.refillList() def addDirectory(self,dirname=None): if dirname == None: dirname = QtGui.QFileDialog.getExistingDirectory(self, 'Select a directory', './') if not os.path.isdir(dirname): return 
QtCore.QCoreApplication.processEvents() pmax = len(os.listdir(dirname)) QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor)) progress = QtGui.QProgressDialog("Opening files...", "Cancel opening", 0, pmax); i=0 for fnamealone in os.listdir(dirname): #if i % 100 == 0: QtCore.QCoreApplication.processEvents() fname = os.path.join(str(dirname), fnamealone) self.exp.addFiles([str(fname)]) progress.setValue(i) i=i+1 if (progress.wasCanceled()): break progress.setValue(pmax) QtGui.QApplication.restoreOverrideCursor() self.refillList() def refillList(self): scena = QtGui.QGraphicsScene() width = self.ui.griglia.width() height = self.ui.griglia.height() N = len(self.exp) self.ui.slide1.setMaximum(N) self.ui.slide2.setMaximum(N) self.ui.slide3.setMaximum(N) gNx = np.sqrt(N*width/height) Nx = int(np.ceil(gNx)) if int(gNx) == Nx: Nx+=1 L = int(width/Nx) i = 0 j = 0 k=0 if L<=3: L=3 while i*Nx+j<N: h = L-2 w = L-2 rect = QtCore.QRectF(j*(L)+1, i*(L)+1, h, w) idrect = scena.addRect(rect, pen = QtGui.QPen(self. cVerde,0) ,brush = self. 
cVerde ) j+=1 k+=1 if j == Nx: j=0 i+=1 scena.wheelEvent = self.scorri self.ui.griglia.setScene(scena) self.ui.slide1.setValue(1) # og = self.ui.griglia.items() # for i in range(len(og)): # if self.curves[-i-1].inconsistency: # og[i].setBrush(self.cRosso) # og[i].setPen(self.cRosso) self.ui.griglia.invalidateScene() return True def scorri(self,ev=None): delta = ev.delta()/120 self.ui.slide2.setSliderPosition(self.ui.slide2.sliderPosition()-delta) def sqSwitch(self,i,n): og = self.ui.griglia.items() if n: c = self.cNero else: c = og[-i].brush().color() og[-i].setPen(c) def goToCurve(self,dove): self.ui.labFilename.setText(htmlpre + self.exp[dove-1].basename + htmlpost) if self.prev != 0: self.sqSwitch(self.prev,False) self.sqSwitch(dove,True) self.prev = dove self.viewCurve(dove) def updateCurve(self): self.viewCurve(self.ui.slide1.value(),autorange=False) def refreshCurve(self): self.viewCurve(self.ui.slide1.value(),autorange=True) def viewCurve(self,dove = 1,autorange=True): dove -= 1 self.ui.grafo.clear() for p in self.exp[dove]: if p == self.exp[dove][-1]: self.ui.grafo.plot(p.z,p.f,pen='b') else: self.ui.grafo.plot(p.z,p.f) if autorange: self.ui.grafo.autoRange() def setConnections(self): # QtCore.QObject.connect(self.ui.slide1, QtCore.SIGNAL(_fromUtf8("actionTriggered(int)")), self.moveJumping) QtCore.QObject.connect(self.ui.slide1, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.ui.slide2.setValue) QtCore.QObject.connect(self.ui.slide1, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.ui.slide3.setValue) QtCore.QObject.connect(self.ui.slide2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.ui.slide1.setValue) QtCore.QObject.connect(self.ui.slide3, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.ui.slide1.setValue) QtCore.QObject.connect(self.ui.slide1, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.goToCurve ) # QtCore.QObject.connect(self.ui.slide2, QtCore.SIGNAL(_fromUtf8("actionTriggered(int)")), self.moveJumping) # 
QtCore.QObject.connect(self.ui.slide2, QtCore.SIGNAL(_fromUtf8("sliderReleased()")), self.moveJumping) #QtCore.QObject.connect(self.ui.slide1, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")), self.goToCurve) QtCore.QObject.connect(self.ui.bAddDir, QtCore.SIGNAL(_fromUtf8("clicked()")), self.addDirectory) QtCore.QObject.connect(self.ui.bAddFiles, QtCore.SIGNAL(_fromUtf8("clicked()")), self.addFiles) #QtCore.QObject.connect(self.ui.pushButton_3, QtCore.SIGNAL(_fromUtf8("clicked()")), self.switchColor) #QtCore.QObject.connect(self.ui.pushButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.saveCurves) #QtCore.QObject.connect(self.ui.pushButton_2, QtCore.SIGNAL(_fromUtf8("clicked()")), self.processNext) #QtCore.QObject.connect(self.ui.spButton, QtCore.SIGNAL(_fromUtf8("clicked()")), facewindow.savePeaks) #QtCore.QObject.connect(self.ui.pThreshold, QtCore.SIGNAL(_fromUtf8("editingFinished()")), self.refreshCurve) QtCore.QMetaObject.connectSlotsByName(self) if __name__ == "__main__": import sys app = QtGui.QApplication(sys.argv) app.setApplicationName( 'qtView' ) canale = curveWindow() canale.show() QtCore.QObject.connect( app, QtCore.SIGNAL( 'lastWindowClosed()' ), app, QtCore.SLOT( 'quit()' ) ) sys.exit(app.exec_())
mit
3,310,249,543,625,385,500
38.284974
471
0.611661
false
vacancy/TensorArtist
examples/generative-model/desc_vae_mnist_mlp_bernoulli_adam.py
1
4242
# -*- coding:utf8 -*- # File : desc_vae_mnist_mlp_bernoulli_adam.py # Author : Jiayuan Mao # Email : maojiayuan@gmail.com # Date : 3/17/17 # # This file is part of TensorArtist. from tartist.core import get_env, get_logger from tartist.core.utils.naming import get_dump_directory, get_data_directory from tartist.nn import opr as O, optimizer, summary logger = get_logger(__file__) __envs__ = { 'dir': { 'root': get_dump_directory(__file__), 'data': get_data_directory('WellKnown/mnist') }, 'trainer': { 'learning_rate': 0.001, 'batch_size': 100, 'epoch_size': 500, 'nr_epochs': 100, }, 'inference': { 'batch_size': 256, 'epoch_size': 40 }, 'demo': { 'is_reconstruct': False } } def make_network(env): with env.create_network() as net: code_length = 20 h, w, c = 28, 28, 1 is_reconstruct = get_env('demo.is_reconstruct', False) dpc = env.create_dpcontroller() with dpc.activate(): def inputs(): img = O.placeholder('img', shape=(None, h, w, c)) return [img] def forward(x): if is_reconstruct or env.phase is env.Phase.TRAIN: with env.variable_scope('encoder'): _ = x _ = O.fc('fc1', _, 500, nonlin=O.tanh) _ = O.fc('fc2', _, 500, nonlin=O.tanh) mu = O.fc('fc3_mu', _, code_length) log_var = O.fc('fc3_sigma', _, code_length) var = O.exp(log_var) std = O.sqrt(var) epsilon = O.random_normal([x.shape[0], code_length]) z_given_x = mu + std * epsilon else: z_given_x = O.random_normal([1, code_length]) with env.variable_scope('decoder'): _ = z_given_x _ = O.fc('fc1', _, 500, nonlin=O.tanh) _ = O.fc('fc2', _, 500, nonlin=O.tanh) _ = O.fc('fc3', _, 784, nonlin=O.sigmoid) _ = _.reshape(-1, h, w, c) x_given_z = _ if env.phase is env.Phase.TRAIN: with env.variable_scope('loss'): content_loss = O.raw_cross_entropy_prob('raw_content', x_given_z.flatten2(), x.flatten2()) content_loss = content_loss.sum(axis=1).mean(name='content') # distrib_loss = 0.5 * (O.sqr(mu) + O.sqr(std) - 2. * O.log(std + 1e-8) - 1.0).sum(axis=1) distrib_loss = -0.5 * (1. 
+ log_var - O.sqr(mu) - var).sum(axis=1) distrib_loss = distrib_loss.mean(name='distrib') loss = content_loss + distrib_loss dpc.add_output(loss, name='loss', reduce_method='sum') dpc.add_output(x_given_z, name='output') dpc.set_input_maker(inputs).set_forward_func(forward) net.add_all_dpc_outputs(dpc, loss_name='loss') if env.phase is env.Phase.TRAIN: summary.inference.scalar('loss', net.loss) def make_optimizer(env): wrapper = optimizer.OptimizerWrapper() wrapper.set_base_optimizer(optimizer.base.AdamOptimizer(get_env('trainer.learning_rate'))) wrapper.append_grad_modifier(optimizer.grad_modifier.LearningRateMultiplier([ ('*/b', 2.0), ])) # wrapper.append_grad_modifier(optimizer.grad_modifier.WeightDecay([ # ('*/W', 0.0005) # ])) env.set_optimizer(wrapper) from data_provider_vae_mnist import * def main_train(trainer): from tartist.plugins.trainer_enhancer import summary summary.enable_summary_history(trainer) summary.enable_echo_summary_scalar(trainer) from tartist.plugins.trainer_enhancer import progress progress.enable_epoch_progress(trainer) from tartist.plugins.trainer_enhancer import snapshot snapshot.enable_snapshot_saver(trainer) from tartist.plugins.trainer_enhancer import inference inference.enable_inference_runner(trainer, make_dataflow_inference) trainer.train()
mit
2,794,771,510,245,185,000
33.487805
114
0.537247
false
CLVsol/clvsol_odoo_addons
clv_person_aux/models/address_aux.py
1
3795
# -*- coding: utf-8 -*- # Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). import logging from odoo import api, fields, models _logger = logging.getLogger(__name__) class AddressAux(models.Model): _inherit = 'clv.address_aux' person_aux_ids = fields.One2many( comodel_name='clv.person_aux', inverse_name='ref_address_aux_id', string='Persons (Aux)' ) count_persons_aux = fields.Integer( string='Persons (Aux) (count)', compute='_compute_count_persons_aux', # store=True ) @api.depends('person_aux_ids') def _compute_count_persons_aux(self): for r in self: r.count_persons_aux = len(r.person_aux_ids) class PersonAux(models.Model): _inherit = 'clv.person_aux' ref_address_aux_is_unavailable = fields.Boolean( string='Address (Aux) is unavailable', default=False, ) ref_address_aux_id = fields.Many2one(comodel_name='clv.address_aux', string='Address (Aux)', ondelete='restrict') ref_address_aux_code = fields.Char(string='Address (Aux) Code', related='ref_address_aux_id.code', store=False) ref_address_aux_phone = fields.Char(string='Address (Aux) Phone', related='ref_address_aux_id.phone') ref_address_aux_mobile_phone = fields.Char(string='Address (Aux) Mobile', related='ref_address_aux_id.mobile') ref_address_aux_email = fields.Char(string='Address (Aux) Email', related='ref_address_aux_id.email') ref_address_aux_category_names = fields.Char( string='Address (Aux) Category Names', related='ref_address_aux_id.category_ids.name', store=True ) ref_address_aux_category_ids = fields.Many2many( comodel_name='clv.address.category', string='Address (Aux) Categories', related='ref_address_aux_id.category_ids' ) @api.multi def do_person_aux_get_ref_address_aux_data(self): for person_aux in self: _logger.info(u'>>>>> %s', person_aux.ref_address_aux_id) if (person_aux.reg_state in ['draft', 'revised']) and \ (person_aux.ref_address_aux_id.id is not False): data_values = {} if person_aux.ref_address_aux_id.id is not 
False: data_values['ref_address_aux_id'] = person_aux.ref_address_aux_id.id data_values['street'] = person_aux.ref_address_aux_id.street data_values['street2'] = person_aux.ref_address_aux_id.street2 data_values['zip'] = person_aux.ref_address_aux_id.zip data_values['city'] = person_aux.ref_address_aux_id.city data_values['state_id'] = person_aux.ref_address_aux_id.state_id.id data_values['country_id'] = person_aux.ref_address_aux_id.country_id.id # data_values['phone'] = person_aux.ref_address_aux_id.phone # data_values['mobile'] = person_aux.ref_address_aux_id.mobile _logger.info(u'>>>>>>>>>> %s', data_values) person_aux.write(data_values) return True @api.multi def do_person_aux_remove_ref_address_aux(self): for person_aux in self: _logger.info(u'>>>>> %s', person_aux.ref_address_aux_id) if (person_aux.reg_state in ['draft', 'revised']) and \ (person_aux.ref_address_aux_id.id is not False): data_values = {} if person_aux.ref_address_aux_id.id is not False: data_values['ref_address_aux_id'] = False _logger.info(u'>>>>>>>>>> %s', data_values) person_aux.write(data_values) return True
agpl-3.0
-649,032,661,184,708,100
34.138889
117
0.594203
false
mzdu/2048gae
src/main.py
1
1335
import webapp2 import jinja2 import os import logging jinja_environment = jinja2.Environment(loader = jinja2.FileSystemLoader(os.path.dirname(__file__) + '/templates')) def doRender(handler, tname = 'index.html', values = {}): temp = jinja_environment.get_template(tname) handler.response.out.write(temp.render(values)) return True class MainPageHandler(webapp2.RequestHandler): def get(self): values = dict() values['css'] = ['/static/css/main.css'] values['javascript'] = ['/static/js/bind_polyfill.js', '/static/js/classlist_polyfill.js', '/static/js/animframe_polyfill.js', '/static/js/keyboard_input_manager.js', '/static/js/html_actuator.js', '/static/js/grid.js', '/static/js/tile.js', '/static/js/local_storage_manager.js', '/static/js/game_manager.js', '/static/js/application.js', ] doRender(self, 'index.html', values) app = webapp2.WSGIApplication([('/.*', MainPageHandler)],debug = True)
mit
6,896,633,148,561,898,000
35.083333
114
0.495131
false
tylerlaberge/Jasper
jasper/steps.py
1
1413
""" The steps module. """ from functools import wraps import asyncio class Step(object): """ The Step class is used as a wrapper around functions for testing behaviours. """ def __init__(self, function, **kwargs): """ Initialize a new Step object. :param function: The function this step will call when this step is run. :param kwargs: Kwargs to call the given function with. """ self.function = function self.kwargs = kwargs self.ran = False self.passed = False async def run(self, context): """ Run this step and record the results. :param context: A context object too pass into this steps function. """ try: if asyncio.iscoroutinefunction(self.function): await self.function(context, **self.kwargs) else: self.function(context, **self.kwargs) except Exception: raise else: self.passed = True finally: self.ran = True def step(func): """ A decorator for wrapping a function into a Step object. :param func: The function to create a step out of. :return: A function which when called will return a new instance of a Step object. """ @wraps(func) def wrapper(**kwargs): return Step(func, **kwargs) return wrapper
mit
7,253,816,304,372,000,000
24.232143
86
0.587403
false
xbmcmegapack/plugin.video.megapack.dev
resources/lib/menus/home_countries_american_samoa.py
1
1125
#!/usr/bin/python # -*- coding: utf-8 -*- """ This file is part of XBMC Mega Pack Addon. Copyright (C) 2014 Wolverine (xbmcmegapack@gmail.com) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.html """ class Countries_American_samoa(): '''Class that manages this specific menu context.''' def open(self, plugin, menu): menu.add_xplugins(plugin.get_xplugins(dictionaries=["Channels", "Events", "Live", "Movies", "Sports", "TVShows"], countries=["American Samoa"]))
gpl-3.0
-8,099,804,973,255,031,000
37.758621
76
0.69724
false
BurtBiel/azure-cli
src/command_modules/azure-cli-network/azure/cli/command_modules/network/mgmt_route_table/lib/models/__init__.py
1
1448
#--------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. #--------------------------------------------------------------------------------------------- #pylint: skip-file # coding=utf-8 # -------------------------------------------------------------------------- # Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .deployment_route_table import DeploymentRouteTable from .template_link import TemplateLink from .parameters_link import ParametersLink from .provider_resource_type import ProviderResourceType from .provider import Provider from .basic_dependency import BasicDependency from .dependency import Dependency from .deployment_properties_extended import DeploymentPropertiesExtended from .deployment_extended import DeploymentExtended from .route_table_creation_client_enums import ( DeploymentMode, ) __all__ = [ 'DeploymentRouteTable', 'TemplateLink', 'ParametersLink', 'ProviderResourceType', 'Provider', 'BasicDependency', 'Dependency', 'DeploymentPropertiesExtended', 'DeploymentExtended', 'DeploymentMode', ]
mit
-4,311,053,074,764,071,400
37.105263
94
0.593923
false
Lana-Pa/Mantis-training
conftest.py
1
2674
import pytest import json import os.path from fixture.application import Application import ftputil fixture = None target = None def load_config(file): global target if target is None: config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), file) with open(config_file) as f: target = json.load(f) return target @pytest.fixture(scope="session") def config(request): return load_config(request.config.getoption("--target")) @pytest.fixture # create fixture for initialization with checking def app(request, config): global fixture browser = request.config.getoption("--browser") if fixture is None or not fixture.is_valid(): # check for fixture validness fixture = Application(browser=browser, base_url=config['web']["baseUrl"]) return fixture @pytest.fixture(scope="session", autouse=True) def configure_server(request, config): install_server_configuration(config['ftp']['host'], config['ftp']['username'], config['ftp']['password']) def fin(): restore_server_configuration(config['ftp']['host'], config['ftp']['username'], config['ftp']['password']) request.addfinalizer(fin) def install_server_configuration(host,username,password): with ftputil.FTPHost(host, username, password) as remote: # создаем соединение с удаленной машиной if remote.path.isfile("config_inc.php.bak"): # проверка наличия файла remote.remove("config_inc.php.bak") if remote.path.isfile("config_inc.php"): remote.rename("config_inc.php", "config_inc.php.bak") remote.upload(os.path.join(os.path.dirname(__file__),"resources/config_inc.php"), "config_inc.php") def restore_server_configuration(host,username,password): with ftputil.FTPHost(host, username, password) as remote: if remote.path.isfile("config_inc.php.bak"): if remote.path.isfile("config_inc.php"): remote.remove("config_inc.php") remote.rename("config_inc.php.bak", "config_inc.php") @pytest.fixture(scope="session", autouse=True) # create fixture for finalization def stop(request): def fin(): fixture.session.ensure_logout() #check for 
preconditions before logout fixture.destroy() request.addfinalizer(fin) # teardown function return fixture # hook - add additional parameters to load tests from cmd def pytest_addoption(parser): parser.addoption("--browser", action="store", default="firefox") # parameter, what to do, definition of the parameter parser.addoption("--target", action="store", default="target.json")
apache-2.0
-8,926,993,292,282,782,000
36.971014
122
0.691603
false
tsl143/addons-server
src/olympia/users/cron.py
1
1443
from django.db import connections import multidb from celery import group import olympia.core.logger from olympia.amo import VALID_ADDON_STATUSES from olympia.amo.utils import chunked from .tasks import update_user_ratings_task task_log = olympia.core.logger.getLogger('z.task') def update_user_ratings(): """Update add-on author's ratings.""" cursor = connections[multidb.get_slave()].cursor() # We build this query ahead of time because the cursor complains about data # truncation if it does the parameters. Also, this query is surprisingly # quick, <1sec for 6100 rows returned q = """ SELECT addons_users.user_id as user_id, AVG(rating) as avg_rating FROM reviews INNER JOIN versions INNER JOIN addons_users INNER JOIN addons ON reviews.version_id = versions.id AND addons.id = versions.addon_id AND addons_users.addon_id = addons.id WHERE reviews.reply_to IS NULL AND reviews.rating > 0 AND addons.status IN (%s) GROUP BY addons_users.user_id """ % (",".join(map(str, VALID_ADDON_STATUSES))) cursor.execute(q) d = cursor.fetchall() cursor.close() ts = [update_user_ratings_task.subtask(args=[chunk]) for chunk in chunked(d, 1000)] group(ts).apply_async()
bsd-3-clause
5,285,070,275,735,279,000
30.369565
79
0.615385
false
IncidentNormal/TestApps
ALE/Platform_nodes_Draft3b.py
1
7673
from SimPy.Simulation import * import visual as v import math from random import seed, uniform, randint class Global(): NUMNODES = 4 NUMCHANNELS = 1 Node_L_List = [] Node_A_List = [] Node_S_List = [] ChannelList = [] #stores NodeSendQueueList = [] #stores maxTime = 10 class Mon(): NumListenCollisions = 0 NumSendingCollisions = 0 class Packet(): def __init__(self, a_to, a_from, tx, p_type): self.addr_to = a_to #should be int self.addr_from = a_from self.tx = tx self.p_type = p_type #0=data, 1=confirm class NodeListen(Process): def __init__(self,i): Process.__init__(self,name='NodeL'+str(i)) self.ID = i #shared between Listen and Send processes def execute(self): while True: yield hold, self, 0.01 for chn in G.ChannelList: #potential to randomise this order to prevent all Nodes searching iteratively if chn.nrBuffered > 0: for pkt in chn.theBuffer: #this is a bit magic atm: checking packet without 'grabbing' it if pkt.addr_to == self.ID and pkt.p_type == 0: yield (get,self,chn,1,1),(hold,self,0.0001) #renege after very short time: if item's not there immediately then move on if len(self.got)>0: print 'Node',self.ID, 'got packet from Node',self.got[0].addr_from #yield get,self,chn,1,1 #priority 1 (low) conf_pkt = Packet(self.got[0].addr_from,self.ID,now(),1) yield put,self,G.NodeSendQueueList[self.ID],[conf_pkt],5 #priority 5 (high) print 'Node',self.ID, 'put CONF packet on NodeSendQueue' else: Mon.NumListenCollisions += 1 print 'Listen Collision' yield get,self,chn,1,100 #priority 100 (v high) - getting colliding packet from channel print self.got elif pkt.addr_to == self.ID and pkt.p_type == 1: print 'Node',self.ID,' received CONF packet from', pkt.addr_from, now() yield get,self,chn,1,1 self.interrupt(G.Node_S_List[pkt.addr_from]) class NodePacketAdder(Process): def __init__(self,i): Process.__init__(self,name='NodeA'+str(i)) self.ID = i #shared between Listen and Send and Adding processes def execute(self): while True: yield hold, self, uniform(1,5) nodeToSend = 
randint(0,G.NUMNODES-1) while nodeToSend == self.ID: #make sure not sending to itself nodeToSend = randint(0,G.NUMNODES-1) pkt = Packet(nodeToSend,self.ID,now(),0) yield put,self,G.NodeSendQueueList[self.ID],[pkt],1 #priority 1 (low) class NodeSend(Process): def __init__(self,i): Process.__init__(self,name='NodeS'+str(i)) self.ID = i def execute(self): yield hold, self, uniform(0,1) #so don't all start at same time while True: sent = False choice = -1 while sent==False : if G.NodeSendQueueList[self.ID].nrBuffered > 0: for i in range(G.NUMCHANNELS): if G.ChannelList[i].nrBuffered==0: choice = i break if choice != -1: yield hold, self, 0.001 #very short wait to represent slight delay if G.ChannelList[choice].nrBuffered==0: if G.NodeSendQueueList[self.ID].nrBuffered > 0: yield get,self,G.NodeSendQueueList[self.ID],1,1 #priority 1 (low) print 'Node',self.ID, 'read from NodeSendQueue, sending packet to:', self.got[0].addr_to, 'type:', self.got[0].p_type, 'on', chn.name else: print 'Something bad happened' yield put,self,chn,self.got, 1 #priority 1 (low) sent=True if self.got[0].p_type==1: yield hold,self,0.1 #time to recieve packet before resending if self.interrupted(): yield get,self,G.NodeSendQueueList[self.ID],1,100 #pop off first entry in list, else it remains on list for next loop, priority 100 (v high) self.interruptReset() print 'Interrupt success: Conf packet received' else: print 'Node',self.ID, 'did not receieve conf, resending' else: yield hold,self,0.01 else: Mon.NumSendingCollisions += 1 print 'Sending Collision' yield get,self,chn,1,100 #priority 100 (v high) - getting colliding packet from channel print self.got yield hold, self, uniform(0,1) #backoff choice = -1 else: yield hold,self,0.01 #if no free channels else: yield hold,self,0.01 #if nothing in buffer class visualising(): def __init__(self): self.sphereList = [] #sphere for each node self.rodList = [] #unused self.manageRodList = [] #rods connecting nodes to centre management node r = 1.0 #radius 
of circle that nodes are in delta_theta = (2.0*math.pi) / G.NUMNODES #angle between nodes theta = 0 self.management = v.sphere(pos=v.vector(0,0,0), radius=0.1, colour=v.color.blue) #management node in centre self.label = v.label(pos=(1,1,0), text= '0') #label for amount of disparities at that point in time self.label_cum = v.label(pos=(-1,1,0), text= '0') #cumulative total number of above for i in range(0,G.NUMNODES): circ = v.sphere(pos=v.vector(r*math.cos(theta),r*math.sin(theta),0), radius=0.1, color=v.color.green) self.sphereList.append(circ) print 'circle no. ', i, ' coords ', r*math.cos(theta), ' ', r*math.sin(theta) theta += delta_theta rod = v.cylinder(pos=(0,0,0),axis=(self.sphereList[i].pos), radius=0.005, color=v.color.white) self.manageRodList.append(rod) initialize() G = Global() Vis=visualising() for i in range(G.NUMCHANNELS): chn = Store(name='Channel'+str(i),unitName='packet',capacity=1,putQType=PriorityQ,getQType=PriorityQ) G.ChannelList.append(chn) for i in range(G.NUMNODES): nodeQueue = Store(name='NodeQueue'+str(i),unitName='packet',capacity=1,putQType=PriorityQ,getQType=PriorityQ) G.NodeSendQueueList.append(nodeQueue) node_l = NodeListen(i) node_a = NodePacketAdder(i) node_s = NodeSend(i) G.Node_L_List.append(node_l) G.Node_A_List.append(node_a) G.Node_S_List.append(node_s) activate(G.Node_L_List[i],G.Node_L_List[i].execute(),at=0.0) activate(G.Node_A_List[i],G.Node_A_List[i].execute(),at=0.0) activate(G.Node_S_List[i],G.Node_S_List[i].execute(),at=0.0) simulate(until=G.maxTime)
gpl-2.0
-7,459,447,548,906,195,000
47.563291
176
0.524567
false
thinkingmachines/deeplearningworkshop
codelab_5_simple_cnn.py
1
1964
# SIMPLE MNIST CNN # Source: https://www.tensorflow.org/tutorials/layers def cnn_model_fn(features, labels, mode): """Model function for CNN.""" # Input Layer input_layer = tf.reshape(features, [-1, 28, 28, 1]) # Convolutional Layer #1 conv1 = tf.layers.conv2d( inputs=input_layer, filters=32, kernel_size=[5, 5], padding="same", activation=tf.nn.relu) # Pooling Layer #1 pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2) # Convolutional Layer #2 and Pooling Layer #2 conv2 = tf.layers.conv2d( inputs=pool1, filters=64, kernel_size=[5, 5], padding="same", activation=tf.nn.relu) pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2) # Dense Layer pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64]) dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu) dropout = tf.layers.dropout( inputs=dense, rate=0.4, training=mode == learn.ModeKeys.TRAIN) # Logits Layer logits = tf.layers.dense(inputs=dropout, units=10) loss = None train_op = None # Calculate Loss (for both TRAIN and EVAL modes) if mode != learn.ModeKeys.INFER: onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10) loss = tf.losses.softmax_cross_entropy( onehot_labels=onehot_labels, logits=logits) # Configure the Training Op (for TRAIN mode) if mode == learn.ModeKeys.TRAIN: train_op = tf.contrib.layers.optimize_loss( loss=loss, global_step=tf.contrib.framework.get_global_step(), learning_rate=0.001, optimizer="SGD") # Generate Predictions predictions = { "classes": tf.argmax( input=logits, axis=1), "probabilities": tf.nn.softmax( logits, name="softmax_tensor") } # Return a ModelFnOps object return model_fn_lib.ModelFnOps( mode=mode, predictions=predictions, loss=loss, train_op=train_op)
mit
78,197,066,181,890,620
29.215385
79
0.653768
false
Penaz91/Glitch_Heaven
Game/loadSaves.py
1
2299
# Load Game Menu Component # Part of the Glitch_Heaven project # Copyright 2015-2016 Penaz <penazarea@altervista.org> from components.UI.loadmenu import loadMenu from os import listdir from os.path import join as pjoin from os import remove from game import Game from components.UI.textMenuItem import textMenuItem from components.UI.textinput import textInput class loadSaveMenu(loadMenu): def __init__(self, screen, keys, config, sounds, log): self.logSectionName = "loadGameMenu" self.dirlist = sorted(listdir(pjoin("savegames"))) super().__init__(screen, keys, config, sounds, log) def loadGame(self, savegame): print(pjoin("savegames", savegame)) Game().main(self.screen, self.keys, "load", pjoin("savegames", savegame), self.config, self.sounds, None, self.mainLogger) self.running = False def eraseSave(self, savegame): confirm = textInput(self.screen, self.font, "Type 'Yes' to confirm deletion").get_input() if (confirm.upper() == "YES"): remove(pjoin("savegames", savegame)) self.running = False def makeLoadItem(self): self.loadgame = textMenuItem("Load", (250, 560), lambda: self.editDesc( "Load the selected savegame"), lambda: self.loadGame( self.dirlist[self.id]), self.config, self.sounds, self.font) self.activeItems.append(self.loadgame) self.items.append(self.loadgame) def makeEraseItem(self): self.erase = textMenuItem("Erase", (400, 560), lambda: self.editDesc( "Delete the Selected SaveGame"), lambda: self.eraseSave(self.dirlist[self.id]), self.config, self.sounds, self.font) self.activeItems.append(self.erase) self.items.append(self.erase) def makeMenuItems(self): super().makeMenuItems() self.makeEraseItem()
mit
7,232,331,274,901,255,000
37.983051
97
0.545455
false
fedora-modularity/pdc-updater
pdcupdater/handlers/__init__.py
1
1784
import abc import fedmsg.utils def load_handlers(config): """ Import and instantiate all handlers listed in the given config. """ for import_path in config['pdcupdater.handlers']: cls = fedmsg.utils.load_class(import_path) handler = cls(config) yield handler class BaseHandler(object): """ An abstract base class for handlers to enforce API. """ __metaclass__ = abc.ABCMeta def __init__(self, config): self.config = config @abc.abstractproperty def topic_suffixes(self): pass @abc.abstractmethod def can_handle(self, msg): """ Return True or False if this handler can handle this message. """ pass @abc.abstractmethod def handle(self, pdc, msg): """ Handle a fedmsg and update PDC if necessary. """ pass @abc.abstractmethod def audit(self, pdc): """ This is intended to be called from a cronjob once every few days and is meant to (in a read-only fashion) check that what PDC thinks is true about a service, is actually true. It is expected to take a long time to run. It should return a two lists. The first should be a list of items present in PDC but not in the other service. The second should be a list of items present in the other service, but not in PDC. Those lists will be sewn together into an email to the releng group. """ pass @abc.abstractmethod def initialize(self, pdc): """ This needs to be called only once when pdc-updater is first installed. It should query the original data source and initialize PDC with a base layer of data. It is expected to take a very long time to run. """ pass
lgpl-2.1
4,880,716,642,753,441,000
29.758621
79
0.638453
false
ww9rivers/splunk-sdk-python
tests/test_kvstore_batch.py
1
2815
#!/usr/bin/env python # # Copyright 2011-2014 Splunk, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"): you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import from tests import testlib from splunklib.six.moves import range try: import unittest except ImportError: import unittest2 as unittest import splunklib.client as client class KVStoreBatchTestCase(testlib.SDKTestCase): def setUp(self): super(KVStoreBatchTestCase, self).setUp() self.service.namespace['owner'] = 'nobody' self.service.namespace['app'] = 'search' confs = self.service.kvstore if ('test' in confs): confs['test'].delete() confs.create('test') self.col = confs['test'].data def test_insert_find_update_data(self): data = [{'_key': str(x), 'data': '#' + str(x), 'num': x} for x in range(1000)] self.col.batch_save(*data) testData = self.col.query(sort='num') self.assertEqual(len(testData), 1000) for x in range(1000): self.assertEqual(testData[x]['_key'], str(x)) self.assertEqual(testData[x]['data'], '#' + str(x)) self.assertEqual(testData[x]['num'], x) data = [{'_key': str(x), 'data': '#' + str(x + 1), 'num': x + 1} for x in range(1000)] self.col.batch_save(*data) testData = self.col.query(sort='num') self.assertEqual(len(testData), 1000) for x in range(1000): self.assertEqual(testData[x]['_key'], str(x)) self.assertEqual(testData[x]['data'], '#' + str(x + 1)) self.assertEqual(testData[x]['num'], x + 1) query = [{"query": {"num": x + 1}} for x in range(100)] testData = self.col.batch_find(*query) self.assertEqual(len(testData), 100) 
testData.sort(key=lambda x: x[0]['num']) for x in range(100): self.assertEqual(testData[x][0]['_key'], str(x)) self.assertEqual(testData[x][0]['data'], '#' + str(x + 1)) self.assertEqual(testData[x][0]['num'], x + 1) def tearDown(self): confs = self.service.kvstore if ('test' in confs): confs['test'].delete() if __name__ == "__main__": try: import unittest2 as unittest except ImportError: import unittest unittest.main()
apache-2.0
9,140,246,485,119,778,000
32.915663
94
0.608171
false
QuLogic/meson
mesonbuild/mintro.py
1
24506
# Copyright 2014-2016 The Meson development team # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This is a helper script for IDE developers. It allows you to extract information such as list of targets, files, compiler flags, tests and so on. All output is in JSON for simple parsing. Currently only works for the Ninja backend. Others use generated project files and don't need this info.""" import collections import json from . import build, coredata as cdata from . import mesonlib from .ast import IntrospectionInterpreter, build_target_functions, AstConditionLevel, AstIDGenerator, AstIndentationGenerator, AstJSONPrinter from . import mlog from .backend import backends from .mparser import BaseNode, FunctionNode, ArrayNode, ArgumentNode, StringNode from .interpreter import Interpreter from pathlib import Path, PurePath import typing as T import os import argparse from .mesonlib import OptionKey def get_meson_info_file(info_dir: str) -> str: return os.path.join(info_dir, 'meson-info.json') def get_meson_introspection_version() -> str: return '1.0.0' def get_meson_introspection_required_version() -> T.List[str]: return ['>=1.0', '<2.0'] class IntroCommand: def __init__(self, desc: str, func: T.Optional[T.Callable[[], T.Union[dict, list]]] = None, no_bd: T.Optional[T.Callable[[IntrospectionInterpreter], T.Union[dict, list]]] = None) -> None: self.desc = desc + '.' 
self.func = func self.no_bd = no_bd def get_meson_introspection_types(coredata: T.Optional[cdata.CoreData] = None, builddata: T.Optional[build.Build] = None, backend: T.Optional[backends.Backend] = None, sourcedir: T.Optional[str] = None) -> 'T.Mapping[str, IntroCommand]': if backend and builddata: benchmarkdata = backend.create_test_serialisation(builddata.get_benchmarks()) testdata = backend.create_test_serialisation(builddata.get_tests()) installdata = backend.create_install_data() interpreter = backend.interpreter else: benchmarkdata = testdata = installdata = None # Enforce key order for argparse return collections.OrderedDict([ ('ast', IntroCommand('Dump the AST of the meson file', no_bd=dump_ast)), ('benchmarks', IntroCommand('List all benchmarks', func=lambda: list_benchmarks(benchmarkdata))), ('buildoptions', IntroCommand('List all build options', func=lambda: list_buildoptions(coredata), no_bd=list_buildoptions_from_source)), ('buildsystem_files', IntroCommand('List files that make up the build system', func=lambda: list_buildsystem_files(builddata, interpreter))), ('dependencies', IntroCommand('List external dependencies', func=lambda: list_deps(coredata), no_bd=list_deps_from_source)), ('scan_dependencies', IntroCommand('Scan for dependencies used in the meson.build file', no_bd=list_deps_from_source)), ('installed', IntroCommand('List all installed files and directories', func=lambda: list_installed(installdata))), ('projectinfo', IntroCommand('Information about projects', func=lambda: list_projinfo(builddata), no_bd=list_projinfo_from_source)), ('targets', IntroCommand('List top level targets', func=lambda: list_targets(builddata, installdata, backend), no_bd=list_targets_from_source)), ('tests', IntroCommand('List all unit tests', func=lambda: list_tests(testdata))), ]) def add_arguments(parser: argparse.ArgumentParser) -> None: intro_types = get_meson_introspection_types() for key, val in intro_types.items(): flag = '--' + key.replace('_', '-') 
parser.add_argument(flag, action='store_true', dest=key, default=False, help=val.desc) parser.add_argument('--backend', choices=sorted(cdata.backendlist), dest='backend', default='ninja', help='The backend to use for the --buildoptions introspection.') parser.add_argument('-a', '--all', action='store_true', dest='all', default=False, help='Print all available information.') parser.add_argument('-i', '--indent', action='store_true', dest='indent', default=False, help='Enable pretty printed JSON.') parser.add_argument('-f', '--force-object-output', action='store_true', dest='force_dict', default=False, help='Always use the new JSON format for multiple entries (even for 0 and 1 introspection commands)') parser.add_argument('builddir', nargs='?', default='.', help='The build directory') def dump_ast(intr: IntrospectionInterpreter) -> T.Dict[str, T.Any]: printer = AstJSONPrinter() intr.ast.accept(printer) return printer.result def list_installed(installdata: backends.InstallData) -> T.Dict[str, str]: res = {} if installdata is not None: for t in installdata.targets: res[os.path.join(installdata.build_dir, t.fname)] = \ os.path.join(installdata.prefix, t.outdir, os.path.basename(t.fname)) for alias in t.aliases.keys(): res[os.path.join(installdata.build_dir, alias)] = \ os.path.join(installdata.prefix, t.outdir, os.path.basename(alias)) for i in installdata.data: res[i.path] = os.path.join(installdata.prefix, i.install_path) for i in installdata.headers: res[i.path] = os.path.join(installdata.prefix, i.install_path, os.path.basename(i.path)) for i in installdata.man: res[i.path] = os.path.join(installdata.prefix, i.install_path) for i in installdata.install_subdirs: res[i.path] = os.path.join(installdata.prefix, i.install_path) return res def get_target_dir(coredata: cdata.CoreData, subdir: str) -> str: if coredata.get_option(OptionKey('layout')) == 'flat': return 'meson-out' else: return subdir def list_targets_from_source(intr: IntrospectionInterpreter) -> 
T.List[T.Dict[str, T.Union[bool, str, T.List[T.Union[str, T.Dict[str, T.Union[str, T.List[str], bool]]]]]]]: tlist = [] # type: T.List[T.Dict[str, T.Union[bool, str, T.List[T.Union[str, T.Dict[str, T.Union[str, T.List[str], bool]]]]]]] root_dir = Path(intr.source_root) def nodes_to_paths(node_list: T.List[BaseNode]) -> T.List[Path]: res = [] # type: T.List[Path] for n in node_list: args = [] # type: T.List[BaseNode] if isinstance(n, FunctionNode): args = list(n.args.arguments) if n.func_name in build_target_functions: args.pop(0) elif isinstance(n, ArrayNode): args = n.args.arguments elif isinstance(n, ArgumentNode): args = n.arguments for j in args: if isinstance(j, StringNode): assert isinstance(j.value, str) res += [Path(j.value)] elif isinstance(j, str): res += [Path(j)] res = [root_dir / i['subdir'] / x for x in res] res = [x.resolve() for x in res] return res for i in intr.targets: sources = nodes_to_paths(i['sources']) extra_f = nodes_to_paths(i['extra_files']) outdir = get_target_dir(intr.coredata, i['subdir']) tlist += [{ 'name': i['name'], 'id': i['id'], 'type': i['type'], 'defined_in': i['defined_in'], 'filename': [os.path.join(outdir, x) for x in i['outputs']], 'build_by_default': i['build_by_default'], 'target_sources': [{ 'language': 'unknown', 'compiler': [], 'parameters': [], 'sources': [str(x) for x in sources], 'generated_sources': [] }], 'extra_files': [str(x) for x in extra_f], 'subproject': None, # Subprojects are not supported 'installed': i['installed'] }] return tlist def list_targets(builddata: build.Build, installdata: backends.InstallData, backend: backends.Backend) -> T.List[T.Any]: tlist = [] # type: T.List[T.Any] build_dir = builddata.environment.get_build_dir() src_dir = builddata.environment.get_source_dir() # Fast lookup table for installation files install_lookuptable = {} for i in installdata.targets: out = [os.path.join(installdata.prefix, i.outdir, os.path.basename(i.fname))] out += [os.path.join(installdata.prefix, i.outdir, 
os.path.basename(x)) for x in i.aliases] install_lookuptable[os.path.basename(i.fname)] = [str(PurePath(x)) for x in out] for (idname, target) in builddata.get_targets().items(): if not isinstance(target, build.Target): raise RuntimeError('The target object in `builddata.get_targets()` is not of type `build.Target`. Please file a bug with this error message.') outdir = get_target_dir(builddata.environment.coredata, target.subdir) t = { 'name': target.get_basename(), 'id': idname, 'type': target.get_typename(), 'defined_in': os.path.normpath(os.path.join(src_dir, target.subdir, 'meson.build')), 'filename': [os.path.join(build_dir, outdir, x) for x in target.get_outputs()], 'build_by_default': target.build_by_default, 'target_sources': backend.get_introspection_data(idname, target), 'extra_files': [os.path.normpath(os.path.join(src_dir, x.subdir, x.fname)) for x in target.extra_files], 'subproject': target.subproject or None } if installdata and target.should_install(): t['installed'] = True ifn = [install_lookuptable.get(x, [None]) for x in target.get_outputs()] t['install_filename'] = [x for sublist in ifn for x in sublist] # flatten the list else: t['installed'] = False tlist.append(t) return tlist def list_buildoptions_from_source(intr: IntrospectionInterpreter) -> T.List[T.Dict[str, T.Union[str, bool, int, T.List[str]]]]: subprojects = [i['name'] for i in intr.project_data['subprojects']] return list_buildoptions(intr.coredata, subprojects) def list_buildoptions(coredata: cdata.CoreData, subprojects: T.Optional[T.List[str]] = None) -> T.List[T.Dict[str, T.Union[str, bool, int, T.List[str]]]]: optlist = [] # type: T.List[T.Dict[str, T.Union[str, bool, int, T.List[str]]]] subprojects = subprojects or [] dir_option_names = set(cdata.BUILTIN_DIR_OPTIONS) test_option_names = {OptionKey('errorlogs'), OptionKey('stdsplit')} dir_options: 'cdata.KeyedOptionDictType' = {} test_options: 'cdata.KeyedOptionDictType' = {} core_options: 'cdata.KeyedOptionDictType' = {} for k, 
v in coredata.options.items(): if k in dir_option_names: dir_options[k] = v elif k in test_option_names: test_options[k] = v elif k.is_builtin(): core_options[k] = v if not v.yielding: for s in subprojects: core_options[k.evolve(subproject=s)] = v def add_keys(options: 'cdata.KeyedOptionDictType', section: str) -> None: for key, opt in sorted(options.items()): optdict = {'name': str(key), 'value': opt.value, 'section': section, 'machine': key.machine.get_lower_case_name() if coredata.is_per_machine_option(key) else 'any'} if isinstance(opt, cdata.UserStringOption): typestr = 'string' elif isinstance(opt, cdata.UserBooleanOption): typestr = 'boolean' elif isinstance(opt, cdata.UserComboOption): optdict['choices'] = opt.choices typestr = 'combo' elif isinstance(opt, cdata.UserIntegerOption): typestr = 'integer' elif isinstance(opt, cdata.UserArrayOption): typestr = 'array' else: raise RuntimeError("Unknown option type") optdict['type'] = typestr optdict['description'] = opt.description optlist.append(optdict) add_keys(core_options, 'core') add_keys({k: v for k, v in coredata.options.items() if k.is_backend()}, 'backend') add_keys({k: v for k, v in coredata.options.items() if k.is_base()}, 'base') add_keys( {k: v for k, v in sorted(coredata.options.items(), key=lambda i: i[0].machine) if k.is_compiler()}, 'compiler', ) add_keys(dir_options, 'directory') add_keys({k: v for k, v in coredata.options.items() if k.is_project()}, 'user') add_keys(test_options, 'test') return optlist def find_buildsystem_files_list(src_dir: str) -> T.List[str]: # I feel dirty about this. But only slightly. 
filelist = [] # type: T.List[str] for root, _, files in os.walk(src_dir): for f in files: if f == 'meson.build' or f == 'meson_options.txt': filelist.append(os.path.relpath(os.path.join(root, f), src_dir)) return filelist def list_buildsystem_files(builddata: build.Build, interpreter: Interpreter) -> T.List[str]: src_dir = builddata.environment.get_source_dir() filelist = interpreter.get_build_def_files() # type: T.List[str] filelist = [PurePath(src_dir, x).as_posix() for x in filelist] return filelist def list_deps_from_source(intr: IntrospectionInterpreter) -> T.List[T.Dict[str, T.Union[str, bool]]]: result = [] # type: T.List[T.Dict[str, T.Union[str, bool]]] for i in intr.dependencies: keys = [ 'name', 'required', 'version', 'has_fallback', 'conditional', ] result += [{k: v for k, v in i.items() if k in keys}] return result def list_deps(coredata: cdata.CoreData) -> T.List[T.Dict[str, T.Union[str, T.List[str]]]]: result = [] # type: T.List[T.Dict[str, T.Union[str, T.List[str]]]] for d in coredata.deps.host.values(): if d.found(): result += [{'name': d.name, 'version': d.get_version(), 'compile_args': d.get_compile_args(), 'link_args': d.get_link_args()}] return result def get_test_list(testdata: T.List[backends.TestSerialisation]) -> T.List[T.Dict[str, T.Union[str, int, T.List[str], T.Dict[str, str]]]]: result = [] # type: T.List[T.Dict[str, T.Union[str, int, T.List[str], T.Dict[str, str]]]] for t in testdata: to = {} # type: T.Dict[str, T.Union[str, int, T.List[str], T.Dict[str, str]]] if isinstance(t.fname, str): fname = [t.fname] else: fname = t.fname to['cmd'] = fname + t.cmd_args if isinstance(t.env, build.EnvironmentVariables): to['env'] = t.env.get_env({}) else: to['env'] = t.env to['name'] = t.name to['workdir'] = t.workdir to['timeout'] = t.timeout to['suite'] = t.suite to['is_parallel'] = t.is_parallel to['priority'] = t.priority to['protocol'] = str(t.protocol) to['depends'] = t.depends result.append(to) return result def list_tests(testdata: 
T.List[backends.TestSerialisation]) -> T.List[T.Dict[str, T.Union[str, int, T.List[str], T.Dict[str, str]]]]: return get_test_list(testdata) def list_benchmarks(benchdata: T.List[backends.TestSerialisation]) -> T.List[T.Dict[str, T.Union[str, int, T.List[str], T.Dict[str, str]]]]: return get_test_list(benchdata) def list_projinfo(builddata: build.Build) -> T.Dict[str, T.Union[str, T.List[T.Dict[str, str]]]]: result = {'version': builddata.project_version, 'descriptive_name': builddata.project_name, 'subproject_dir': builddata.subproject_dir} # type: T.Dict[str, T.Union[str, T.List[T.Dict[str, str]]]] subprojects = [] for k, v in builddata.subprojects.items(): c = {'name': k, 'version': v, 'descriptive_name': builddata.projects.get(k)} # type: T.Dict[str, str] subprojects.append(c) result['subprojects'] = subprojects return result def list_projinfo_from_source(intr: IntrospectionInterpreter) -> T.Dict[str, T.Union[str, T.List[T.Dict[str, str]]]]: sourcedir = intr.source_root files = find_buildsystem_files_list(sourcedir) files = [os.path.normpath(x) for x in files] for i in intr.project_data['subprojects']: basedir = os.path.join(intr.subproject_dir, i['name']) i['buildsystem_files'] = [x for x in files if x.startswith(basedir)] files = [x for x in files if not x.startswith(basedir)] intr.project_data['buildsystem_files'] = files intr.project_data['subproject_dir'] = intr.subproject_dir return intr.project_data def print_results(options: argparse.Namespace, results: T.Sequence[T.Tuple[str, T.Union[dict, T.List[T.Any]]]], indent: int) -> int: if not results and not options.force_dict: print('No command specified') return 1 elif len(results) == 1 and not options.force_dict: # Make to keep the existing output format for a single option print(json.dumps(results[0][1], indent=indent)) else: out = {} for i in results: out[i[0]] = i[1] print(json.dumps(out, indent=indent)) return 0 def get_infodir(builddir: T.Optional[str] = None) -> str: infodir = 'meson-info' if builddir 
is not None: infodir = os.path.join(builddir, infodir) return infodir def get_info_file(infodir: str, kind: T.Optional[str] = None) -> str: return os.path.join(infodir, 'meson-info.json' if not kind else f'intro-{kind}.json') def load_info_file(infodir: str, kind: T.Optional[str] = None) -> T.Any: with open(get_info_file(infodir, kind)) as fp: return json.load(fp) def run(options: argparse.Namespace) -> int: datadir = 'meson-private' infodir = get_infodir(options.builddir) if options.builddir is not None: datadir = os.path.join(options.builddir, datadir) indent = 4 if options.indent else None results = [] # type: T.List[T.Tuple[str, T.Union[dict, T.List[T.Any]]]] sourcedir = '.' if options.builddir == 'meson.build' else options.builddir[:-11] intro_types = get_meson_introspection_types(sourcedir=sourcedir) if 'meson.build' in [os.path.basename(options.builddir), options.builddir]: # Make sure that log entries in other parts of meson don't interfere with the JSON output mlog.disable() backend = backends.get_backend_from_name(options.backend) assert backend is not None intr = IntrospectionInterpreter(sourcedir, '', backend.name, visitors = [AstIDGenerator(), AstIndentationGenerator(), AstConditionLevel()]) intr.analyze() # Re-enable logging just in case mlog.enable() for key, val in intro_types.items(): if (not options.all and not getattr(options, key, False)) or not val.no_bd: continue results += [(key, val.no_bd(intr))] return print_results(options, results, indent) try: raw = load_info_file(infodir) intro_vers = raw.get('introspection', {}).get('version', {}).get('full', '0.0.0') except FileNotFoundError: if not os.path.isdir(datadir) or not os.path.isdir(infodir): print('Current directory is not a meson build directory.\n' 'Please specify a valid build dir or change the working directory to it.') else: print('Introspection file {} does not exist.\n' 'It is also possible that the build directory was generated with an old\n' 'meson version. 
Please regenerate it in this case.'.format(get_info_file(infodir))) return 1 vers_to_check = get_meson_introspection_required_version() for i in vers_to_check: if not mesonlib.version_compare(intro_vers, i): print('Introspection version {} is not supported. ' 'The required version is: {}' .format(intro_vers, ' and '.join(vers_to_check))) return 1 # Extract introspection information from JSON for i in intro_types.keys(): if not intro_types[i].func: continue if not options.all and not getattr(options, i, False): continue try: results += [(i, load_info_file(infodir, i))] except FileNotFoundError: print('Introspection file {} does not exist.'.format(get_info_file(infodir, i))) return 1 return print_results(options, results, indent) updated_introspection_files = [] # type: T.List[str] def write_intro_info(intro_info: T.Sequence[T.Tuple[str, T.Union[dict, T.List[T.Any]]]], info_dir: str) -> None: global updated_introspection_files for i in intro_info: out_file = os.path.join(info_dir, 'intro-{}.json'.format(i[0])) tmp_file = os.path.join(info_dir, 'tmp_dump.json') with open(tmp_file, 'w') as fp: json.dump(i[1], fp) fp.flush() # Not sure if this is needed os.replace(tmp_file, out_file) updated_introspection_files += [i[0]] def generate_introspection_file(builddata: build.Build, backend: backends.Backend) -> None: coredata = builddata.environment.get_coredata() intro_types = get_meson_introspection_types(coredata=coredata, builddata=builddata, backend=backend) intro_info = [] # type: T.List[T.Tuple[str, T.Union[dict, T.List[T.Any]]]] for key, val in intro_types.items(): if not val.func: continue intro_info += [(key, val.func())] write_intro_info(intro_info, builddata.environment.info_dir) def update_build_options(coredata: cdata.CoreData, info_dir: str) -> None: intro_info = [ ('buildoptions', list_buildoptions(coredata)) ] write_intro_info(intro_info, info_dir) def split_version_string(version: str) -> T.Dict[str, T.Union[str, int]]: vers_list = version.split('.') return 
{ 'full': version, 'major': int(vers_list[0] if len(vers_list) > 0 else 0), 'minor': int(vers_list[1] if len(vers_list) > 1 else 0), 'patch': int(vers_list[2] if len(vers_list) > 2 else 0) } def write_meson_info_file(builddata: build.Build, errors: list, build_files_updated: bool = False) -> None: global updated_introspection_files info_dir = builddata.environment.info_dir info_file = get_meson_info_file(info_dir) intro_types = get_meson_introspection_types() intro_info = {} for i in intro_types.keys(): if not intro_types[i].func: continue intro_info[i] = { 'file': f'intro-{i}.json', 'updated': i in updated_introspection_files } info_data = { 'meson_version': split_version_string(cdata.version), 'directories': { 'source': builddata.environment.get_source_dir(), 'build': builddata.environment.get_build_dir(), 'info': info_dir, }, 'introspection': { 'version': split_version_string(get_meson_introspection_version()), 'information': intro_info, }, 'build_files_updated': build_files_updated, } if errors: info_data['error'] = True info_data['error_list'] = [x if isinstance(x, str) else str(x) for x in errors] else: info_data['error'] = False # Write the data to disc tmp_file = os.path.join(info_dir, 'tmp_dump.json') with open(tmp_file, 'w') as fp: json.dump(info_data, fp) fp.flush() os.replace(tmp_file, info_file)
apache-2.0
-1,309,064,487,587,284,500
44.297597
172
0.615278
false
djrrb/OldGlory
oldGlory.py
1
7067
""" OLD GLORY By David Jonathan Ross <http://www.djr.com> This drawbot script will draw the American Flag. It's also responsive! I made this to experiment with Drawbot Variables. For the most part, it follows the rules here: http://en.wikipedia.org/wiki/Flag_of_the_United_States#Specifications It does make some small allowances in order to get better results when the variables are customized. Wouldn't it be cool if the stars followed the historical patterns, starting with the ring of 13? Maybe next time. """ import random from AppKit import NSColor ###### # SETTING GLOBAL VARIABLES ###### # define some of our key variables as special DrawBot variables, which can be manipulated with a simple UI Variable([ dict( name='flagSize', ui='Slider', args=dict( minValue=1, maxValue=10, value=5, tickMarkCount=10, stopOnTickMarks=True ) ), dict( name='proportion', ui='Slider', args=dict( minValue=1, maxValue=3, value=1.9, tickMarkCount=21, stopOnTickMarks=True ) ), dict( name='stripeCount', ui='Slider', args=dict( minValue=1, maxValue=21, value=13, tickMarkCount=11, stopOnTickMarks=True ) ), dict( name='starRows', ui='Slider', args=dict( minValue=1, maxValue=21, value=9, tickMarkCount=11, stopOnTickMarks=True ) ), dict( name='starCols', ui='Slider', args=dict( minValue=1, maxValue=21, value=11, tickMarkCount=11, stopOnTickMarks=True ) ), dict( name='oddStripeColor', ui='ColorWell', args=dict(color=NSColor.redColor()) ), dict( name='evenStripeColor', ui='ColorWell', args=dict(color=NSColor.whiteColor()) ), dict( name='cantonColor', ui='ColorWell', args=dict(color=NSColor.blueColor()) ), dict( name='starColor', ui='ColorWell', args=dict(color=NSColor.whiteColor()) ), dict( name='jasperize', ui='Slider', args=dict( minValue=1, maxValue=6, value=1, tickMarkCount=6, stopOnTickMarks=True ) ), ], globals()) # here are some other variables that will help us draw the flag inch = 72 # our base unit, the height of the flag unit = flagSize * inch # some of the variables come out of the UI 
as floats, but I need them as ints # since I intend to use them with the range() function jasperize = int(round(jasperize)) stripeCount = int(round(stripeCount)) starRows = int(round(starRows)) starCols = int(round(starCols)) # flag dimensions #proportion = 1.9 ###### this is now an adjustable variable pageWidth = unit * proportion pageHeight = unit # stripes stripeHeight = pageHeight / int(round(stripeCount)) # canton cantonHeight = stripeHeight * ( int(round(stripeCount)/2) + 1) cantonWidth = (2 / 5) * pageWidth # stars starColWidth = cantonWidth / (starCols+1) starRowWidth = cantonHeight / (starRows+1) # starDiameter should be defined as (4 / 5) * stripeHeight, but this rule # allows decent star sizing regardless of the number of starCols or starRows starDiameter = min(starColWidth, starRowWidth) # let's define the drawing of the star as a function, since we will be using it a lot def star(x, y, d, b=None): # this is a hacky, non-mathematically correct star made from two polygons # if I were good at math, I would have drawn this a smarter way fill(starColor) r = d/2 # an upside down triangle newPath() moveTo((x-r/1.1, y+r/3.5)) lineTo((x+r/1.1, y+r/3.5)) lineTo((x, y-r/2.6)) closePath() drawPath() # a right side up triangle with a divet in the bottom newPath() moveTo((x, y+r)) lineTo((x-r/1.6, y-r/1.3)) lineTo((x, y-r/2.6)) lineTo((x+r/1.6, y-r/1.3)) closePath() drawPath() ###### # BUILD THE FLAG ###### # set page size size(pageWidth, pageHeight) # Loop through all the times we are going to draw the flag for flag in range(jasperize): # Stripes # build the stripes up from the origin y = 0 for stripe in range(stripeCount): if stripe % 2: fill(evenStripeColor) else: fill(oddStripeColor) rect(0, y, pageWidth, stripeHeight) # increment the y value so we travel up the page y += pageHeight/stripeCount # CANTON (that's the blue thing) # make a rectangle from the top left corner fill(cantonColor) rect(0, pageHeight-cantonHeight, cantonWidth, cantonHeight) # STARS # the 
american flag does not contain an even grid of stars # some rows have 6 stars, others have 5 # some columns have 5 stars, others have 4 # but if we think of the canton as a checkerboard, there is a 9x11 grid # where each position can have either a star or a gap. # let's define the position where we will start drawing the stars starOriginX = starColWidth starOriginY = pageHeight - cantonHeight + starRowWidth # now let's define some variables that we will change as we loop through starX = starOriginX starY = starOriginY # loop through all of the rows for y in range(starRows): # loop through all of the columns for x in range(starCols): # if both row and column are odd, draw the star if not x % 2 and not y % 2: star(starX, starY, starDiameter) # if both row and column are even, also draw the star: elif x % 2 and y % 2: star(starX, starY, starDiameter) # if the row is odd and the column is even, or vice versa # we should draw nothing # increment the x value to continue across the row starX += starColWidth # when we are done with the row, reset the x value and increment the y starX = starOriginX starY += starRowWidth # Draw the shadow as two rectangles shadowLength = height() / 30 fill(0, 0, 0, .5) rect(shadowLength, -shadowLength*2, width()+shadowLength, shadowLength*2) rect(width(), 0, shadowLength*2, height()-shadowLength) # now that we are done drawing the flag # scale the canvas, and relocate our canvas's position to the center # this way, all future drawing will happen at a new scale, for jasperization scaleFactor = .78 widthDiff = width()-width()*scaleFactor heightDiff = height()-height()*scaleFactor translate(widthDiff/2, heightDiff/2) scale(scaleFactor) # keep your eye on that grand old flag!
mit
6,339,230,936,733,769,000
27.963115
113
0.600538
false
kalliope-project/kalliope
kalliope/__init__.py
1
8965
#!/usr/bin/env python # coding: utf8 import argparse import logging import time from kalliope.core import Utils from kalliope.core.ConfigurationManager import SettingLoader from kalliope.core.ConfigurationManager.BrainLoader import BrainLoader from kalliope.core.SignalLauncher import SignalLauncher from flask import Flask from kalliope.core.RestAPI.FlaskAPI import FlaskAPI from ._version import version_str import signal import sys from kalliope.core.ResourcesManager import ResourcesManager from kalliope.core.SynapseLauncher import SynapseLauncher from kalliope.core.OrderAnalyser import OrderAnalyser logging.basicConfig() logger = logging.getLogger("kalliope") def signal_handler(signal, frame): """ Used to catch a keyboard signal like Ctrl+C in order to kill the kalliope program. :param signal: signal handler :param frame: execution frame """ print("\n") Utils.print_info("Ctrl+C pressed. Killing Kalliope") sys.exit(0) # actions available ACTION_LIST = ["start", "gui", "install", "uninstall"] def parse_args(args): """ Parsing function :param args: arguments passed from the command line :return: return parser """ # create arguments parser = argparse.ArgumentParser(description='Kalliope') parser.add_argument("action", help="[start|install|uninstall]") parser.add_argument("--run-synapse", help="Name of a synapse to load surrounded by quote") parser.add_argument("--run-order", help="order surrounded by a quote") parser.add_argument("--brain-file", help="Full path of a brain file") parser.add_argument("--debug", action='store_true', help="Show debug output") parser.add_argument("--git-url", help="Git URL of the neuron to install") parser.add_argument("--neuron-name", help="Neuron name to uninstall") parser.add_argument("--stt-name", help="STT name to uninstall") parser.add_argument("--tts-name", help="TTS name to uninstall") parser.add_argument("--trigger-name", help="Trigger name to uninstall") parser.add_argument("--signal-name", help="Signal name to uninstall") 
parser.add_argument("--deaf", action='store_true', help="Starts Kalliope deaf") parser.add_argument('-v', '--version', action='version', version='Kalliope ' + version_str) # parse arguments from script parameters return parser.parse_args(args) def main(): """Entry point of Kalliope program.""" # parse argument. the script name is removed try: parser = parse_args(sys.argv[1:]) except SystemExit: sys.exit(1) # check if we want debug configure_logging(debug=parser.debug) logger.debug("kalliope args: %s" % parser) # by default, no brain file is set. # Use the default one: brain.yml in the root path brain_file = None # check if user set a brain.yml file if parser.brain_file: brain_file = parser.brain_file # check the user provide a valid action if parser.action not in ACTION_LIST: Utils.print_warning("%s is not a recognised action\n" % parser.action) sys.exit(1) # install modules if parser.action == "install": if not parser.git_url: Utils.print_danger("You must specify the git url") sys.exit(1) else: parameters = { "git_url": parser.git_url } res_manager = ResourcesManager(**parameters) res_manager.install() return # uninstall modules if parser.action == "uninstall": if not parser.neuron_name \ and not parser.stt_name \ and not parser.tts_name \ and not parser.trigger_name \ and not parser.signal_name: Utils.print_danger("You must specify a module name with " "--neuron-name " "or --stt-name " "or --tts-name " "or --trigger-name " "or --signal-name") sys.exit(1) else: res_manager = ResourcesManager() res_manager.uninstall(neuron_name=parser.neuron_name, stt_name=parser.stt_name, tts_name=parser.tts_name, trigger_name=parser.trigger_name, signal_name=parser.signal_name) return # load the brain once brain_loader = BrainLoader(file_path=brain_file) brain = brain_loader.brain # load settings # get global configuration once settings_loader = SettingLoader() settings = settings_loader.settings if parser.action == "start": # user set a synapse to start if parser.run_synapse is 
not None: SynapseLauncher.start_synapse_by_list_name([parser.run_synapse], brain=brain) if parser.run_order is not None: SynapseLauncher.run_matching_synapse_from_order(parser.run_order, brain=brain, settings=settings, is_api_call=False) if (parser.run_synapse is None) and (parser.run_order is None): # if --deaf if parser.deaf: settings.options.deaf = True # start rest api start_rest_api(settings, brain) start_kalliope(settings, brain) class AppFilter(logging.Filter): """ Class used to add a custom entry into the logger """ def filter(self, record): record.app_version = "kalliope-%s" % version_str return True def configure_logging(debug=None): """ Prepare log folder in current home directory. :param debug: If true, set the lof level to debug """ logger = logging.getLogger("kalliope") logger.addFilter(AppFilter()) logger.propagate = False syslog = logging.StreamHandler() syslog.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s :: %(app_version)s :: %(message)s', "%Y-%m-%d %H:%M:%S") syslog.setFormatter(formatter) if debug: logger.setLevel(logging.DEBUG) else: logger.setLevel(logging.INFO) # add the handlers to logger logger.addHandler(syslog) logger.debug("Logger ready") def get_list_signal_class_to_load(brain): """ Return a list of signal class name For all synapse, each signal type is added to a list only if the signal is not yet present in the list :param brain: Brain object :type brain: Brain :return: set of signal class """ list_signal_class_name = set() for synapse in brain.synapses: for signal_object in synapse.signals: list_signal_class_name.add(signal_object.name) logger.debug("[Kalliope entrypoint] List of signal class to load: %s" % list_signal_class_name) return list_signal_class_name def start_rest_api(settings, brain): """ Start the Rest API if asked in the user settings """ # run the api if the user want it if settings.rest_api.active: Utils.print_info("Starting REST API Listening port: %s" % settings.rest_api.port) app = 
Flask(__name__) flask_api = FlaskAPI(app=app, port=settings.rest_api.port, brain=brain, allowed_cors_origin=settings.rest_api.allowed_cors_origin) flask_api.daemon = True flask_api.start() def start_kalliope(settings, brain): """ Start all signals declared in the brain """ # start kalliope Utils.print_success("Starting Kalliope") Utils.print_info("Press Ctrl+C for stopping") # catch signal for killing on Ctrl+C pressed signal.signal(signal.SIGINT, signal_handler) # get a list of signal class to load from declared synapse in the brain # this list will contain string of signal class type. # For example, if the brain contains multiple time the signal type "order", the list will be ["order"] # If the brain contains some synapse with "order" and "event", the list will be ["order", "event"] list_signals_class_to_load = get_list_signal_class_to_load(brain) # start each class name for signal_class_name in list_signals_class_to_load: signal_instance = SignalLauncher.launch_signal_class_by_name(signal_name=signal_class_name, settings=settings) if signal_instance is not None: signal_instance.daemon = True signal_instance.start() while True: # keep main thread alive time.sleep(0.1)
gpl-3.0
-1,805,813,272,564,953,000
33.217557
106
0.608254
false
SU-ECE-17-7/hotspotter
hscom/fileio.py
1
12148
from __future__ import division, print_function import __common__ (print, print_, print_on, print_off, rrr, profile) = __common__.init(__name__, '[io]') # Python import os import fnmatch import pickle import cPickle from os.path import normpath, exists, realpath, join, expanduser, dirname import datetime import time # Science import numpy as np import cv2 from PIL import Image from PIL.ExifTags import TAGS # Hotspotter import helpers #import skimage #import shelve #import datetime #import timeit VERBOSE_IO = 0 # 2 # --- Saving --- def save_npy(fpath, data): with open(fpath, 'wb') as file: np.save(file, data) def save_npz(fpath, data): with open(fpath, 'wb') as file: np.savez(file, data) def save_cPkl(fpath, data): with open(fpath, 'wb') as file: cPickle.dump(data, file, cPickle.HIGHEST_PROTOCOL) def save_pkl(fpath, data): with open(fpath, 'wb') as file: pickle.dump(data, file, pickle.HIGHEST_PROTOCOL) # --- Loading --- def load_npz_memmap(fpath): with open(fpath, 'rb') as file: npz = np.load(file, mmap_mode='r') data = npz['arr_0'] npz.close() return data def load_npz(fpath): with open(fpath, 'rb') as file: npz = np.load(file, mmap_mode=None) data = npz['arr_0'] npz.close() return data def load_npy(fpath): with open(fpath, 'rb') as file: data = np.load(file) return data def load_cPkl(fpath): with open(fpath, 'rb') as file: data = cPickle.load(file) return data def load_pkl(fpath): with open(fpath, 'rb') as file: data = pickle.load(file) return data ext2_load_func = { '.npy': load_npy, '.npz': load_npz, '.cPkl': load_cPkl, '.pkl': load_pkl} ext2_save_func = { '.npy': save_npy, '.npz': save_npz, '.cPkl': save_cPkl, '.pkl': save_pkl} def debug_smart_load(dpath='', fname='*', uid='*', ext='*'): pattern = fname + uid + ext print('[io] debug_smart_load(): dpath=%r' % (dpath)) for fname_ in os.listdir(dpath): if fnmatch.fnmatch(fname_, pattern): #fpath = join(dpath, fname_) print(fname_) # --- Smart Load/Save --- def __args2_fpath(dpath, fname, uid, ext): if len(ext) > 0 
and ext[0] != '.': raise Exception('Fatal Error: Please be explicit and use a dot in ext') fname_uid = fname + uid if len(fname_uid) > 128: fname_uid = helpers.hashstr(fname_uid) fpath = join(dpath, fname_uid + ext) fpath = realpath(fpath) fpath = normpath(fpath) return fpath @profile def smart_save(data, dpath='', fname='', uid='', ext='', verbose=VERBOSE_IO): ''' Saves data to the direcotry speficied ''' helpers.ensuredir(dpath) fpath = __args2_fpath(dpath, fname, uid, ext) if verbose: if verbose > 1: print('[io]') print(('[io] smart_save(dpath=%r,\n' + (' ' * 11) + 'fname=%r, uid=%r, ext=%r)') % (dpath, fname, uid, ext)) ret = __smart_save(data, fpath, verbose) if verbose > 1: print('[io]') return ret @profile def smart_load(dpath='', fname='', uid='', ext='', verbose=VERBOSE_IO, **kwargs): ''' Loads data to the direcotry speficied ''' fpath = __args2_fpath(dpath, fname, uid, ext) if verbose: if verbose > 1: print('[io]') print(('[io] smart_load(dpath=%r,\n' + (' ' * 11) + 'fname=%r, uid=%r, ext=%r)') % (dpath, fname, uid, ext)) data = __smart_load(fpath, verbose, **kwargs) if verbose > 1: print('[io]') return data @profile def __smart_save(data, fpath, verbose): ' helper ' dpath, fname = os.path.split(fpath) fname_noext, ext_ = os.path.splitext(fname) save_func = ext2_save_func[ext_] if verbose > 1: print('[io] saving: %r' % (type(data),)) try: save_func(fpath, data) if verbose > 1: print('[io] saved %s ' % (filesize_str(fpath),)) except Exception as ex: print('[io] ! 
Exception will saving %r' % fpath) print(helpers.indent(repr(ex), '[io] ')) raise @profile def __smart_load(fpath, verbose, allow_alternative=False, can_fail=True, **kwargs): ' helper ' # Get components of the filesname dpath, fname = os.path.split(fpath) fname_noext, ext_ = os.path.splitext(fname) # If exact path doesnt exist if not exists(fpath): print('[io] fname=%r does not exist' % fname) if allow_alternative: # allows alternative extension convert_alternative(fpath, verbose, can_fail=can_fail, **kwargs) # Ensure a valid extension if ext_ == '': raise NotImplementedError('') else: load_func = ext2_load_func[ext_] # Do actual data loading try: if verbose > 1: print('[io] loading ' + filesize_str(fpath)) data = load_func(fpath) if verbose: print('[io]... loaded data') except Exception as ex: if verbose: print('[io] ! Exception while loading %r' % fpath) print('[io] caught ex=%r' % (ex,)) data = None if not can_fail: raise if data is None: if verbose: print('[io]... did not load %r' % fpath) return data #---- # --- Util --- def convert_alternative(fpath, verbose, can_fail): # check for an alternative (maybe old style or ext) file alternatives = find_alternatives(fpath, verbose) dpath, fname = os.path.split(fpath) if len(alternatives) == 0: fail_msg = '[io] ...no alternatives to %r' % fname if verbose: print(fail_msg) if can_fail: return None else: raise IOError(fail_msg) else: #load and convert alternative alt_fpath = alternatives[0] if verbose > 1: print('[io] ...converting %r' % alt_fpath) data = __smart_load(alt_fpath, verbose, allow_alternative=False) __smart_save(data, fpath, verbose) return data def find_alternatives(fpath, verbose): # Check if file is in another format dpath, fname = os.path.split(fpath) fname_noext, ext_ = os.path.splitext(fname) fpath_noext = join(dpath, fname_noext) alternatives = [] # Find files with a different for alt_ext in list(['.npy', '.npz', '.cPkl', '.pkl']): alt_fpath = fpath_noext + alt_ext if exists(alt_fpath): 
alternatives.append(alt_fpath) if verbose > 1: # Print num alternatives / filesizes print('[io] Found %d alternate(s)' % len(alternatives)) for alt_fpath in iter(alternatives): print('[io] ' + filesize_str(alt_fpath)) return alternatives def sanatize_fpath(fpath, ext=None): # UNUSED! 'Ensures a filepath has correct the extension' dpath, fname = os.path.split(fpath) fname_noext, ext_ = os.path.splitext(fname) if not ext is None and ext_ != ext: fname = fname_noext + ext fpath = normpath(join(dpath, fname)) return fpath def print_filesize(fpath): print(filesize_str(fpath)) @profile def filesize_str(fpath): _, fname = os.path.split(fpath) mb_str = helpers.file_megabytes_str(fpath) return 'filesize(%r)=%s' % (fname, mb_str) @profile def exiftime_to_unixtime(datetime_str): try: dt = datetime.datetime.strptime(datetime_str, '%Y:%m:%d %H:%M:%S') return time.mktime(dt.timetuple()) except TypeError: #if datetime_str is None: #return -1 return -1 except ValueError as ex: if isinstance(datetime_str, str): if datetime_str.find('No EXIF Data') == 0: return -1 if datetime_str.find('Invalid') == 0: return -1 print('!!!!!!!!!!!!!!!!!!') print('Caught Error: ' + repr(ex)) print('datetime_str = %r' % datetime_str) raise @profile def check_exif_keys(pil_image): info_ = pil_image._getexif() valid_keys = [] invalid_keys = [] for key, val in info_.iteritems(): try: exif_keyval = TAGS[key] valid_keys.append((key, exif_keyval)) except KeyError: invalid_keys.append(key) print('[io] valid_keys = ' + '\n'.join(valid_keys)) print('-----------') #import draw_func2 as df2 #exec(df2.present()) @profile def read_all_exif_tags(pil_image): info_ = pil_image._getexif() info_iter = info_.iteritems() tag_ = lambda key: TAGS.get(key, key) exif = {} if info_ is None else {tag_(k): v for k, v in info_iter} return exif @profile def read_one_exif_tag(pil_image, tag): try: exif_key = TAGS.keys()[TAGS.values().index(tag)] except ValueError: return 'Invalid EXIF Tag' info_ = pil_image._getexif() if info_ is 
None: return None else: invalid_str = 'Invalid EXIF Key: exif_key=%r, tag=%r' % (exif_key, tag) exif_val = info_.get(exif_key, invalid_str) return exif_val #try: #exif_val = info_[exif_key] #except KeyError: #exif_val = 'Invalid EXIF Key: exif_key=%r, tag=%r' % (exif_key, tag) #print('') #print(exif_val) #check_exif_keys(pil_image) @profile def read_exif(fpath, tag=None): try: pil_image = Image.open(fpath) if not hasattr(pil_image, '_getexif'): return 'No EXIF Data' except IOError as ex: import argparse2 print('Caught IOError: %r' % (ex,)) print_image_checks(fpath) if argparse2.ARGS_.strict: raise return {} if tag is None else None if tag is None: exif = read_all_exif_tags(pil_image) else: exif = read_one_exif_tag(pil_image, tag) del pil_image return exif @profile def print_image_checks(img_fpath): hasimg = helpers.checkpath(img_fpath, verbose=True) if hasimg: _tup = (img_fpath, filesize_str(img_fpath)) print('[io] Image %r (%s) exists. Is it corrupted?' % _tup) else: print('[io] Image %r does not exists ' (img_fpath,)) return hasimg @profile def read_exif_list(fpath_list, **kwargs): def _gen(fpath_list): # Exif generator nGname = len(fpath_list) lbl = '[io] Load Image EXIF' mark_progress, end_progress = helpers.progress_func(nGname, lbl, 16) for count, fpath in enumerate(fpath_list): mark_progress(count) yield read_exif(fpath, **kwargs) end_progress() exif_list = [exif for exif in _gen(fpath_list)] return exif_list @profile def imread(img_fpath): try: imgBGR = cv2.imread(img_fpath, flags=cv2.CV_LOAD_IMAGE_COLOR) return imgBGR except Exception as ex: print('[io] Caught Exception: %r' % ex) print('[io] ERROR reading: %r' % (img_fpath,)) raise # --- Standard Images --- def splash_img_fpath(): hsdir = dirname(__file__) splash_fpath = realpath(join(hsdir, '../hsgui/_frontend/splash.png')) return splash_fpath # --- Global Cache --- # TODO: This doesnt belong here HOME = expanduser('~') #GLOBAL_CACHE_DIR = realpath('.hotspotter/global_cache') GLOBAL_CACHE_DIR = 
join(HOME, '.hotspotter/global_cache') helpers.ensuredir(GLOBAL_CACHE_DIR) def global_cache_read(cache_id, default='.'): cache_fname = join(GLOBAL_CACHE_DIR, 'cached_dir_%s.txt' % cache_id) return helpers.read_from(cache_fname) if exists(cache_fname) else default def global_cache_write(cache_id, newdir): cache_fname = join(GLOBAL_CACHE_DIR, 'cached_dir_%s.txt' % cache_id) helpers.write_to(cache_fname, newdir) def delete_global_cache(): global_cache_dir = GLOBAL_CACHE_DIR helpers.remove_files_in_dir(global_cache_dir, recursive=True, verbose=True, dryrun=False) # --- Shelve Caching --- #def read_cache(fpath): #pass #def write_cache(fpath): #with open(fpath, 'wa') as file_ #shelf = shelve.open(file_) #def cached_keys(fpath): #pass
apache-2.0
-9,184,094,975,685,221,000
26.862385
88
0.583388
false
Ghost-script/TaskMan
wsgi/taskman/taskman/views.py
1
4067
from django.shortcuts import render, HttpResponse, redirect from forms import LoginForm, RegistrationForm from django.contrib.auth import login, logout, authenticate from django.contrib.auth.decorators import login_required from django.contrib.auth.models import User from taskManager.forms import TaskCreate,MultipleSelect from taskManager.views import show_task, show_logs from django.contrib.auth.hashers import make_password,is_password_usable def index(request): """ Handles user login """ if request.method == 'POST': form = LoginForm(request.POST) if form.is_valid(): email = form.cleaned_data['email'] password = form.cleaned_data['password'] user = authenticate(email=email, password=password) if user is not None: if user.error is None: login(request, user) return redirect('home') else: form.message = "Email/Password Mismatch" return render(request, 'index.html', {'form': form}) form.message = "Email not found" return render(request, 'index.html', {'form': form, 'page': 'index'}) else: form.message = "Invalid Email" return render(request, 'index.html', {'form': form, 'page': 'index'}) else: form = LoginForm() return render(request, 'index.html', {'form': form, 'page': 'index'}) def register_user(request): """ Handles user Registration """ form = RegistrationForm(request.POST) if request.method == 'POST': if form.is_valid(): username = form.cleaned_data['username'] email = form.cleaned_data['email'] password = form.cleaned_data['password'] confirm = form.cleaned_data['confirm'] try: user = User.objects.get(email=email) form.error = "Email already registered!" 
return render(request, 'registration.html', {'form': form}) except User.DoesNotExist: if password == confirm: password = make_password(password) if is_password_usable(password): user = User(username=username, email=email, password=password) user.save() form = RegistrationForm() form.message = "Success" else: form.message = "Password cannot be used" else: form.message = "Comfirm and Password field do not match" return render(request, 'registration.html', {'form': form, 'page': 'reg'}) except Exception as e: #logging be implemented here print e else: form.error = "Invalid form feild Values" return render(request, 'registration.html', {'form': form, 'page': 'reg'}) else: form = RegistrationForm() return render(request, 'registration.html', {'form': form, 'page': 'reg'}) @login_required(login_url="/") def dashboard(request): """ Handles dashboard tasklist request functions: Sorting the tasks , Showing TrackerLogs """ col = request.GET.get('sortby', 'id') order = request.GET.get('order', 'asc') task = show_task(request, col=col, order=order) logs = show_logs(request) form = MultipleSelect() return render(request, 'dashboard.html', {'tasks': task, 'logs': logs, 'form': form, 'page': 'home'}) def logout_user(request): """ Logs user Out """ logout(request) return redirect('/')
gpl-2.0
6,536,155,798,609,383,000
34.365217
78
0.522252
false
erikrose/sphinx-js
sphinx_js/typedoc.py
1
13554
"""Converter from typedoc output to jsdoc doclet format""" import os import sys import json from six import iteritems # JSDoc entries used in sphinx-js: # - optional access of [ public, private, protected ] # - optional classdesc # - optional comment (controls doclet inclusion) # - optional description # - optional exceptions # - optional exclude-members # - kind of [ function, typedef, <other> ] # - longname # - optional memberof # - meta.filename # - meta.lineno # - meta.code.paramnames # - meta.code.path # - name # - optional params # - optional properties # - optional returns # - type.names # - optional undocumented class TypeDoc(object): """ Encapsulation of the Typedoc to JSDoc conversion process. Upon construction this class will convert the typedoc JSON object to a list of JSDoc doclets in :py:attr:`jsdoc`. This class holds all the state used during the conversion making it easy to do multiple (sequential) conversions. :ivar jsdoc: the list of generated doclets :ivar nodelist: the flattened typedoc entries indexed by 'id' JSDoc JSON schema: https://github.com/jsdoc3/jsdoc/blob/master/lib/jsdoc/schema.js """ def __init__(self, root): """ Construct a list of jsdoc entries from the typedoc JSON object. :param root: a JSON object from a typedoc JSON file """ self.jsdoc = [] self.nodelist = {} self.make_node_list(root) self.convert_node(root) def get_parent(self, node): """ Get the parent of a node. :param node: A Typedoc node :return: The parent Typedoc node, or None if node was the root. """ parentId = node.get('__parentId') return self.nodelist[parentId] if parentId is not None else None def extend_doclet(self, result, **kwargs): """ Extend a jsdoc entry. .. note:: Filters out keywords with value None. This is used explicitely, for example in :py:func:`simple_doclet`, and implicitely when typedoc may lack an entry, for example in the description field in :py:func:`make_result`. 
""" result.update(**kwargs) return {k: v for k, v in iteritems(result) if v is not None} def make_doclet(self, **kwargs): """Create a new jsdoc entry""" return self.extend_doclet({}, **kwargs) def make_longname(self, node): """Construct the jsdoc longname entry for a typedoc node""" parent = self.get_parent(node) longname = self.make_longname(parent) if parent is not None else '' kindString = node.get('kindString') if kindString in [None, 'Function', 'Constructor', 'Method']: return longname if longname != '': flags = node.get('flags') if (parent.get('kindString') in ['Class', 'Interface'] and flags.get('isStatic') is not True): longname += '#' elif parent.get('kindString') in ['Function', 'Method']: longname += '.' else: longname += '~' if kindString == 'Module': return longname + 'module:' + node.get('name')[1:-1] elif kindString == 'External module': return longname + 'external:' + node.get('name')[1:-1] else: return longname + node.get('name') def make_meta(self, node): """Construct the jsdoc meta entry for a typedoc node""" source = node.get('sources')[0] return { 'path': os.path.dirname(source.get('fileName')) or './', 'filename': os.path.basename(source.get('fileName')), 'lineno': source.get('line'), 'code': {} } def make_type_name(self, type): """Construct the name of a type from a Typedoc type entry""" names = [] if type.get('type') == 'reference' and type.get('id'): node = self.nodelist[type.get('id')] # Should be: names = [ self.make_longname(node)] parent = self.nodelist[node.get('__parentId')] if parent.get('kindString') == 'External module': names = [parent['name'][1:-1] + '.' 
+ node['name']] else: names = [node['name']] elif type.get('type') in ['intrinsic', 'reference']: names = [type.get('name')] elif type.get('type') == 'stringLiteral': names = ['"' + type.get('value') + '"'] elif type.get('type') == 'array': names = [self.make_type_name(type.get('elementType')) + '[]'] elif type.get('type') == 'tuple': types = [self.make_type_name(t) for t in type.get('elements')] names = ['[' + ','.join(types) + ']'] elif type.get('type') == 'union': types = [self.make_type_name(t) for t in type.get('types')] names = [' | '.join(types)] elif type.get('type') == 'typeOperator': target_name = self.make_type_name(type.get('target')) names = [type.get('operator'), target_name] elif type.get('type') == 'typeParameter': names = [type.get('name')] constraint = type.get('constraint') if constraint is not None: names.extend(['extends', self.make_type_name(constraint)]) elif type.get('type') == 'reflection': names = ['<TODO>'] return ' '.join(names) def make_type(self, type): """Construct a jsdoc type entry""" return { 'names': [self.make_type_name(type)] } def make_description(self, comment): """Construct a jsdoc description entry""" if not comment: return '' else: return '\n\n'.join([ comment.get('shortText', ''), comment.get('text', '') ]) def make_param(self, param): """Construct a jsdoc parameter entry""" typeEntry = param.get('type') if typeEntry is None: return self.make_doclet( name=param.get('name'), description=self.make_description(param.get('comment')) ) else: return self.make_doclet( name=param.get('name'), type=self.make_type(typeEntry), description=self.make_description(param.get('comment')) ) def make_result(self, param): """Construct a jsdoc function result entry""" type = param.get('type') if type is None or type.get('name') == 'void': return [] return [self.make_doclet( name=param.get('name'), type=self.make_type(type), description=param.get('comment', {}).get('returns') )] def simple_doclet(self, kind, node): """Construct a jsdoc entry 
with some frequently used fields.""" memberof = self.make_longname(self.get_parent(node)) if memberof == '': memberof = None if node.get('flags').get('isPrivate'): access = 'private' elif node.get('flags').get('isProtected'): access = 'protected' else: access = None comment = node.get('comment') return self.make_doclet( kind=kind, access=access, comment=node.get('comment', {}).get('text', '<empty>'), meta=self.make_meta(node), name=node.get('name'), longname=self.make_longname(node), memberof=memberof, description=self.make_description(comment) ) def convert_node(self, node): """ Convert a typedoc entry to a jsdoc entry. Typedoc entries are hierarchical, so this function will recurse. New entries are added to :py:attr:`self.jsdoc`. .. rubric:: To do Some entries generate restructured text. Preferably this information should be captured in the jsdoc entries and used in the templates. """ if node.get('inheritedFrom'): return if node.get('sources'): # Ignore nodes with a reference to absolute paths (like /usr/lib) source = node.get('sources')[0] if source.get('fileName', '.')[0] == '/': return kindString = node.get('kindString') if kindString == 'External module': doclet = self.simple_doclet('external', node) elif kindString == 'Module': doclet = self.simple_doclet('module', node) elif kindString in ['Class', 'Interface']: specifiers = [] if kindString == 'Interface': doclet = self.simple_doclet('interface', node) specifiers.append('*interface*') else: doclet = self.simple_doclet('class', node) doclet['classdesc'] = '' if node.get('flags', {}).get('isAbstract'): specifiers.append('*abstract*') if node.get('flags', {}).get('isExported'): module_name = self.get_parent(node).get('name')[1:-1] specifiers.append('*exported from* :js:mod:`' + module_name + '`') doclet['classdesc'] += ', '.join(specifiers) if node.get('extendedTypes'): doclet['classdesc'] += '\n\n**Extends:**\n' for type in node.get('extendedTypes', []): type_name = self.make_type_name(type) 
doclet['classdesc'] += ' * :js:class:`' + type_name + '`\n' if node.get('implementedTypes'): doclet['classdesc'] += '\n\n**Implements:**\n' for type in node.get('implementedTypes', []): type_name = self.make_type_name(type) doclet['classdesc'] += ' * :js:class:`' + type_name + '`\n' doclet['params'] = [] for param in node.get('typeParameter', []): doclet['params'].append(self.make_param(param)) self.extend_doclet( doclet, extends=[e['name'] for e in node.get('extendedTypes', [])] ) elif kindString == 'Property': doclet = self.simple_doclet('member', node) if node.get('flags', {}).get('isAbstract'): doclet['description'] = '*abstract*\n\n' + doclet['description'] self.extend_doclet( doclet, type=self.make_type(node.get('type')) ) elif kindString == 'Accessor': doclet = self.simple_doclet('member', node) if node.get('getSignature'): type = self.make_type(node['getSignature']['type']) else: type_name = node['setSignature']['parameters'][0]['type'] type = self.make_type(type_name) self.extend_doclet(doclet, type=type) elif kindString in ['Function', 'Constructor', 'Method']: for sig in node.get('signatures'): sig['sources'] = node['sources'] self.convert_node(sig) return elif kindString in ['Constructor signature', 'Call signature']: parent = self.get_parent(node) doclet = self.simple_doclet('function', node) if parent.get('flags', {}).get('isAbstract'): doclet['description'] = '*abstract*\n\n' + doclet['description'] if parent.get('flags', {}).get('isOptional'): doclet['description'] = '*optional*\n\n' + doclet['description'] self.extend_doclet( doclet, params=[], returns=self.make_result(node) ) doclet['meta']['code']['paramnames'] = [] for param in node.get('parameters', []): doclet['params'].append(self.make_param(param)) doclet['meta']['code']['paramnames'].append(param.get('name')) else: doclet = None if doclet: self.jsdoc.append(doclet) for child in node.get('children', []): self.convert_node(child) def make_node_list(self, node, parent=None): """Flatten the 
tree of Typedoc entries to a list indexed by 'id'""" if node is None: return if node.get('id') is not None: node['__parentId'] = parent self.nodelist[node['id']] = node for tag in ['children', 'signatures', 'parameters']: for child in node.get(tag, []): self.make_node_list(child, node.get('id')) typetag = node.get('type') if isinstance(typetag, dict) and typetag['type'] != 'reference': self.make_node_list(typetag, parent) self.make_node_list(node.get('declaration'), None) def parse_typedoc(inputfile): """Parse and convert the typedoc JSON file to a list jsdoc entries""" typedoc = TypeDoc(json.load(inputfile)) return typedoc.jsdoc def typedoc(inputname): """ Read a typedoc file and print the resulting jsdoc list. .. note:: This function only exists to test this module in isolation. """ with open(inputname, 'r') as inputfile: json.dump(parse_typedoc(inputfile), sys.stdout, indent=2) if __name__ == '__main__': typedoc(sys.argv[1])
mit
7,926,526,332,986,313,000
36.545706
86
0.539324
false
openstack/networking-bgpvpn
bgpvpn_dashboard/test/admin/test_views.py
1
3209
# Copyright (c) 2017 Orange. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from collections import namedtuple from bgpvpn_dashboard.api import bgpvpn as bgpvpn_api from bgpvpn_dashboard.dashboards.admin.bgpvpn import tables as bgpvpn_tables from bgpvpn_dashboard.dashboards.admin.bgpvpn import views as bgpvpn_views from openstack_dashboard.test import helpers VIEWS = "bgpvpn_dashboard.dashboards.admin.bgpvpn.views" class TestIndexView(helpers.APITestCase): def setUp(self): super(TestIndexView, self).setUp() mock_request = mock.Mock(horizon={'async_messages': []}) self.bgpvpn_view = bgpvpn_views.IndexView(request=mock_request) self.assertEqual(bgpvpn_tables.BgpvpnTable, self.bgpvpn_view.table_class) def _get_mock_bgpvpn(self, prefix): bgpvpn_info = {} if prefix: bgpvpn_info = { "name": "%s_name" % prefix, "route_targets": [], "import_targets": [], "export_targets": [], "networks": [], "routers": [], "tenant_id": "tenant_id", "type": "l3" } return bgpvpn_api.Bgpvpn(bgpvpn_info) @mock.patch.object(bgpvpn_views.api, 'keystone', autospec=True) def test_get_tenant_name(self, mock_api): Tenant = namedtuple("Tenant", ["id", "name"]) tenant = Tenant("tenant_id", "tenant_name") mock_api.tenant_get.return_value = tenant result = self.bgpvpn_view._get_tenant_name("tenant_id") mock_api.tenant_get.assert_called_once_with( self.bgpvpn_view.request, "tenant_id") self.assertEqual(result, "tenant_name") @mock.patch('%s.IndexView._get_tenant_name' % VIEWS, 
return_value={"tenant_id": "tenant_name"}) @mock.patch.object(bgpvpn_views, 'api', autospec=True) @mock.patch.object(bgpvpn_views, 'bgpvpn_api', autospec=True) def test_get_data(self, mock_bgpvpn_api, mock_api, mock_get_tenant_name): bgpvpn_foo = self._get_mock_bgpvpn("foo") bgpvpn_bar = self._get_mock_bgpvpn("bar") mock_neutron_client = mock_api.neutron.neutronclient(mock.Mock()) mock_bgpvpn_api.bgpvpns_list.return_value = [bgpvpn_foo, bgpvpn_bar] mock_neutron_client.list_networks.return_value = [] mock_neutron_client.list_routers.return_value = [] expected_bgpvpns = [bgpvpn_foo, bgpvpn_bar] result = self.bgpvpn_view.get_data() calls = [mock.call("tenant_id"), mock.call("tenant_id")] mock_get_tenant_name.assert_has_calls(calls) self.assertEqual(result, expected_bgpvpns)
apache-2.0
7,976,155,279,535,758,000
38.617284
77
0.648177
false
lpfann/fri
fri/model/lupi_ordinal_regression.py
1
12804
from itertools import product import cvxpy as cvx import numpy as np from sklearn.metrics import make_scorer from sklearn.utils import check_X_y from fri.model.base_lupi import ( LUPI_Relevance_CVXProblem, split_dataset, is_lupi_feature, ) from fri.model.ordinal_regression import ( OrdinalRegression_Relevance_Bound, ordinal_scores, ) from .base_initmodel import LUPI_InitModel from .base_type import ProblemType class LUPI_OrdinalRegression(ProblemType): def __init__(self, **kwargs): super().__init__(**kwargs) self._lupi_features = None @property def lupi_features(self): return self._lupi_features @classmethod def parameters(cls): return ["C", "scaling_lupi_w"] @property def get_initmodel_template(cls): return LUPI_OrdinalRegression_SVM @property def get_cvxproblem_template(cls): return LUPI_OrdinalRegression_Relevance_Bound def relax_factors(cls): return ["loss_slack", "w_l1_slack"] def preprocessing(self, data, lupi_features=None): X, y = data d = X.shape[1] if lupi_features is None: raise ValueError("Argument 'lupi_features' missing in fit() call.") if not isinstance(lupi_features, int): raise ValueError("Argument 'lupi_features' is not type int.") if not 0 < lupi_features < d: raise ValueError( "Argument 'lupi_features' looks wrong. We need at least 1 priviliged feature (>0) or at least one normal feature." ) self._lupi_features = lupi_features # Check that X and y have correct shape X, y = check_X_y(X, y) if np.min(y) > 0: print("First ordinal class has index > 0. Shifting index...") y = y - np.min(y) return X, y class LUPI_OrdinalRegression_SVM(LUPI_InitModel): HYPERPARAMETER = ["C", "scaling_lupi_w"] def __init__(self, C=1, scaling_lupi_w=1, lupi_features=None): super().__init__() self.scaling_lupi_w = scaling_lupi_w self.C = C self.lupi_features = lupi_features def fit(self, X_combined, y, lupi_features=None): """ Parameters ---------- lupi_features : int Number of features in dataset which are considered privileged information (PI). 
PI features are expected to be the last features in the dataset. """ if lupi_features is None: try: lupi_features = self.lupi_features self.lupi_features = lupi_features except: raise ValueError("No amount of lupi features given.") X, X_priv = split_dataset(X_combined, self.lupi_features) (n, d) = X.shape self.classes_ = np.unique(y) # Get parameters from CV model without any feature contstraints C = self.get_params()["C"] scaling_lupi_w = self.get_params()["scaling_lupi_w"] get_original_bin_name, n_bins = get_bin_mapping(y) n_boundaries = n_bins - 1 # Initalize Variables in cvxpy w = cvx.Variable(shape=(d), name="w") b_s = cvx.Variable(shape=(n_boundaries), name="bias") w_priv = cvx.Variable(shape=(self.lupi_features, 2), name="w_priv") d_priv = cvx.Variable(shape=(2), name="bias_priv") def priv_function(bin, sign): indices = np.where(y == get_original_bin_name[bin]) return X_priv[indices] @ w_priv[:, sign] + d_priv[sign] # L1 norm regularization of both functions with 1 scaling constant priv_l1_1 = cvx.norm(w_priv[:, 0], 1) priv_l1_2 = cvx.norm(w_priv[:, 1], 1) w_priv_l1 = priv_l1_1 + priv_l1_2 w_l1 = cvx.norm(w, 1) weight_regularization = 0.5 * (w_l1 + scaling_lupi_w * w_priv_l1) constraints = [] loss = 0 for left_bin in range(0, n_bins - 1): indices = np.where(y == get_original_bin_name[left_bin]) constraints.append( X[indices] @ w - b_s[left_bin] <= -1 + priv_function(left_bin, 0) ) constraints.append(priv_function(left_bin, 0) >= 0) loss += cvx.sum(priv_function(left_bin, 0)) # Add constraints for slack into right neighboring bins for right_bin in range(1, n_bins): indices = np.where(y == get_original_bin_name[right_bin]) constraints.append( X[indices] @ w - b_s[right_bin - 1] >= +1 - priv_function(right_bin, 1) ) constraints.append(priv_function(right_bin, 1) >= 0) loss += cvx.sum(priv_function(right_bin, 1)) for i_boundary in range(0, n_boundaries - 1): constraints.append(b_s[i_boundary] <= b_s[i_boundary + 1]) objective = cvx.Minimize(C * loss + 
weight_regularization) # Solve problem. problem = cvx.Problem(objective, constraints) problem.solve(**self.SOLVER_PARAMS) w = w.value b_s = b_s.value self.model_state = { "w": w, "b_s": b_s, "w_priv": w_priv.value, "d_priv": d_priv.value, "lupi_features": lupi_features, # Number of lupi features in the dataset TODO: Move this somewhere else "bin_boundaries": n_boundaries, } self.constraints = { "loss": loss.value, "w_l1": w_l1.value, "w_priv_l1": w_priv_l1.value, } return self def predict(self, X): X, X_priv = split_dataset(X, self.lupi_features) w = self.model_state["w"] b_s = self.model_state["b_s"] scores = np.dot(X, w.T)[np.newaxis] bin_thresholds = np.append(b_s, np.inf) # If thresholds are smaller than score the value belongs to the bigger bin # after subtracting we check for positive elements indices = np.sum(scores.T - bin_thresholds >= 0, -1) return self.classes_[indices] def score(self, X, y, error_type="mmae", return_error=False, **kwargs): X, y = check_X_y(X, y) prediction = self.predict(X) score = ordinal_scores(y, prediction, error_type, return_error=return_error) return score def make_scorer(self): # Use multiple scores for ordinal regression mze = make_scorer(ordinal_scores, error_type="mze") mae = make_scorer(ordinal_scores, error_type="mae") mmae = make_scorer(ordinal_scores, error_type="mmae") scorer = {"mze": mze, "mae": mae, "mmae": mmae} return scorer, "mmae" def get_bin_mapping(y): """ Get ordered unique classes and corresponding mapping from old names Parameters ---------- y: array of discrete values (int, str) Returns ------- """ classes_ = np.unique(y) original_bins = sorted(classes_) n_bins = len(original_bins) bins = np.arange(n_bins) get_old_bin = dict(zip(bins, original_bins)) return get_old_bin, n_bins class LUPI_OrdinalRegression_Relevance_Bound( LUPI_Relevance_CVXProblem, OrdinalRegression_Relevance_Bound ): @classmethod def generate_lower_bound_problem( cls, best_hyperparameters, init_constraints, best_model_state, data, di, 
preset_model, probeID=-1, ): is_priv = is_lupi_feature( di, data, best_model_state ) # Is it a lupi feature where we need additional candidate problems? if not is_priv: yield from super().generate_lower_bound_problem( best_hyperparameters, init_constraints, best_model_state, data, di, preset_model, probeID=probeID, ) else: for sign in [1, -1]: problem = cls( di, data, best_hyperparameters, init_constraints, preset_model=preset_model, best_model_state=best_model_state, probeID=probeID, ) problem.init_objective_LB(sign=sign) problem.isLowerBound = True yield problem @classmethod def generate_upper_bound_problem( cls, best_hyperparameters, init_constraints, best_model_state, data, di, preset_model, probeID=-1, ): is_priv = is_lupi_feature( di, data, best_model_state ) # Is it a lupi feature where we need additional candidate problems? if not is_priv: yield from super().generate_upper_bound_problem( best_hyperparameters, init_constraints, best_model_state, data, di, preset_model, probeID=probeID, ) else: for sign, pos in product([1, -1], [0, 1]): problem = cls( di, data, best_hyperparameters, init_constraints, preset_model=preset_model, best_model_state=best_model_state, probeID=probeID, ) problem.init_objective_UB(sign=sign, pos=pos) yield problem @classmethod def aggregate_min_candidates(cls, min_problems_candidates): vals = [candidate.solved_relevance for candidate in min_problems_candidates] # We take the max of mins because we need the necessary contribution over all functions min_value = max(vals) return min_value def _init_objective_LB_LUPI(self, sign=None, bin_index=None, **kwargs): self.add_constraint( sign * self.w_priv[self.lupi_index, :] <= self.feature_relevance ) self._objective = cvx.Minimize(self.feature_relevance) def _init_objective_UB_LUPI(self, sign=None, pos=None, **kwargs): self.add_constraint( self.feature_relevance <= sign * self.w_priv[self.lupi_index, pos] ) self._objective = cvx.Maximize(self.feature_relevance) def _init_constraints(self, 
parameters, init_model_constraints): # Upper constraints from initial model init_w_l1 = init_model_constraints["w_l1"] init_w_priv_l1 = init_model_constraints["w_priv_l1"] init_loss = init_model_constraints["loss"] scaling_lupi_w = parameters["scaling_lupi_w"] get_original_bin_name, n_bins = get_bin_mapping(self.y) n_boundaries = n_bins - 1 # Initalize Variables in cvxpy w = cvx.Variable(shape=(self.d), name="w") b_s = cvx.Variable(shape=(n_boundaries), name="bias") w_priv = cvx.Variable(shape=(self.d_priv, 2), name="w_priv") d_priv = cvx.Variable(shape=(2), name="bias_priv") def priv_function(bin, sign): indices = np.where(self.y == get_original_bin_name[bin]) return self.X_priv[indices] @ w_priv[:, sign] + d_priv[sign] # L1 norm regularization of both functions with 1 scaling constant priv_l1_1 = cvx.norm(w_priv[:, 0], 1) priv_l1_2 = cvx.norm(w_priv[:, 1], 1) w_priv_l1 = priv_l1_1 + priv_l1_2 w_l1 = cvx.norm(w, 1) loss = 0 for left_bin in range(0, n_bins - 1): indices = np.where(self.y == get_original_bin_name[left_bin]) self.add_constraint( self.X[indices] @ w - b_s[left_bin] <= -1 + priv_function(left_bin, 0) ) self.add_constraint(priv_function(left_bin, 0) >= 0) loss += cvx.sum(priv_function(left_bin, 0)) # Add constraints for slack into right neighboring bins for right_bin in range(1, n_bins): indices = np.where(self.y == get_original_bin_name[right_bin]) self.add_constraint( self.X[indices] @ w - b_s[right_bin - 1] >= +1 - priv_function(right_bin, 1) ) self.add_constraint(priv_function(right_bin, 1) >= 0) loss += cvx.sum(priv_function(right_bin, 1)) for i_boundary in range(0, n_boundaries - 1): self.add_constraint(b_s[i_boundary] <= b_s[i_boundary + 1]) self.add_constraint( w_l1 + scaling_lupi_w * w_priv_l1 <= init_w_l1 + scaling_lupi_w * init_w_priv_l1 ) self.add_constraint(loss <= init_loss) self.w = w self.w_priv = w_priv self.feature_relevance = cvx.Variable(nonneg=True, name="Feature Relevance")
mit
1,933,810,736,430,988,500
32.257143
130
0.559513
false
bapakode/OmMongo
examples/advanced_modeling.py
1
1048
''' This page is going to go through some more advanced modeling techniques using forward and self-references ''' from ommongo.document import Document from ommongo.fields import * from datetime import datetime from pprint import pprint class Event(Document): name = StringField() children = ListField(DocumentField('Event')) begin = DateTimeField() end = DateTimeField() def __init__(self, name, parent=None): Document.__init__(self, name=name) self.children = [] if parent is not None: parent.children.append(self) def __enter__(self): self.begin = datetime.utcnow() return self def __exit__(self, exc_type, exc_val, exc_tb): self.end = datetime.utcnow() with Event('request') as root: with Event('main_func', root) as br: with Event('setup', br): pass with Event('handle', br): pass with Event('teardown', br): pass with Event('cleanup', root): pass pprint(root.wrap())
mit
491,704,710,223,334,700
25.2
72
0.611641
false
mrachinskiy/jewelcraft
ops_gem/gem_select_ops.py
1
6067
# ##### BEGIN GPL LICENSE BLOCK ##### # # JewelCraft jewelry design toolkit for Blender. # Copyright (C) 2015-2021 Mikhail Rachinskiy # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # # ##### END GPL LICENSE BLOCK ##### from bpy.props import EnumProperty, FloatProperty, BoolProperty from bpy.types import Operator from bpy.app.translations import pgettext_tip as _ from mathutils import Matrix from ..lib import dynamic_list class OBJECT_OT_gem_select_by_trait(Operator): bl_label = "Select Gems by Trait" bl_description = "Select gems by trait" bl_idname = "object.jewelcraft_gem_select_by_trait" bl_options = {"REGISTER", "UNDO"} filter_size: BoolProperty(name="Size", options={"SKIP_SAVE"}) filter_stone: BoolProperty(name="Stone", options={"SKIP_SAVE"}) filter_cut: BoolProperty(name="Cut", options={"SKIP_SAVE"}) filter_similar: BoolProperty(options={"SKIP_SAVE", "HIDDEN"}) size: FloatProperty( name="Size", default=1.0, min=0.0, step=10, precision=2, unit="LENGTH", ) stone: EnumProperty(name="Stone", items=dynamic_list.stones) cut: EnumProperty(name="Cut", items=dynamic_list.cuts) use_extend: BoolProperty(name="Extend", description="Extend selection") use_select_children: BoolProperty(name="Select Children") def draw(self, context): layout = self.layout layout.use_property_split = True layout.use_property_decorate = False row = layout.row(heading="Size") row.prop(self, "filter_size", text="") 
row.prop(self, "size", text="") row = layout.row(heading="Stone") row.prop(self, "filter_stone", text="") row.prop(self, "stone", text="") row = layout.row(heading="Cut", heading_ctxt="Jewelry") row.prop(self, "filter_cut", text="") row.template_icon_view(self, "cut", show_labels=True) layout.separator() layout.prop(self, "use_extend") layout.prop(self, "use_select_children") def execute(self, context): size = round(self.size, 2) check_size = check_stone = check_cut = lambda x: True if self.filter_size: check_size = lambda ob: round(ob.dimensions.y, 2) == size if self.filter_stone: check_stone = lambda ob: ob["gem"]["stone"] == self.stone if self.filter_cut: check_cut = lambda ob: ob["gem"]["cut"] == self.cut selected = None for ob in context.visible_objects: if "gem" in ob and check_size(ob) and check_stone(ob) and check_cut(ob): selected = ob ob.select_set(True) if self.use_select_children and ob.children: for child in ob.children: child.select_set(True) elif not self.use_extend: ob.select_set(False) if context.object is None or not context.object.select_get(): context.view_layer.objects.active = selected return {"FINISHED"} def invoke(self, context, event): ob = context.object if ob and "gem" in ob: self.size = ob.dimensions.y self.stone = ob["gem"]["stone"] self.cut = ob["gem"]["cut"] if self.filter_similar: self.filter_size = True self.filter_stone = True self.filter_cut = True return self.execute(context) class OBJECT_OT_gem_select_overlapping(Operator): bl_label = "Select Overlapping" bl_description = "Select gems that are less than 0.1 mm distance from each other or overlapping" bl_idname = "object.jewelcraft_gem_select_overlapping" bl_options = {"REGISTER", "UNDO"} threshold: FloatProperty( name="Threshold", default=0.1, soft_min=0.0, step=1, precision=2, unit="LENGTH", ) def execute(self, context): from ..lib import asset obs = [] ob_data = [] depsgraph = context.evaluated_depsgraph_get() for dup in depsgraph.object_instances: if dup.is_instance: ob = 
dup.instance_object.original else: ob = dup.object.original ob.select_set(False) if "gem" in ob: loc = dup.matrix_world.to_translation() rad = max(ob.dimensions[:2]) / 2 if dup.is_instance: mat = dup.matrix_world.copy() if ob.parent and ob.parent.is_instancer: sel = ob.parent else: sel = None else: mat_loc = Matrix.Translation(loc) mat_rot = dup.matrix_world.to_quaternion().to_matrix().to_4x4() mat = mat_loc @ mat_rot sel = ob loc.freeze() mat.freeze() obs.append(sel) ob_data.append((loc, rad, mat)) overlaps = asset.gem_overlap(context, ob_data, self.threshold) if overlaps: for i in overlaps: ob = obs[i] if ob: ob.select_set(True) self.report({"WARNING"}, _("{} overlaps found").format(len(overlaps))) else: self.report({"INFO"}, _("{} overlaps found").format(0)) return {"FINISHED"}
gpl-3.0
5,417,416,596,498,840,000
31.100529
100
0.577056
false
norus/procstat-json
tornado/test/httputil_test.py
1
7432
#!/usr/bin/env python from __future__ import absolute_import, division, with_statement from tornado.httputil import url_concat, parse_multipart_form_data, HTTPHeaders from tornado.escape import utf8 from tornado.log import gen_log from tornado.testing import ExpectLog from tornado.test.util import unittest from tornado.util import b import logging class TestUrlConcat(unittest.TestCase): def test_url_concat_no_query_params(self): url = url_concat( "https://localhost/path", [('y', 'y'), ('z', 'z')], ) self.assertEqual(url, "https://localhost/path?y=y&z=z") def test_url_concat_encode_args(self): url = url_concat( "https://localhost/path", [('y', '/y'), ('z', 'z')], ) self.assertEqual(url, "https://localhost/path?y=%2Fy&z=z") def test_url_concat_trailing_q(self): url = url_concat( "https://localhost/path?", [('y', 'y'), ('z', 'z')], ) self.assertEqual(url, "https://localhost/path?y=y&z=z") def test_url_concat_q_with_no_trailing_amp(self): url = url_concat( "https://localhost/path?x", [('y', 'y'), ('z', 'z')], ) self.assertEqual(url, "https://localhost/path?x&y=y&z=z") def test_url_concat_trailing_amp(self): url = url_concat( "https://localhost/path?x&", [('y', 'y'), ('z', 'z')], ) self.assertEqual(url, "https://localhost/path?x&y=y&z=z") def test_url_concat_mult_params(self): url = url_concat( "https://localhost/path?a=1&b=2", [('y', 'y'), ('z', 'z')], ) self.assertEqual(url, "https://localhost/path?a=1&b=2&y=y&z=z") def test_url_concat_no_params(self): url = url_concat( "https://localhost/path?r=1&t=2", [], ) self.assertEqual(url, "https://localhost/path?r=1&t=2") class MultipartFormDataTest(unittest.TestCase): def test_file_upload(self): data = b("""\ --1234 Content-Disposition: form-data; name="files"; filename="ab.txt" Foo --1234--""").replace(b("\n"), b("\r\n")) args = {} files = {} parse_multipart_form_data(b("1234"), data, args, files) file = files["files"][0] self.assertEqual(file["filename"], "ab.txt") self.assertEqual(file["body"], b("Foo")) def 
test_unquoted_names(self): # quotes are optional unless special characters are present data = b("""\ --1234 Content-Disposition: form-data; name=files; filename=ab.txt Foo --1234--""").replace(b("\n"), b("\r\n")) args = {} files = {} parse_multipart_form_data(b("1234"), data, args, files) file = files["files"][0] self.assertEqual(file["filename"], "ab.txt") self.assertEqual(file["body"], b("Foo")) def test_special_filenames(self): filenames = ['a;b.txt', 'a"b.txt', 'a";b.txt', 'a;"b.txt', 'a";";.txt', 'a\\"b.txt', 'a\\b.txt', ] for filename in filenames: logging.debug("trying filename %r", filename) data = """\ --1234 Content-Disposition: form-data; name="files"; filename="%s" Foo --1234--""" % filename.replace('\\', '\\\\').replace('"', '\\"') data = utf8(data.replace("\n", "\r\n")) args = {} files = {} parse_multipart_form_data(b("1234"), data, args, files) file = files["files"][0] self.assertEqual(file["filename"], filename) self.assertEqual(file["body"], b("Foo")) def test_boundary_starts_and_ends_with_quotes(self): data = b('''\ --1234 Content-Disposition: form-data; name="files"; filename="ab.txt" Foo --1234--''').replace(b("\n"), b("\r\n")) args = {} files = {} parse_multipart_form_data(b('"1234"'), data, args, files) file = files["files"][0] self.assertEqual(file["filename"], "ab.txt") self.assertEqual(file["body"], b("Foo")) def test_missing_headers(self): data = b('''\ --1234 Foo --1234--''').replace(b("\n"), b("\r\n")) args = {} files = {} with ExpectLog(gen_log, "multipart/form-data missing headers"): parse_multipart_form_data(b("1234"), data, args, files) self.assertEqual(files, {}) def test_invalid_content_disposition(self): data = b('''\ --1234 Content-Disposition: invalid; name="files"; filename="ab.txt" Foo --1234--''').replace(b("\n"), b("\r\n")) args = {} files = {} with ExpectLog(gen_log, "Invalid multipart/form-data"): parse_multipart_form_data(b("1234"), data, args, files) self.assertEqual(files, {}) def 
test_line_does_not_end_with_correct_line_break(self): data = b('''\ --1234 Content-Disposition: form-data; name="files"; filename="ab.txt" Foo--1234--''').replace(b("\n"), b("\r\n")) args = {} files = {} with ExpectLog(gen_log, "Invalid multipart/form-data"): parse_multipart_form_data(b("1234"), data, args, files) self.assertEqual(files, {}) def test_content_disposition_header_without_name_parameter(self): data = b("""\ --1234 Content-Disposition: form-data; filename="ab.txt" Foo --1234--""").replace(b("\n"), b("\r\n")) args = {} files = {} with ExpectLog(gen_log, "multipart/form-data value missing name"): parse_multipart_form_data(b("1234"), data, args, files) self.assertEqual(files, {}) def test_data_after_final_boundary(self): # The spec requires that data after the final boundary be ignored. # http://www.w3.org/Protocols/rfc1341/7_2_Multipart.html # In practice, some libraries include an extra CRLF after the boundary. data = b("""\ --1234 Content-Disposition: form-data; name="files"; filename="ab.txt" Foo --1234-- """).replace(b("\n"), b("\r\n")) args = {} files = {} parse_multipart_form_data(b("1234"), data, args, files) file = files["files"][0] self.assertEqual(file["filename"], "ab.txt") self.assertEqual(file["body"], b("Foo")) class HTTPHeadersTest(unittest.TestCase): def test_multi_line(self): # Lines beginning with whitespace are appended to the previous line # with any leading whitespace replaced by a single space. # Note that while multi-line headers are a part of the HTTP spec, # their use is strongly discouraged. 
data = """\ Foo: bar baz Asdf: qwer \tzxcv Foo: even more lines """.replace("\n", "\r\n") headers = HTTPHeaders.parse(data) self.assertEqual(headers["asdf"], "qwer zxcv") self.assertEqual(headers.get_list("asdf"), ["qwer zxcv"]) self.assertEqual(headers["Foo"], "bar baz,even more lines") self.assertEqual(headers.get_list("foo"), ["bar baz", "even more lines"]) self.assertEqual(sorted(list(headers.get_all())), [("Asdf", "qwer zxcv"), ("Foo", "bar baz"), ("Foo", "even more lines")])
gpl-3.0
8,831,329,724,078,077,000
31.740088
81
0.541039
false
vprime/puuuu
env/lib/python2.7/site-packages/paramiko/client.py
1
21100
# Copyright (C) 2006-2007 Robey Pointer <robeypointer@gmail.com> # # This file is part of paramiko. # # Paramiko is free software; you can redistribute it and/or modify it under the # terms of the GNU Lesser General Public License as published by the Free # Software Foundation; either version 2.1 of the License, or (at your option) # any later version. # # Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR # A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License # along with Paramiko; if not, write to the Free Software Foundation, Inc., # 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. """ SSH client & key policies """ from binascii import hexlify import getpass import os import socket import warnings from paramiko.agent import Agent from paramiko.common import * from paramiko.config import SSH_PORT from paramiko.dsskey import DSSKey from paramiko.hostkeys import HostKeys from paramiko.resource import ResourceManager from paramiko.rsakey import RSAKey from paramiko.ssh_exception import SSHException, BadHostKeyException from paramiko.transport import Transport from paramiko.util import retry_on_signal class SSHClient (object): """ A high-level representation of a session with an SSH server. This class wraps `.Transport`, `.Channel`, and `.SFTPClient` to take care of most aspects of authenticating and opening channels. A typical use case is:: client = SSHClient() client.load_system_host_keys() client.connect('ssh.example.com') stdin, stdout, stderr = client.exec_command('ls -l') You may pass in explicit overrides for authentication and server host key checking. The default mechanism is to try to use local key files or an SSH agent (if one is running). .. versionadded:: 1.6 """ def __init__(self): """ Create a new SSHClient. 
""" self._system_host_keys = HostKeys() self._host_keys = HostKeys() self._host_keys_filename = None self._log_channel = None self._policy = RejectPolicy() self._transport = None self._agent = None def load_system_host_keys(self, filename=None): """ Load host keys from a system (read-only) file. Host keys read with this method will not be saved back by `save_host_keys`. This method can be called multiple times. Each new set of host keys will be merged with the existing set (new replacing old if there are conflicts). If ``filename`` is left as ``None``, an attempt will be made to read keys from the user's local "known hosts" file, as used by OpenSSH, and no exception will be raised if the file can't be read. This is probably only useful on posix. :param str filename: the filename to read, or ``None`` :raises IOError: if a filename was provided and the file could not be read """ if filename is None: # try the user's .ssh key file, and mask exceptions filename = os.path.expanduser('~/.ssh/known_hosts') try: self._system_host_keys.load(filename) except IOError: pass return self._system_host_keys.load(filename) def load_host_keys(self, filename): """ Load host keys from a local host-key file. Host keys read with this method will be checked after keys loaded via `load_system_host_keys`, but will be saved back by `save_host_keys` (so they can be modified). The missing host key policy `.AutoAddPolicy` adds keys to this set and saves them, when connecting to a previously-unknown server. This method can be called multiple times. Each new set of host keys will be merged with the existing set (new replacing old if there are conflicts). When automatically saving, the last hostname is used. :param str filename: the filename to read :raises IOError: if the filename could not be read """ self._host_keys_filename = filename self._host_keys.load(filename) def save_host_keys(self, filename): """ Save the host keys back to a file. 
Only the host keys loaded with `load_host_keys` (plus any added directly) will be saved -- not any host keys loaded with `load_system_host_keys`. :param str filename: the filename to save to :raises IOError: if the file could not be written """ # update local host keys from file (in case other SSH clients # have written to the known_hosts file meanwhile. if self._host_keys_filename is not None: self.load_host_keys(self._host_keys_filename) f = open(filename, 'w') for hostname, keys in self._host_keys.iteritems(): for keytype, key in keys.iteritems(): f.write('%s %s %s\n' % (hostname, keytype, key.get_base64())) f.close() def get_host_keys(self): """ Get the local `.HostKeys` object. This can be used to examine the local host keys or change them. :return: the local host keys as a `.HostKeys` object. """ return self._host_keys def set_log_channel(self, name): """ Set the channel for logging. The default is ``"paramiko.transport"`` but it can be set to anything you want. :param str name: new channel name for logging """ self._log_channel = name def set_missing_host_key_policy(self, policy): """ Set the policy to use when connecting to a server that doesn't have a host key in either the system or local `.HostKeys` objects. The default policy is to reject all unknown servers (using `.RejectPolicy`). You may substitute `.AutoAddPolicy` or write your own policy class. :param .MissingHostKeyPolicy policy: the policy to use when receiving a host key from a previously-unknown server """ self._policy = policy def connect(self, hostname, port=SSH_PORT, username=None, password=None, pkey=None, key_filename=None, timeout=None, allow_agent=True, look_for_keys=True, compress=False, sock=None): """ Connect to an SSH server and authenticate to it. The server's host key is checked against the system host keys (see `load_system_host_keys`) and any local host keys (`load_host_keys`). 
If the server's hostname is not found in either set of host keys, the missing host key policy is used (see `set_missing_host_key_policy`). The default policy is to reject the key and raise an `.SSHException`. Authentication is attempted in the following order of priority: - The ``pkey`` or ``key_filename`` passed in (if any) - Any key we can find through an SSH agent - Any "id_rsa" or "id_dsa" key discoverable in ``~/.ssh/`` - Plain username/password auth, if a password was given If a private key requires a password to unlock it, and a password is passed in, that password will be used to attempt to unlock the key. :param str hostname: the server to connect to :param int port: the server port to connect to :param str username: the username to authenticate as (defaults to the current local username) :param str password: a password to use for authentication or for unlocking a private key :param .PKey pkey: an optional private key to use for authentication :param str key_filename: the filename, or list of filenames, of optional private key(s) to try for authentication :param float timeout: an optional timeout (in seconds) for the TCP connect :param bool allow_agent: set to False to disable connecting to the SSH agent :param bool look_for_keys: set to False to disable searching for discoverable private key files in ``~/.ssh/`` :param bool compress: set to True to turn on compression :param socket sock: an open socket or socket-like object (such as a `.Channel`) to use for communication to the target host :raises BadHostKeyException: if the server's host key could not be verified :raises AuthenticationException: if authentication failed :raises SSHException: if there was any other error connecting or establishing an SSH session :raises socket.error: if a socket error occurred while connecting """ if not sock: for (family, socktype, proto, canonname, sockaddr) in socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM): if socktype == socket.SOCK_STREAM: 
af = family addr = sockaddr break else: # some OS like AIX don't indicate SOCK_STREAM support, so just guess. :( af, _, _, _, addr = socket.getaddrinfo(hostname, port, socket.AF_UNSPEC, socket.SOCK_STREAM) sock = socket.socket(af, socket.SOCK_STREAM) if timeout is not None: try: sock.settimeout(timeout) except: pass retry_on_signal(lambda: sock.connect(addr)) t = self._transport = Transport(sock) t.use_compression(compress=compress) if self._log_channel is not None: t.set_log_channel(self._log_channel) t.start_client() ResourceManager.register(self, t) server_key = t.get_remote_server_key() keytype = server_key.get_name() if port == SSH_PORT: server_hostkey_name = hostname else: server_hostkey_name = "[%s]:%d" % (hostname, port) our_server_key = self._system_host_keys.get(server_hostkey_name, {}).get(keytype, None) if our_server_key is None: our_server_key = self._host_keys.get(server_hostkey_name, {}).get(keytype, None) if our_server_key is None: # will raise exception if the key is rejected; let that fall out self._policy.missing_host_key(self, server_hostkey_name, server_key) # if the callback returns, assume the key is ok our_server_key = server_key if server_key != our_server_key: raise BadHostKeyException(hostname, server_key, our_server_key) if username is None: username = getpass.getuser() if key_filename is None: key_filenames = [] elif isinstance(key_filename, (str, unicode)): key_filenames = [ key_filename ] else: key_filenames = key_filename self._auth(username, password, pkey, key_filenames, allow_agent, look_for_keys) def close(self): """ Close this SSHClient and its underlying `.Transport`. """ if self._transport is None: return self._transport.close() self._transport = None if self._agent != None: self._agent.close() self._agent = None def exec_command(self, command, bufsize=-1, timeout=None, get_pty=False): """ Execute a command on the SSH server. A new `.Channel` is opened and the requested command is executed. 
The command's input and output streams are returned as Python ``file``-like objects representing stdin, stdout, and stderr. :param str command: the command to execute :param int bufsize: interpreted the same way as by the built-in ``file()`` function in Python :param int timeout: set command's channel timeout. See `Channel.settimeout`.settimeout :return: the stdin, stdout, and stderr of the executing command, as a 3-tuple :raises SSHException: if the server fails to execute the command """ chan = self._transport.open_session() if(get_pty): chan.get_pty() chan.settimeout(timeout) chan.exec_command(command) stdin = chan.makefile('wb', bufsize) stdout = chan.makefile('rb', bufsize) stderr = chan.makefile_stderr('rb', bufsize) return stdin, stdout, stderr def invoke_shell(self, term='vt100', width=80, height=24, width_pixels=0, height_pixels=0): """ Start an interactive shell session on the SSH server. A new `.Channel` is opened and connected to a pseudo-terminal using the requested terminal type and size. :param str term: the terminal type to emulate (for example, ``"vt100"``) :param int width: the width (in characters) of the terminal window :param int height: the height (in characters) of the terminal window :param int width_pixels: the width (in pixels) of the terminal window :param int height_pixels: the height (in pixels) of the terminal window :return: a new `.Channel` connected to the remote shell :raises SSHException: if the server fails to invoke a shell """ chan = self._transport.open_session() chan.get_pty(term, width, height, width_pixels, height_pixels) chan.invoke_shell() return chan def open_sftp(self): """ Open an SFTP session on the SSH server. :return: a new `.SFTPClient` session object """ return self._transport.open_sftp_client() def get_transport(self): """ Return the underlying `.Transport` object for this SSH connection. This can be used to perform lower-level tasks, like opening specific kinds of channels. 
:return: the `.Transport` for this connection """ return self._transport def _auth(self, username, password, pkey, key_filenames, allow_agent, look_for_keys): """ Try, in order: - The key passed in, if one was passed in. - Any key we can find through an SSH agent (if allowed). - Any "id_rsa" or "id_dsa" key discoverable in ~/.ssh/ (if allowed). - Plain username/password auth, if a password was given. (The password might be needed to unlock a private key, or for two-factor authentication [for which it is required].) """ saved_exception = None two_factor = False allowed_types = [] if pkey is not None: try: self._log(DEBUG, 'Trying SSH key %s' % hexlify(pkey.get_fingerprint())) allowed_types = self._transport.auth_publickey(username, pkey) two_factor = (allowed_types == ['password']) if not two_factor: return except SSHException, e: saved_exception = e if not two_factor: for key_filename in key_filenames: for pkey_class in (RSAKey, DSSKey): try: key = pkey_class.from_private_key_file(key_filename, password) self._log(DEBUG, 'Trying key %s from %s' % (hexlify(key.get_fingerprint()), key_filename)) self._transport.auth_publickey(username, key) two_factor = (allowed_types == ['password']) if not two_factor: return break except SSHException, e: saved_exception = e if not two_factor and allow_agent: if self._agent == None: self._agent = Agent() for key in self._agent.get_keys(): try: self._log(DEBUG, 'Trying SSH agent key %s' % hexlify(key.get_fingerprint())) # for 2-factor auth a successfully auth'd key will result in ['password'] allowed_types = self._transport.auth_publickey(username, key) two_factor = (allowed_types == ['password']) if not two_factor: return break except SSHException, e: saved_exception = e if not two_factor: keyfiles = [] rsa_key = os.path.expanduser('~/.ssh/id_rsa') dsa_key = os.path.expanduser('~/.ssh/id_dsa') if os.path.isfile(rsa_key): keyfiles.append((RSAKey, rsa_key)) if os.path.isfile(dsa_key): keyfiles.append((DSSKey, dsa_key)) # look in 
~/ssh/ for windows users: rsa_key = os.path.expanduser('~/ssh/id_rsa') dsa_key = os.path.expanduser('~/ssh/id_dsa') if os.path.isfile(rsa_key): keyfiles.append((RSAKey, rsa_key)) if os.path.isfile(dsa_key): keyfiles.append((DSSKey, dsa_key)) if not look_for_keys: keyfiles = [] for pkey_class, filename in keyfiles: try: key = pkey_class.from_private_key_file(filename, password) self._log(DEBUG, 'Trying discovered key %s in %s' % (hexlify(key.get_fingerprint()), filename)) # for 2-factor auth a successfully auth'd key will result in ['password'] allowed_types = self._transport.auth_publickey(username, key) two_factor = (allowed_types == ['password']) if not two_factor: return break except SSHException, e: saved_exception = e except IOError, e: saved_exception = e if password is not None: try: self._transport.auth_password(username, password) return except SSHException, e: saved_exception = e elif two_factor: raise SSHException('Two-factor authentication requires a password') # if we got an auth-failed exception earlier, re-raise it if saved_exception is not None: raise saved_exception raise SSHException('No authentication methods available') def _log(self, level, msg): self._transport._log(level, msg) class MissingHostKeyPolicy (object): """ Interface for defining the policy that `.SSHClient` should use when the SSH server's hostname is not in either the system host keys or the application's keys. Pre-made classes implement policies for automatically adding the key to the application's `.HostKeys` object (`.AutoAddPolicy`), and for automatically rejecting the key (`.RejectPolicy`). This function may be used to ask the user to verify the key, for example. """ def missing_host_key(self, client, hostname, key): """ Called when an `.SSHClient` receives a server key for a server that isn't in either the system or local `.HostKeys` object. To accept the key, simply return. To reject, raised an exception (which will be passed to the calling application). 
""" pass class AutoAddPolicy (MissingHostKeyPolicy): """ Policy for automatically adding the hostname and new host key to the local `.HostKeys` object, and saving it. This is used by `.SSHClient`. """ def missing_host_key(self, client, hostname, key): client._host_keys.add(hostname, key.get_name(), key) if client._host_keys_filename is not None: client.save_host_keys(client._host_keys_filename) client._log(DEBUG, 'Adding %s host key for %s: %s' % (key.get_name(), hostname, hexlify(key.get_fingerprint()))) class RejectPolicy (MissingHostKeyPolicy): """ Policy for automatically rejecting the unknown hostname & key. This is used by `.SSHClient`. """ def missing_host_key(self, client, hostname, key): client._log(DEBUG, 'Rejecting %s host key for %s: %s' % (key.get_name(), hostname, hexlify(key.get_fingerprint()))) raise SSHException('Server %r not found in known_hosts' % hostname) class WarningPolicy (MissingHostKeyPolicy): """ Policy for logging a Python-style warning for an unknown host key, but accepting it. This is used by `.SSHClient`. """ def missing_host_key(self, client, hostname, key): warnings.warn('Unknown %s host key for %s: %s' % (key.get_name(), hostname, hexlify(key.get_fingerprint())))
mit
-2,178,805,618,934,213,600
39.655106
139
0.603839
false
BlackHole/enigma2-obh10
lib/python/Screens/EpgSelection.py
2
4137
from __future__ import print_function from Screens.InfoBar import InfoBar from enigma import eServiceReference from Components.ActionMap import HelpableActionMap from Screens.EpgSelectionChannel import EPGSelectionChannel from Screens.EpgSelectionBase import EPGServiceZap from Screens.TimerEntry import addTimerFromEventSilent # Keep for backwards compatibility with plugins, including the parameter naming. # This class assumes that EPGSelection is only used in the SingleEPG sense. class EPGSelection(EPGSelectionChannel, EPGServiceZap): def __init__(self, session, service=None, zapFunc=None, eventid=None, bouquetChangeCB=None, serviceChangeCB=None, EPGtype="similar", StartBouquet=None, StartRef=None, bouquets=None): if EPGtype not in ("similar", "single"): print("[EPGSelection] Warning: EPGSelection does not support type '%s'" % EPGtype) print(" Attempting to continue in single EPG mode") EPGSelectionChannel.__init__(self, session, eServiceReference(service)) EPGServiceZap.__init__(self, zapFunc or InfoBar.instance.zapToService) # Rewrite the EPG actions to invoke the compatibility functions. 
helpDescription = _("EPG Commands") self["epgactions"] = HelpableActionMap(self, "EPGSelectActions", { "info": (self.Info, _("Show detailed event info")), "epg": (self.epgButtonPressed, _("Show detailed event info")), "menu": (self.createSetup, _("Setup menu")) }, prio=-1, description=helpDescription) self["colouractions"] = HelpableActionMap(self, "ColorActions", { "red": (self.redButtonPressed, _("IMDB search for current event")), "redlong": (self.redButtonPressedLong, _("Sort EPG list")), "green": (self.greenButtonPressed, _("Add/Remove timer for current event")), "greenlong": (self.greenButtonPressedLong, _("Show timer list")), "yellow": (self.yellowButtonPressed, _("Search for similar events")), "blue": (self.blueButtonPressed, _("Add an autotimer for current event")), "bluelong": (self.blueButtonPressedLong, _("Show autotimer list")) }, prio=-1, description=helpDescription) # EPGSearch bypasses base class initialisation # try to limit the scope of its quirkyness by providing a limited # initialisation path def EPGSearch_init(self, session): EPGServiceZap.__init__(self, InfoBar.instance.zapToService) # Backwards compatibility properties for plugins. @property def ChoiceBoxDialog(self): return self.choiceBoxDialog @ChoiceBoxDialog.setter def ChoiceBoxDialog(self, value): self.choiceBoxDialog = value # Backwards compatibility functions for plugins. # Button names. 
def redButtonPressed(self): self.openIMDb() def redButtonPressedLong(self): self.sortEpg() def greenButtonPressed(self): self.addEditTimer() def greenButtonPressedLong(self): self.showTimerList() def yellowButtonPressed(self): self.openEPGSearch() def blueButtonPressed(self): self.addAutoTimer() def blueButtonPressedLong(self): self.showAutoTimerList() def Info(self): self.infoKeyPressed() def InfoLong(self): self.OpenSingleEPG() def infoKeyPressed(self): self.openEventView() def eventSelected(self): # used by EPG Search plugin self.openEventView() def epgButtonPressed(self): self.openEventView() # Actions def showTimerList(self): self.openTimerList() def showAutoTimerList(self): self.openAutoTimerList() def OpenSingleEPG(self): self.openSingleEPG() def sortEpg(self): self.sortEPG(self) def timerAdd(self): self.addEditTimerMenu() def doRecordTimer(self): self.doInstantTimer(0) def doZapTimer(self): self.doInstantTimer(1) def RecordTimerQuestion(self, manual=False): if manual: self.addEditTimer() else: self.addEditTimerMenu() def doInstantTimer(self, zap=0): event, service = self["list"].getCurrent()[:2] addTimerFromEventSilent(self.session, self.refreshTimerActionButton, event, service, zap) # Things that need to be able to be overridden. def refreshList(self): try: # Allow plugins to override using the old all lowercase method name. self.refreshlist() except AttributeError: EPGSelectionChannel.refreshList(self)
gpl-2.0
-7,913,178,780,223,782,000
31.320313
183
0.751511
false
cpatrickalves/simprev
util/carrega_parametros.py
1
1280
# -*- coding: utf-8 -*- """ @author: Patrick Alves """ # Arquivo com os parâmetros de projeção arquivo_parametros = "parametros.txt" def obter_parametros(): # Dicionário que armazena os parâmetros parametros = {} with open(arquivo_parametros, 'r' ,encoding='utf-8') as arquivo: for linha in arquivo: linha = linha.strip() if not linha: # pula linhas em branco continue if linha.startswith("#"): # pula comentários continue # Pega a primeira string antes do = e remove os espaços em branco variavel = linha.split('=')[0].replace(" ", "") # Pega a segunda string antes do = e remove os espaços em branco valor = linha.split('=')[1].replace(" ", "") # Salva variáveis e parâmetros no dicionário # a variável modelo é a única do tipo string, as demais são int ou float if variavel == 'modelo': parametros[variavel] = valor else: try: parametros[variavel] = int(valor) except: parametros[variavel] = float(valor) return parametros
gpl-3.0
8,191,328,923,808,139,000
34.166667
84
0.528063
false
ryepdx/sale_negotiated_shipping
__init__.py
1
1238
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2015 RyePDX LLC # Copyright (C) 2011 NovaPoint Group LLC (<http://www.novapointgroup.com>) # Copyright (C) 2004-2010 OpenERP SA (<http://www.openerp.com>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/> # ############################################################################## import sale_negotiated_shipping import sale import stock import account_invoice import wizard # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
2,449,599,019,303,715,000
40.3
78
0.636511
false
Ginkgo-Biloba/Misc-Python
numpy/FractalAndChaos.py
1
10149
# coding=utf-8 """ 分形与混沌绘图 """ import numpy as np import pylab as pl import time from matplotlib import cm, collections from math import log2, sin, cos """ Mandelbrot 集合 f_c(z) = z^2 + c, c \in \doubleZ Mandelbrot 集合就是使以上序列不发散的所有c点的集合。 用程序绘制 Mandelbrot 集合时不能进行无限次迭代,最简单的方法是使用逃逸时间 (迭代次数) 进行绘制,具体算法如下: 判断每次调用函数 f_ c(z) 得到的结果是否在半径 R 之内,即复数的模小于 R 记录下模大于 R 时的迭代次数 迭代最多进行 N 次 不同的迭代次数的点使用不同的颜色绘制 """ def iterPoint(c): """ 计算逃逸所需的迭代次数 最多迭代 100 次 """ z = c for i in range(1, 200): # 最多迭代 100 次 if (abs(z) > 2): # 半径大于 2 认为是逃逸 break z *= z; z += c return i # 返回迭代次数 def smoothIterPoint(c, iterN, escR): """" 为了在不同的梯度之间进行渐变处理,使用下面的公式进行逃逸时间计算 n - \log_2 \log_2 |z_n| z_n 是迭代n次之后的结果,通过在逃逸时间的计算中引入迭代结果的模值,结果将不再是整数,而是平滑渐变的。 """ z = c for i in range(1, iterN): if (abs(z) > escR): break z *= z; z += c absz = abs(z) if (absz > 2.0): mu = i - log2(log2(absz)) else: mu = i return mu def drawMandelbrot(cx, cy, d): """ 绘制点 (cx, cy) 附近正负 d 范围的 Mandelbrot 集合""" (x0, x1, y0, y1) = (cx - d, cx + d, cy - d, cy + d) (y, x) = np.ogrid[y0:y1:400j, x0:x1:400j] c = x + y * 1j start = time.time() # mdb = np.frompyfunc(iterPoint, 1, 1)(c).astype(np.float) mdb = np.frompyfunc(smoothIterPoint, 3, 1)(c, 20, 10).astype(np.float) print ("time =", time.time() - start) pl.imshow(mdb, cmap=cm.Blues_r, extent=[x0, x1, y0, y1]) pl.gca().set_axis_off() def drawMdb(cx, cy, d, N=400): """ 绘制点 (cx, cy) 附近正负 d 范围的 Mandelbrot 集合 使用 NumPy 数组运算加速计算 """ global mdb (x0, x1, y0, y1) = (cx - d, cx + d, cy - d, cy + d) (y, x) = np.ogrid[y0: y1: N*1j, x0: x1: N*1j] c = x + y * 1j # 创建 X Y 轴的坐标数组 用来保存没有逃逸的点的下标 (ix, iy) = np.mgrid[0:N, 0:N] # 创建保存 Mandelbrot 图的二维数组 默认值为最大迭代次数 100 mdb = np.ones(c.shape, dtype=np.int) * 100 # 将数组都变成一维的 ix.shape = -1; iy.shape = -1; c.shape = -1 z = c.copy() # 从 c 开始迭代 因此开始的迭代次数为 1 start = time.time() for i in range(1, 100): z *= z; z += c # 一次迭代 tmp = np.abs(z) > 2.0 # 找到所有逃逸的点 mdb[ix[tmp], iy[tmp]] = i # 将这些逃逸点的迭代次数赋值给 Mandelbrot 图 np.logical_not(tmp, tmp) # 找到所有没有逃逸的点 (ix, iy, c, z) = (ix[tmp], 
iy[tmp], c[tmp], z[tmp]) # 更新 ix iy c z 使其只包含没有逃逸的点 if (len(z) == 0): break print ("time =", time.time() - start) pl.imshow(mdb, cmap=cm.Blues_r, extent=[x0, x1, y0, y1]) pl.gca().set_axis_off() def MandelbrotDemo(): """ 展示 Mandelbrot 集合 """ (x, y) = (0.27322626, 0.595153338) pl.subplot(231) # drawMandelbrot(-0.5, 0, 1.5) drawMdb(-0.5, 0, 1.5) for i in range(2, 7): pl.subplot(230 + i) # drawMandelbrot(x, y, 0.2**(i - 1)) drawMdb(x, y, 0.2**(i - 1)) pl.subplots_adjust(0.02, 0, 0.98, 1, 0.02, 0) # pl.savefig("FractalAndChaos-2.png") pl.show() """ 迭代函数系统是一种用来创建分形图案的算法,它所创建的分形图永远是绝对自相似的。下面我们直接通过绘制一种蕨类植物的叶子来说明迭代函数系统的算法: 有下面4个线性函数将二维平面上的坐标进行线性映射变换: 1. x(n+1)= 0 y(n+1) = 0.16 * y(n) 2. x(n+1) = 0.2 * x(n) − 0.26 * y(n) y(n+1) = 0.23 * x(n) + 0.22 * y(n) + 1.6 3. x(n+1) = −0.15 * x(n) + 0.28 * y(n) y(n+1) = 0.26 * x(n) + 0.24 * y(n) + 0.44 4. x(n+1) = 0.85 * x(n) + 0.04 * y(n) y(n+1) = −0.04 * x(n) + 0.85 * y(n) + 1.6 现在的问题是有 4 个迭代函数,迭代时选择哪个函数进行计算呢?我们为每个函数指定一个概率值,它们依次为1%, 7%, 7%和85%。选择迭代函数时使用通过每个函数的概率随机选择一个函数进行迭代。上面的例子中,第四个函数被选择迭代的概率最高。 最后我们从坐标原点(0,0)开始迭代,将每次迭代所得到的坐标绘制成图,就得到了叶子的分形图案。下面的程序演示这一计算过程: """ def IFS(p, eq, init, n): """ p: 每个函数的选择概率列表 eq: 迭代函数列表 init: 迭代初始点 n: 迭代次数 返回值: 每次迭代所得的X坐标数组, Y坐标数组, 计算所用的函数下标 """ # 迭代向量的初始化 pos = np.ones(3, dtype=np.float) pos[:2] = init # 通过函数概率,计算函数的选择序列 p = np.add.accumulate(p) rands = np.random.rand(n) select = np.ones(n, dtype=np.int) * (n - 1) for (i, x) in enumerate(p[::-1]): select[rands < x] = len(p) - i - 1 # 结果的初始化 rst = np.zeros((n, 2), dtype=np.float) c = np.zeros(n, dtype=np.float) for i in range(n): eqidx = select[i] # 所选函数的下标 tmp = np.dot(eq[eqidx], pos) # 进行迭代 pos[:2] = tmp # 更新迭代向量 rst[i] = tmp; c[i] = eqidx # 保存结果 return (rst[:, 0], rst[:, 1], c) def IFSDemo(): """ 使用迭代函数系统绘制蕨类植物叶子 """ eq1 = np.array([[0,0,0],[0,0.16,0]]) p1 = 0.01 eq2 = np.array([[0.2,-0.26,0],[0.23,0.22,1.6]]) p2 = 0.07 eq3 = np.array([[-0.15, 0.28, 0],[0.26,0.24,0.44]]) p3 = 0.07 eq4 = np.array([[0.85, 0.04, 0],[-0.04, 0.85, 1.6]]) p4 = 0.85 start = 
time.time() (x, y, c) = IFS([p1, p2, p3, p4], [eq1, eq2, eq3, eq4], [0, 0], 100000) print ("time =", time.time() - start) pl.figure(figsize=(7, 7)) pl.subplot(121) pl.scatter(x, y, s=1, c="g", marker="s", linewidths=0) pl.axis("equal"); pl.axis("off") pl.subplot(122) pl.scatter(x, y, s=1, c=c, marker="s", linewidths=0) pl.axis("equal"); pl.axis("off") pl.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0) pl.gcf().patch.set_facecolor("white") # pl.savefig("FractalAndChaos-2.png") pl.show() """ 前面所绘制的分形图案都是都是使用数学函数的迭代产生,而L-System分形则是采用符号的递归迭代产生。首先如下定义几个有含义的符号: F : 向前走固定单位 + : 正方向旋转固定单位 - : 负方向旋转固定单位 使用这三个符号我们很容易描述下图中由 4 条线段构成的图案: F+F--F+F 如果将此符号串中的所有F都替换为F+F--F+F,就能得到如下的新字符串: F+F--F+F+F+F--F+F--F+F--F+F+F+F--F+F 如此替换迭代下去,并根据字串进行绘图(符号+和-分别正负旋转60度),可得到如下的分形图案:... 除了 F, +, - 之外我们再定义如下几个符号: f : 向前走固定单位,为了定义不同的迭代公式 [ : 将当前的位置入堆栈 ] : 从堆栈中读取坐标,修改当前位置 S : 初始迭代符号 所有的符号 (包括上面未定义的) 都可以用来定义迭代,通过引入两个方括号符号,使得我们能够描述分岔的图案。 例如下面的符号迭代能够绘制出一棵植物: S -> X X -> F-[[X]+X]+F[+FX]-X F -> FF 我们用一个字典定义所有的迭代公式和其它的一些绘图信息: { "X":"F-[[X]+X]+F[+FX]-X", "F":"FF", "S":"X", "direct":-45, "angle":25, "iter":6, "title":"Plant" } 其中: direct : 是绘图的初始角度,通过指定不同的值可以旋转整个图案 angle : 定义符号+,-旋转时的角度,不同的值能产生完全不同的图案 iter : 迭代次数 下面的程序将上述字典转换为需要绘制的线段坐标: """ class LSystem(object): def __init__(self, rule): info = rule["S"] for i in range(rule["iter"]): ninfo = list() for c in info: if c in rule: ninfo.append(rule[c]) else: ninfo.append(c) info = "".join(ninfo) # 迭代一次完成 self.rule = rule self.info = info def getLines(self): d = self.rule["direct"] a = self.rule["angle"] p = (0, 0) # 初始坐标 l = 1.0 # 步长 lines = list() stack = list() for c in self.info: if c in "Ff": #前进 r = d * np.pi / 180 t = (p[0] + l * cos(r), p[1] + l * sin(r)) lines.append((p, t)) p = t elif (c == "+"): # 逆时针旋转 d += a elif (c == "-"): # 顺时针旋转 d -= a elif (c == "["): stack.append((p, d)) elif (c == "]"): (p, d) = stack[-1] del stack[-1] return lines def drawLSys(ax, rule, iter=None): """ 画图 """ if iter is not None: rule["iter"] = iter 
lines = LSystem(rule).getLines() lineCll = collections.LineCollection(lines) ax.add_collection(lineCll, autolim=True) # if "title" in rule.keys(): pl.title(rule["title"]) ax.axis("equal"); ax.axis("off") ax.set_xlim(ax.dataLim.xmin, ax.dataLim.xmax) ax.invert_yaxis() def LSysDemo(): """ 演示分形绘制 """ rules = [ { "F":"F+F--F+F", "S":"F", "direct":180, "angle":60, "iter":5, "title":"Koch" }, { "X":"X+YF+", "Y":"-FX-Y", "S":"FX", "direct":0, "angle":90, "iter":13, "title":"Dragon" }, { "f":"F-f-F", "F":"f+F+f", "S":"f", "direct":0, "angle":60, "iter":7, "title":"Triangle" }, { "X":"F-[[X]+X]+F[+FX]-X", "F":"FF", "S":"X", "direct":-45, "angle":25, "iter":6, "title":"Plant" }, { "S":"X", "X":"-YF+XFX+FY-", "Y":"+XF-YFY-FX+", "direct":0, "angle":90, "iter":6, "title":"Hilbert" }, { "S":"L--F--L--F", "L":"+R-F-R+", "R":"-L+F+L-", "direct":0, "angle":45, "iter":10, "title":"Sierpinski" }, ] fig = pl.figure(figsize = (7, 5)) fig.patch.set_facecolor("white") for i in range(6): ax = fig.add_subplot(231 + i) drawLSys(ax, rules[i]) fig.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0) # pl.savefig("FractalAndChaos-3.png") pl.show() if (__name__ == "__main__"): # MandelbrotDemo() IFSDemo() # LSysDemo()
gpl-3.0
4,156,537,185,942,738,400
22.5
120
0.579037
false
mothsART/linkmanager
linkmanager/tests/interface.py
1
20597
import json from io import StringIO from unittest.mock import (patch, mock_open, MagicMock) import asyncio from linkmanager.translation import gettext as _ class CP(object): result = '' def cpvar(self, r): self.result = r cp = CP() addlink = iter([ ### input on: test_cmd_flush _('Y'), _('n'), ### input on: test_cmd_addlinks 'http://link1.com http://link2.com http://link3.com', 'link1_tag1 link1_tag2 link1_tag3', 1, 'link_1 description...', '', 'link2_tag1 link2_tag2', 5, 'link_2 description...', 'link2_title', 'link3_tag1', 'incorrect priority value', 15, 5, 'link_3 description...', False, # 'link3_title', ### input on: test_cmd_addlinks_with_update 'http://link2.com http://link3.com http://link4.com', _('n'), # like Yes '', 'link3_tag1 link3_tag2 link3_tag3', 7, 'link_3 description...', '', 'link4_tag1', 8, 'link_4 description...', '', ### input on: test_cmd_updatelinks 'http://link1.com', 'link1_tag1 link1_tag3 link1_tag4', 2, 'link_1 description...', '', ### input on: test_cmd_updatelinks_with_add 'http://link3.com http://link5.com http://link6.com', 'link3_tag1 link3_tag2 link3_tag3', 10, 'link_3 new description...', '', _('n'), _('Y'), 'link6_tag1 link6_tag2 link6_tag3', 9, 'link_6 description...', '', ### input on: test_cmd_removelinks '' ]) def get_input(string): return next(addlink) tty_i = None from linkmanager.tests import fakesettings import linkmanager fakesettings.INDENT = 4 fakesettings.ACTIVE_CACHE = False linkmanager.settings = fakesettings INDENT = fakesettings.INDENT class FakeClientResponse: url = '' @asyncio.coroutine def read(self, decode=False): return '<html><head><title>fake title of %s</title></head></html>' % self.url @asyncio.coroutine def fake_request(method, url): with open('fake.json', 'w') as f: f.write('ok') f = FakeClientResponse() f.url = url return f @asyncio.coroutine def fake_tqdm(coros, total): f = FakeClientResponse() return f @patch('linkmanager.db.aiohttp.request', fake_request) @patch('linkmanager.db.tqdm.tqdm', fake_tqdm) 
@patch('builtins.input', get_input) @patch('sys.stdout', new_callable=StringIO) def test_cmd_flush(mock_stdout): from linkmanager import interface global tty_i tty_i = interface(test=True) assert tty_i.flush() is True assert mock_stdout.getvalue() == ''.join([ _("You're about to empty the entire Database."), _("Are you sure [Y/n] ?"), _("Database entirely flushed.") + "\n" ]) mock_stdout.truncate(0) mock_stdout.seek(0) assert tty_i.flush() is False assert mock_stdout.getvalue() == ''.join([ _("You're about to empty the entire Database."), _("Are you sure [Y/n] ?") ]) class FakeUrlOpen: url = '' def read(self): return '<html><head><title>fake title of %s</title></head></html>' % self.url def fake_url_open(url): f = FakeUrlOpen() f.url = url return f @patch('linkmanager.db.aiohttp.request', fake_request) @patch('linkmanager.db.tqdm.tqdm', fake_tqdm) @patch('builtins.input', get_input) @patch('arrow.now', lambda: "2014-02-10T19:59:34.612714+01:00") @patch('urllib.request.urlopen', fake_url_open) @patch('sys.stdout', new_callable=StringIO) def test_cmd_addlinks(mock_stdout): #def test_cmd_addlinks(): tty_i.flush(forced='forced') assert mock_stdout.getvalue() == _('Database entirely flushed.') + '\n' mock_stdout.seek(0) assert tty_i.addlinks() is True assert mock_stdout.getvalue() == ''.join([ _('Give one or several links (separate with spaces)'), ' :', _('%s properties') % 'http://link1.com', ' :\n', ' ' * INDENT, _('tags (at least one, several separate with spaces)'), ' :', ' ' * INDENT, _('priority value (integer value between 1 and 10)'), ' :', ' ' * INDENT, _('give a description'), ' :', ' ' * INDENT, _('give a title'), ' :', _('%s properties') % 'http://link2.com', ' :\n', ' ' * INDENT, _('tags (at least one, several separate with spaces)'), ' :', ' ' * INDENT, _('priority value (integer value between 1 and 10)'), ' :', ' ' * INDENT, _('give a description'), ' :', ' ' * INDENT, _('give a title'), ' :', _('%s properties') % 'http://link3.com', ' :\n', ' ' * INDENT, 
_('tags (at least one, several separate with spaces)'), ' :', ' ' * INDENT, _('priority value (integer value between 1 and 10)'), ' :', ' ' * INDENT, _('priority value not range between 1 and 10, retry'), ' :', ' ' * INDENT, _('priority value not range between 1 and 10, retry'), ' :', ' ' * INDENT, _('give a description'), ' :', ' ' * INDENT, _('give a title'), ' :', ]) @patch('linkmanager.db.tqdm.tqdm', fake_tqdm) @patch('builtins.input', get_input) @patch('sys.stdout', new_callable=StringIO) @patch('arrow.now', lambda: "2014-02-14T10:22:34.612714+01:00") @patch('linkmanager.settings.AUTHOR', 'Author name') def test_cmd_addlinks_with_update(mock_stdout): assert tty_i.addlinks() is True assert mock_stdout.getvalue() == ''.join([ _('Give one or several links (separate with spaces)'), ' :', ' ' * INDENT, _( 'the link "%s" already exist: ' 'do you want to update [Y/n] ?' ) % 'http://link2.com', ' :', ' ' * INDENT, _( 'the link "%s" already exist: ' 'do you want to update [Y/n] ?' ) % 'http://link3.com', ' :', _('%s properties') % 'http://link3.com', ' :\n', ' ' * INDENT, _('tags (at least one, several separate with spaces)'), ' :', ' ' * INDENT, _('priority value (integer value between 1 and 10)'), ' :', ' ' * INDENT, _('give a description'), ' :', ' ' * INDENT, _('give a title'), ' :', _('%s properties') % 'http://link4.com', ' :\n', ' ' * INDENT, _('tags (at least one, several separate with spaces)'), ' :', ' ' * INDENT, _('priority value (integer value between 1 and 10)'), ' :', ' ' * INDENT, _('give a description'), ' :', ' ' * INDENT, _('give a title'), ' :', ]) dump_afteradd = """{ "http://link1.com": { "author": "Author name", "description": "link_1 description...", "init date": "2014-02-10T19:59:34.612714+01:00", "priority": "1", "tags": [ "link1_tag1", "link1_tag2", "link1_tag3" ] }, "http://link2.com": { "author": "Author name", "description": "link_2 description...", "init date": "2014-02-10T19:59:34.612714+01:00", "priority": "5", "tags": [ "link2_tag1", 
"link2_tag2" ], "title": "link2_title" }, "http://link3.com": { "author": "Author name", "description": "link_3 description...", "init date": "2014-02-10T19:59:34.612714+01:00", "priority": "7", "tags": [ "link3_tag1", "link3_tag2", "link3_tag3" ], "update date": "2014-02-14T10:22:34.612714+01:00" }, "http://link4.com": { "author": "Author name", "description": "link_4 description...", "init date": "2014-02-14T10:22:34.612714+01:00", "priority": "8", "tags": ["link4_tag1"] } } """ @patch('sys.stdout', new_callable=StringIO) def test_cmd_addlinks_dump(mock_stdout): assert tty_i.dump() is True assert json.loads(mock_stdout.getvalue()) == json.loads(dump_afteradd) @patch('linkmanager.db.tqdm.tqdm', fake_tqdm) @patch('builtins.input', get_input) @patch('sys.stdout', new_callable=StringIO) @patch('arrow.now', lambda: "2014-02-15T12:20:34.612714+01:00") @patch('linkmanager.settings.AUTHOR', 'Author name') def test_cmd_updatelinks(mock_stdout): assert tty_i.updatelinks() is True assert mock_stdout.getvalue() == ''.join([ _('Give one or several links (separate with spaces)'), ' :', _('%s properties') % 'http://link1.com', ' :\n', ' ' * INDENT, _('tags (at least one, several separate with spaces)'), ' :', ' ' * INDENT, _('priority value (integer value between 1 and 10)'), ' :', ' ' * INDENT, _('give a description'), ' :', ' ' * INDENT, _('give a title'), ' :', ]) @patch('linkmanager.db.tqdm.tqdm', fake_tqdm) @patch('builtins.input', get_input) @patch('sys.stdout', new_callable=StringIO) @patch('arrow.now', lambda: "2014-02-10T19:59:34.612714+01:00") @patch('linkmanager.settings.AUTHOR', 'Author name') def test_cmd_updatelinks_with_add(mock_stdout): assert tty_i.updatelinks() is True assert mock_stdout.getvalue() == ''.join([ _('Give one or several links (separate with spaces)'), ' :', _('%s properties') % 'http://link3.com', ' :\n', ' ' * INDENT, _('tags (at least one, several separate with spaces)'), ' :', ' ' * INDENT, _('priority value (integer value between 1 and 10)'), 
' :', ' ' * INDENT, _('give a description'), ' :', ' ' * INDENT, _('give a title'), ' :', ' ' * INDENT, _( 'the link "%s" does not exist: ' 'do you want to create [Y/n] ?' ) % 'http://link5.com', ' : \n', ' ' * INDENT, _( 'the link "%s" does not exist: ' 'do you want to create [Y/n] ?' ) % 'http://link6.com', ' : \n', _('%s properties') % 'http://link6.com', ' :\n', ' ' * INDENT, _('tags (at least one, several separate with spaces)'), ' :', ' ' * INDENT, _('priority value (integer value between 1 and 10)'), ' :', ' ' * INDENT, _('give a description'), ' :', ' ' * INDENT, _('give a title'), ' :' ]) dump_afterupdate = """{ "http://link1.com": { "author": "Author name", "description": "link_1 description...", "init date": "2014-02-10T19:59:34.612714+01:00", "priority": "2", "tags": [ "link1_tag1", "link1_tag3", "link1_tag4" ], "update date": "2014-02-15T12:20:34.612714+01:00" }, "http://link2.com": { "author": "Author name", "description": "link_2 description...", "init date": "2014-02-10T19:59:34.612714+01:00", "priority": "5", "tags": [ "link2_tag1", "link2_tag2" ], "title": "link2_title" }, "http://link3.com": { "author": "Author name", "description": "link_3 new description...", "init date": "2014-02-10T19:59:34.612714+01:00", "priority": "10", "tags": [ "link3_tag1", "link3_tag2", "link3_tag3" ], "update date": "2014-02-10T19:59:34.612714+01:00" }, "http://link4.com": { "author": "Author name", "description": "link_4 description...", "init date": "2014-02-14T10:22:34.612714+01:00", "priority": "8", "tags": [ "link4_tag1" ] }, "http://link6.com": { "author": "Author name", "description": "link_6 description...", "init date": "2014-02-10T19:59:34.612714+01:00", "priority": "9", "tags": [ "link6_tag1", "link6_tag2", "link6_tag3" ] } } """ @patch('sys.stdout', new_callable=StringIO) def test_cmd_updatelinks_dump(mock_stdout): assert tty_i.dump() is True assert json.loads(mock_stdout.getvalue()) == json.loads(dump_afterupdate) @patch('builtins.input', get_input) 
@patch('sys.stdout', new_callable=StringIO) def test_cmd_removelinks(mock_stdout): assert tty_i.removelinks() is False assert tty_i.removelinks([ "http://link5.com", "http://link6.com", "http://link7.com" ]) is True assert mock_stdout.getvalue() == ''.join([ _('Give one or several links (separate with spaces)'), ' :', _('the link "%s" does not exist.') % "http://link5.com" + '\n', _('the link "%s" has been deleted.') % "http://link6.com" + '\n', _('the link "%s" does not exist.') % "http://link7.com" + '\n' ]) dump_afterremove = """{ "http://link1.com": { "author": "Author name", "description": "link_1 description...", "init date": "2014-02-10T19:59:34.612714+01:00", "priority": "2", "tags": [ "link1_tag1", "link1_tag3", "link1_tag4" ], "update date": "2014-02-15T12:20:34.612714+01:00" }, "http://link2.com": { "author": "Author name", "description": "link_2 description...", "init date": "2014-02-10T19:59:34.612714+01:00", "priority": "5", "tags": [ "link2_tag1", "link2_tag2" ], "title": "link2_title" }, "http://link3.com": { "author": "Author name", "description": "link_3 new description...", "init date": "2014-02-10T19:59:34.612714+01:00", "priority": "10", "tags": [ "link3_tag1", "link3_tag2", "link3_tag3" ], "update date": "2014-02-10T19:59:34.612714+01:00" }, "http://link4.com": { "author": "Author name", "description": "link_4 description...", "init date": "2014-02-14T10:22:34.612714+01:00", "priority": "8", "tags": [ "link4_tag1" ] } } """ @patch('sys.stdout', new_callable=StringIO) def test_cmd_removelinks_dump(mock_stdout): assert tty_i.dump() is True assert json.loads(mock_stdout.getvalue()) == json.loads(dump_afterremove) @patch('sys.stdout', new_callable=StringIO) def test_cmd_load_null(mock_stdout): tty_i.flush(forced='forced') assert mock_stdout.getvalue() == _('Database entirely flushed.') + '\n' mock_stdout.truncate(0) mock_stdout.seek(0) # No file to load assert tty_i.load() is False assert mock_stdout.getvalue() == _('No file to load.') + '\n' 
first_fixture = """{ "http://linuxfr.org": { "description": "fr community ", "init date": "2014-01-27T17:45:19.985742+00:00", "priority": "8", "tags": [ "bsd", "gnu", "linux" ], "update date": "2014-01-27T17:55:19.985742+00:00" }, "http://phoronix.com": { "description": "OS benchmarkin", "init date": "2014-01-27T17:57:19.985742+00:00", "priority": "5", "tags": [ "benchmark", "linux" ], "update date": "None" }, "http://ubuntu.com": { "description": "Fr Ubuntu site", "init date": "2014-01-27T17:37:19.985742+00:00", "priority": "10", "tags": [ "linux", "python", "shell", "ubuntu" ], "update date": "None" } } """ @patch('builtins.open', mock_open(read_data=first_fixture)) @patch('os.path.isfile', lambda path: True) @patch('os.stat', lambda path: True) @patch('sys.stdout', new_callable=StringIO) def test_cmd_one_load(mock_stdout): import sys tty_i.flush(forced='forced') with open('fake.json', 'w') as f: f.write(mock_stdout.getvalue() + "\n#######\n") # One file assert tty_i.load(['file.json']) is True @patch('sys.stdout', new_callable=StringIO) def test_cmd_dump_after_one_load(mock_stdout): tty_i.dump() assert json.loads(mock_stdout.getvalue()) == json.loads(first_fixture) second_fixture = """{ "http://phoronix.com": { "description": "OS benchmarkin", "init date": "2014-01-27T17:57:19.985742+00:00", "priority": "5", "tags": [ "benchmark", "linux" ] } } """ third_fixture = """{ "http://ubuntu.com": { "description": "Fr Ubuntu site", "init date": "2014-01-27T17:37:19.985742+00:00", "priority": "10", "tags": [ "linux", "python", "shell", "ubuntu" ] } } """ fourth_fixture = """{ "http://linuxfr.org": { "description": "fr community ", "init date": "2014-01-27T17:45:19.985742+00:00", "priority": "8", "tags": [ "bsd", "gnu", "linux" ], "update date": "2014-01-27T17:55:19.985742+00:00" }, "http://xkcd.com": { "description": "A webcomic of romance ...", "init date": "2014-02-06T17:37:19.985742+00:00", "priority": "5", "tags": [ "bsd", "joke", "linux", "math" ] } } """ 
fifth_fixture = """{ "http://linuxfr.org": { "description": "fr community ", "init date": "2014-01-27T17:45:19.985742+00:00", "priority": "8", "tags": [ "bsd", "gnu", "linux" ], "update date": "2014-01-27T17:55:19.985742+00:00" }, "http://phoronix.com": { "description": "OS benchmarkin", "init date": "2014-01-27T17:57:19.985742+00:00", "priority": "5", "tags": [ "benchmark", "linux" ] }, "http://ubuntu.com": { "description": "Fr Ubuntu site", "init date": "2014-01-27T17:37:19.985742+00:00", "priority": "10", "tags": [ "linux", "python", "shell", "ubuntu" ] }, "http://xkcd.com": { "description": "A webcomic of romance ...", "init date": "2014-02-06T17:37:19.985742+00:00", "priority": "5", "tags": [ "bsd", "joke", "linux", "math" ] } } """ files = iter([second_fixture, third_fixture, fourth_fixture]) def multi_mock_open(mock=None, read_data=''): """ Inspiration by the mock_open function and http://stackoverflow.com/questions/9349122/python-mock-mocking-several-open """ import _io file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) if mock is None: mock = MagicMock(name='open', spec=open) handle = MagicMock(spec=file_spec) handle.write.return_value = None handle.__enter__.return_value = handle handle.read.side_effect = lambda: next(files) mock.return_value = handle return mock @patch('os.path.isfile', lambda path: True) @patch('os.stat', lambda path: True) @patch('builtins.open', multi_mock_open()) @patch('sys.stdout', new_callable=StringIO) def test_cmd_multi_load(mock_stdout): tty_i.flush(forced='forced') # Several files assert tty_i.load(json_files=[ 'file_1.json', 'file_2.json', 'file_3.json' ]) is True @patch('sys.stdout', new_callable=StringIO) def test_cmd_dump_after_multi_load(mock_stdout): assert tty_i.dump() is True assert json.loads(mock_stdout.getvalue()) == json.loads(fifth_fixture) @patch('sys.stdout', new_callable=StringIO) def test_cmd_searchlinks_allresult(mock_stdout): # def test_cmd_searchlinks_allresult(): assert 
tty_i.searchlinks() is True assert mock_stdout.getvalue() == ''.join([ _('%s links totally founded') % '4', ' : \n', ' ' * INDENT, ' 1 ➤ http://ubuntu.com\n', ' ' * INDENT, ' 2 ➤ http://linuxfr.org\n', ' ' * INDENT, ' 3 ➤ http://phoronix.com\n', ' ' * INDENT, ' 4 ➤ http://xkcd.com\n' ]) @patch('sys.stdout', new_callable=StringIO) def test_cmd_searchlinks_noresult(mock_stdout): assert tty_i.searchlinks(['nothing']) is False assert mock_stdout.getvalue() == _('No links founded') + '. \n' @patch('sys.stdout', new_callable=StringIO) def test_cmd_searchlinks(mock_stdout): assert tty_i.searchlinks(['bsd']) is True assert mock_stdout.getvalue() == ''.join([ _('%s links founded') % '2', ' : \n', ' ' * INDENT, ' 1 ➤ http://linuxfr.org\n', ' ' * INDENT, ' 2 ➤ http://xkcd.com\n' ]) # def test_print_dump(): # print('\n') # tty_i.dump() # with open('fake.json', 'w') as f: # f.write(mock_stdout.getvalue())
bsd-2-clause
5,549,153,208,129,339,000
27.43232
85
0.52548
false
pytlakp/intranetref
src/intranet3/views/scrum/sprint.py
1
11968
import json import datetime import markdown from pyramid.view import view_config from pyramid.httpexceptions import HTTPFound, HTTPForbidden, HTTPNotFound from pyramid.response import Response from intranet3.utils.views import BaseView from intranet3.forms.scrum import SprintForm from intranet3.forms.common import DeleteForm from intranet3.models import Sprint, ApplicationConfig, Tracker, User, Project from intranet3 import helpers as h from intranet3.log import INFO_LOG, ERROR_LOG from intranet3.lib.scrum import SprintWrapper, get_velocity_chart_data, move_blocked_to_the_end from intranet3.lib.times import TimesReportMixin, Row from intranet3.lib.bugs import Bugs from intranet3.forms.times import ProjectTimeForm from intranet3.forms.scrum import SprintListFilterForm LOG = INFO_LOG(__name__) ERROR = ERROR_LOG(__name__) @view_config(route_name='scrum_sprint_list', permission='client') class List(BaseView): def get(self): client = self.request.user.get_client() form = SprintListFilterForm(self.request.GET, client=client) active_only = form.active_only.data limit = form.limit.data or 10 project = None sprints = Sprint.query.order_by(Sprint.start.desc()) all_sprints = None if form.project_id.data and form.project_id.data != 'None': project_id = int(form.project_id.data) project = Project.query.get(project_id) sprints = sprints.filter(Sprint.project_id == project_id) all_sprints = Sprint.query.order_by(Sprint.start)\ .filter(Sprint.project_id == project_id) if client: sprints = sprints.filter(Sprint.client_id == client.id) if all_sprints: all_sprints = all_sprints.filter(Sprint.client_id == client.id) if active_only: sprints = sprints.filter(Sprint.end >= datetime.date.today()) if limit: sprints.limit(limit) sprints = sprints.all() if all_sprints: velocity_chart_data = get_velocity_chart_data(all_sprints) if sprints: stats = dict( worked_hours=sum([s.worked_hours for s in sprints]) / len(sprints), achieved=sum([s.achieved_points for s in sprints]) / len(sprints), 
commited=sum([s.commited_points for s in sprints]) / len(sprints), velocity=sum([s.velocity for s in sprints]) / len(sprints), ) else: stats = None return dict( sprints=sprints, form=form, velocity_chart_data=velocity_chart_data if all_sprints else None, stats=stats, project=project ) class FetchBugsMixin(object): def _fetch_bugs(self, sprint): config_obj = ApplicationConfig.get_current_config() user = User.query.get(config_obj.hours_ticket_user_id) bugs = Bugs(self.request, user).get_sprint(sprint) return bugs class ClientProtectionMixin(object): def protect(self): if not self.request.is_user_in_group('client'): return sprint_id = self.request.GET.get('sprint_id') sprint = Sprint.query.get(sprint_id) client = self.request.user.get_client() self.v['sprint'] = sprint self.v['client'] = client if client.id != sprint.client_id: raise HTTPForbidden() @view_config(route_name='scrum_sprint_field', permission='client') class Field(ClientProtectionMixin, BaseView): def get(self): field = self.request.GET.get('field') sprint_id = self.request.GET.get('sprint_id') sprint = Sprint.query.get(sprint_id) if field == 'retrospective_note': result = sprint.retrospective_note header = 'Retrospective note' else: raise HTTPNotFound md = markdown.Markdown() result = md.convert(result) result = '<h2 class="content-header">%s</h2>%s' % (header, result) return Response(result) class BaseSprintView(BaseView): def tmpl_ctx(self): session = self.session sprint = self.v.get('sprint') if not sprint: sprint_id = self.request.GET.get('sprint_id') sprint = Sprint.query.get(sprint_id) project = Project.query.get(sprint.project_id) self.v['project'] = project self.v['sprint'] = sprint last_sprint = session.query(Sprint)\ .filter(Sprint.project_id==sprint.project_id)\ .filter(Sprint.start<sprint.start)\ .order_by(Sprint.start.desc()).first() return dict( project=project, sprint=sprint, last_sprint=last_sprint, ) @view_config(route_name='scrum_sprint_show', permission='client') class 
Show(ClientProtectionMixin, FetchBugsMixin, BaseSprintView): def get(self): sprint = self.v['sprint'] bugs = self._fetch_bugs(sprint) bugs = sorted(bugs, cmp=h.sorting_by_priority) bugs = move_blocked_to_the_end(bugs) tracker = Tracker.query.get(sprint.project.tracker_id) sw = SprintWrapper(sprint, bugs, self.request) return dict( tracker=tracker, bugs=sw.bugs, info=sw.get_info(), ) @view_config(route_name='scrum_sprint_board', permission='client') class Board(ClientProtectionMixin, FetchBugsMixin, BaseSprintView): def get(self): sprint = self.v['sprint'] bugs = self._fetch_bugs(sprint) sw = SprintWrapper(sprint, bugs, self.request) board = sw.get_board() return dict( board=board, info=sw.get_info(), bug_list_url=lambda bugs_list: sprint.project.get_bug_list_url( [bug.id for bugs in bugs_list.values() for bug in bugs] ), ) @view_config(route_name='scrum_sprint_times', permission='client') class Times(ClientProtectionMixin, TimesReportMixin, FetchBugsMixin, BaseSprintView): def dispatch(self): sprint = self.v['sprint'] bugs = self._fetch_bugs(sprint) sw = SprintWrapper(sprint, bugs, self.request) client = self.request.user.get_client() form = ProjectTimeForm(self.request.GET, client=client) if not self.request.GET.get('submited'): # ugly hack form.group_by_bugs.data = True form.group_by_user.data = True if not form.validate(): return dict(form=form, sprint=sprint) group_by = True, True, form.group_by_bugs.data, form.group_by_user.data uber_query = self._prepare_uber_query_for_sprint(sprint, bugs) entries = uber_query.all() if self.request.GET.get('excel'): from intranet3.lib.times import dump_entries_to_excel file, response = dump_entries_to_excel(entries) return response entries_sum = sum([e[-1] for e in entries]) participation_of_workers = self._get_participation_of_workers(entries) tickets_id = ','.join([str(e[2]) for e in entries]) trackers_id = ','.join([str(e[4].id) for e in entries]) rows = Row.from_ordered_data(entries, group_by) return dict( rows=rows, 
entries_sum=entries_sum, form=form, info=sw.get_info(), participation_of_workers=participation_of_workers, participation_of_workers_sum=sum([time[1] for time in participation_of_workers]), trackers_id=trackers_id, tickets_id=tickets_id, ) @view_config(route_name='scrum_sprint_charts', permission='client') class Charts(ClientProtectionMixin, FetchBugsMixin, BaseSprintView): def get(self): sprint = self.v['sprint'] bugs = self._fetch_bugs(sprint) sw = SprintWrapper(sprint, bugs, self.request) burndown = sw.get_burndown_data() tracker = Tracker.query.get(sprint.project.tracker_id) entries, sum_ = sw.get_worked_hours() entries.insert(0, ('Employee', 'Time')) piechart_data = json.dumps(entries) return dict( tracker=tracker, bugs=bugs, charts_data=json.dumps(burndown), piechart_data=piechart_data, info=sw.get_info(), ) @view_config(route_name='scrum_sprint_retros', permission='client') class Retros(ClientProtectionMixin, FetchBugsMixin, BaseSprintView): def get(self): session = self.session sprint = self.v['sprint'] bugs = self._fetch_bugs(sprint) sw = SprintWrapper(sprint, bugs, self.request) sprints = session.query(Sprint) \ .filter(Sprint.project_id==sprint.project_id) \ .order_by(Sprint.start.desc()) return dict( bugs=bugs, info=sw.get_info(), sprints=sprints, ) @view_config(route_name='scrum_sprint_edit', permission='coordinator') class Edit(BaseView): def dispatch(self): sprint_id = self.request.GET.get('sprint_id') sprint = Sprint.query.get(sprint_id) form = SprintForm(self.request.POST, obj=sprint) if self.request.method == 'POST' and form.validate(): project_id = int(form.project_id.data) project = Project.query.get(project_id) sprint.name = form.name.data sprint.client_id = project.client_id sprint.project_id = project.id sprint.start = form.start.data sprint.end = form.end.data sprint.goal = form.goal.data sprint.retrospective_note = form.retrospective_note.data self.session.add(sprint) self.flash(self._(u"Sprint edited")) LOG(u"Sprint edited") url = 
self.request.url_for('/scrum/sprint/show', sprint_id=sprint.id) return HTTPFound(location=url) return dict( form=form, sprint=sprint ) @view_config(route_name='scrum_sprint_add', permission='coordinator') class Add(BaseView): def dispatch(self): form = SprintForm(self.request.POST) if self.request.method == 'POST' and form.validate(): project_id = int(form.project_id.data) project = Project.query.get(project_id) sprint = Sprint( name=form.name.data, client_id=project.client_id, project_id=project.id, start=form.start.data, end=form.end.data, goal=form.goal.data, retrospective_note = form.retrospective_note.data, ) self.session.add(sprint) self.session.flush() self.flash(self._(u"New sprint added")) LOG(u"Sprint added") url = self.request.url_for('/scrum/sprint/show', sprint_id=sprint.id) return HTTPFound(location=url) return dict( form=form ) @view_config(route_name='scrum_sprint_delete', renderer='intranet3:templates/common/delete.html', permission='coordinator') class Delete(BaseView): def dispatch(self): sprint_id = self.request.GET.get('sprint_id') sprint = Sprint.query.get(sprint_id) form = DeleteForm(self.request.POST) if self.request.method == 'POST' and form.validate(): self.session.delete(sprint) back_url = self.request.url_for('/scrum/sprint/list') return HTTPFound(location=back_url) return dict( type_name=u'sprint', title=u'%s' % sprint.name, url=self.request.url_for('/scrum/sprint/delete', sprint_id=sprint.id), back_url=self.request.url_for('/scrum/sprint/list'), form=form )
mit
3,481,813,588,570,232,000
34.725373
95
0.601939
false
viktorTarasov/PyKMIP
kmip/tests/unit/core/messages/contents/test_protocol_version.py
1
9321
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from testtools import TestCase from kmip.core.messages.contents import ProtocolVersion from kmip.core.utils import BytearrayStream class TestProtocolVersion(TestCase): def setUp(self): super(TestProtocolVersion, self).setUp() self.major_default = ProtocolVersion.ProtocolVersionMajor() self.minor_default = ProtocolVersion.ProtocolVersionMinor() self.major = ProtocolVersion.ProtocolVersionMajor(1) self.minor = ProtocolVersion.ProtocolVersionMinor(1) self.encoding_default = BytearrayStream(( b'\x42\x00\x69\x01\x00\x00\x00\x20\x42\x00\x6A\x02\x00\x00\x00\x04' b'\x00\x00\x00\x00\x00\x00\x00\x00\x42\x00\x6B\x02\x00\x00\x00\x04' b'\x00\x00\x00\x00\x00\x00\x00\x00')) self.encoding = BytearrayStream(( b'\x42\x00\x69\x01\x00\x00\x00\x20\x42\x00\x6A\x02\x00\x00\x00\x04' b'\x00\x00\x00\x01\x00\x00\x00\x00\x42\x00\x6B\x02\x00\x00\x00\x04' b'\x00\x00\x00\x01\x00\x00\x00\x00')) def tearDown(self): super(TestProtocolVersion, self).tearDown() def _test_init(self, protocol_version_major, protocol_version_minor): protocol_version = ProtocolVersion( protocol_version_major, protocol_version_minor) if protocol_version_major is None: self.assertEqual(ProtocolVersion.ProtocolVersionMajor(), protocol_version.protocol_version_major) else: self.assertEqual(protocol_version_major, protocol_version.protocol_version_major) if protocol_version_minor is None: 
self.assertEqual(ProtocolVersion.ProtocolVersionMinor(), protocol_version.protocol_version_minor) else: self.assertEqual(protocol_version_minor, protocol_version.protocol_version_minor) def test_init_with_none(self): self._test_init(None, None) def test_init_with_args(self): major = ProtocolVersion.ProtocolVersionMajor(1) minor = ProtocolVersion.ProtocolVersionMinor(0) self._test_init(major, minor) def test_validate_on_invalid_protocol_version_major(self): major = "invalid" minor = ProtocolVersion.ProtocolVersionMinor(0) args = [major, minor] self.assertRaisesRegexp( TypeError, "invalid protocol version major", self._test_init, *args) def test_validate_on_invalid_protocol_version_minor(self): major = ProtocolVersion.ProtocolVersionMajor(1) minor = "invalid" args = [major, minor] self.assertRaisesRegexp( TypeError, "invalid protocol version minor", self._test_init, *args) def _test_read(self, stream, major, minor): protocol_version = ProtocolVersion() protocol_version.read(stream) msg = "protocol version major decoding mismatch" msg += "; expected {0}, received {1}".format( major, protocol_version.protocol_version_major) self.assertEqual(major, protocol_version.protocol_version_major, msg) msg = "protocol version minor decoding mismatch" msg += "; expected {0}, received {1}".format( minor, protocol_version.protocol_version_minor) self.assertEqual(minor, protocol_version.protocol_version_minor, msg) def test_read_with_none(self): self._test_read(self.encoding_default, self.major_default, self.minor_default) def test_read_with_args(self): self._test_read(self.encoding, self.major, self.minor) def _test_write(self, stream_expected, major, minor): stream_observed = BytearrayStream() protocol_version = ProtocolVersion(major, minor) protocol_version.write(stream_observed) length_expected = len(stream_expected) length_observed = len(stream_observed) msg = "encoding lengths not equal" msg += "; expected {0}, received {1}".format( length_expected, length_observed) 
self.assertEqual(length_expected, length_observed, msg) msg = "encoding mismatch" msg += ";\nexpected:\n{0}\nreceived:\n{1}".format( stream_expected, stream_observed) self.assertEqual(stream_expected, stream_observed, msg) def test_write_with_none(self): self._test_write(self.encoding_default, self.major_default, self.minor_default) def test_write_with_args(self): self._test_write(self.encoding, self.major, self.minor) def test_equal_on_equal(self): a = ProtocolVersion.create(1, 0) b = ProtocolVersion.create(1, 0) self.assertTrue(a == b) def test_equal_on_not_equal(self): a = ProtocolVersion.create(1, 0) b = ProtocolVersion.create(0, 1) self.assertFalse(a == b) def test_equal_on_type_mismatch(self): a = ProtocolVersion.create(1, 0) b = "invalid" self.assertFalse(a == b) def test_not_equal_on_equal(self): a = ProtocolVersion.create(1, 0) b = ProtocolVersion.create(1, 0) self.assertFalse(a != b) def test_not_equal_on_not_equal(self): a = ProtocolVersion.create(1, 0) b = ProtocolVersion.create(0, 1) self.assertTrue(a != b) def test_not_equal_on_type_mismatch(self): a = ProtocolVersion.create(1, 0) b = "invalid" self.assertTrue(a != b) def test_less_than(self): """ Test that the less than operator returns True/False when comparing two different ProtocolVersions. """ a = ProtocolVersion.create(1, 0) b = ProtocolVersion.create(1, 1) c = ProtocolVersion.create(2, 0) d = ProtocolVersion.create(0, 2) self.assertTrue(a < b) self.assertFalse(b < a) self.assertFalse(a < a) self.assertTrue(a < c) self.assertFalse(c < a) self.assertFalse(c < d) self.assertTrue(d < c) def test_greater_than(self): """ Test that the greater than operator returns True/False when comparing two different ProtocolVersions. 
""" a = ProtocolVersion.create(1, 0) b = ProtocolVersion.create(1, 1) c = ProtocolVersion.create(2, 0) d = ProtocolVersion.create(0, 2) self.assertFalse(a > b) self.assertTrue(b > a) self.assertFalse(a > a) self.assertFalse(a > c) self.assertTrue(c > a) self.assertTrue(c > d) self.assertFalse(d > c) def test_less_than_or_equal(self): """ Test that the less than or equal operator returns True/False when comparing two different ProtocolVersions. """ a = ProtocolVersion.create(1, 0) b = ProtocolVersion.create(1, 1) c = ProtocolVersion.create(2, 0) d = ProtocolVersion.create(0, 2) self.assertTrue(a <= b) self.assertFalse(b <= a) self.assertTrue(a <= a) self.assertTrue(a <= c) self.assertFalse(c <= a) self.assertFalse(c <= d) self.assertTrue(d <= c) def test_greater_than_or_equal(self): """ Test that the greater than or equal operator returns True/False when comparing two different ProtocolVersions. """ a = ProtocolVersion.create(1, 0) b = ProtocolVersion.create(1, 1) c = ProtocolVersion.create(2, 0) d = ProtocolVersion.create(0, 2) self.assertFalse(a >= b) self.assertTrue(b >= a) self.assertTrue(a >= a) self.assertFalse(a >= c) self.assertTrue(c >= a) self.assertTrue(c >= d) self.assertFalse(d >= c) def test_repr(self): a = ProtocolVersion.create(1, 0) self.assertEqual("1.0", "{0}".format(a)) def _test_create(self, major, minor): protocol_version = ProtocolVersion.create(major, minor) if major is None: expected = ProtocolVersion.ProtocolVersionMajor() else: expected = ProtocolVersion.ProtocolVersionMajor(major) self.assertEqual(expected, protocol_version.protocol_version_major) if minor is None: expected = ProtocolVersion.ProtocolVersionMinor() else: expected = ProtocolVersion.ProtocolVersionMinor(minor) self.assertEqual(expected, protocol_version.protocol_version_minor) def test_create_with_none(self): self._test_create(None, None) def test_create_with_args(self): self._test_create(1, 0)
apache-2.0
-7,470,747,633,934,984,000
33.522222
79
0.62622
false
google/dotty
efilter_tests/unit/protocols/repeated.py
1
3524
# EFILTER Forensic Query Language # # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ EFILTER test suite. """ __author__ = "Adam Sindelar <adamsh@google.com>" import six import unittest from efilter.protocols import repeated class RepeatedTest(unittest.TestCase): def assertValueEq(self, x, y): return self.assertTrue(repeated.value_eq(x, y)) def testCreation(self): """Test that creation is reasonable.""" # This should make a repeated var of two values. r = repeated.repeated("foo", "bar") # It should be a repeated var. self.assertIsInstance(r, repeated.IRepeated) # And also have more than one value. self.assertTrue(repeated.isrepeating(r)) # Repeating a single value will still create a repeated var. r = repeated.repeated("foo") self.assertIsInstance(r, repeated.IRepeated) # But it won't be repeating (have more than one value). self.assertFalse(repeated.isrepeating(r)) # Using meld will just return a scalar on one value. r = repeated.meld("foo") self.assertIsInstance(r, six.string_types) # Meld on two values has the same behavior as repeated. r = repeated.meld("foo", "foo") self.assertIsInstance(r, repeated.IRepeated) def testNulls(self): r = None for _ in repeated.getvalues(r): # Should be zero elements but not raise. self.assertFail() r = repeated.meld(None, None) # None should get skipped. 
for _ in repeated.getvalues(r): self.assertFail() def testTypes(self): """Test that types are correctly derived and enforced.""" with self.assertRaises(TypeError): repeated.repeated(1, "foo") with self.assertRaises(TypeError): repeated.meld(1, "foo") def testNesting(self): """Test that repeated vars remain flat.""" r = repeated.repeated("foo", "bar") r = repeated.repeated(r, "baz") self.assertValueEq(repeated.repeated("foo", "bar", "baz"), r) r = repeated.repeated("zoo", r) self.assertValueEq(repeated.repeated("zoo", "foo", "bar", "baz"), r) # value_eq should ignore order. self.assertValueEq(repeated.repeated("bar", "foo", "baz", "zoo"), r) # Order should be preserved for getvalues, though. self.assertEqual(repeated.getvalues(r), ["zoo", "foo", "bar", "baz"]) self.assertEqual(repeated.value_type(r), type("foo")) def testApplication(self): """Test function application across values.""" self.assertEqual( repeated.repeated(2, 4), repeated.value_apply( repeated.repeated(1, 2), lambda x: x * 2)) # As everything working on values, this should also work on scalars. applied = repeated.value_apply(5, lambda x: x * 2) self.assertValueEq(10, applied)
apache-2.0
-5,256,765,739,999,268,000
33.54902
77
0.637911
false
ikumen/project-euler
solutions/016.py
1
1723
#!/usr/bin/env python ''' 016.py: https://projecteuler.net/problem=16 Power digit sum 2^15 = 32768 and the sum of its digits is 3 + 2 + 7 + 6 + 8 = 26. What is the sum of the digits of the number 2^1000? ''' import os import pytest import time import math def power_digit_sum(N, P): """ Calculates the sum of digits for a number 'N' raise to power 'P'. Basic algorithm is to apply long multiplication, storing the results in two arrays, one for current digits, and other a tmp. N must be between 2 <= N <= 9 """ # To calculate the size of the array that can hold all our digits, I used # the following formula (P * Log10(N)) if N > 9 or N < 2 or P < 1: return None d_size = math.ceil(P * math.log(N, 10)) digits = [None] * d_size tmp_digits = [None] * d_size # Set our ones column for long multiplication, and assign our first value ones_place = d_size - 1 digits[ones_place] = N # Multiply N P-1 times, since we set our initial N in ones_place for i in range(1, P): j = ones_place carry = 0 while digits[j] != None and j >= 0: product = carry + (digits[j] * N) if product >= 10: tmp_digits[j] = product % 10 carry = math.floor(product / 10) tmp_digits[j-1] = carry else: tmp_digits[j] = product carry = 0 j -= 1 tmp = digits digits = tmp_digits tmp_digits = tmp return sum(filter(None, digits)) def test_solution(): '''Test''' assert 25 == power_digit_sum(5, 8) assert 26 == power_digit_sum(2, 15) def main(): '''Main runner, delegates to solution.''' #4,782,969 # 5, 3, 1, 4, 4, 1 print(power_digit_sum(2, 1000)) if __name__ == '__main__': start_time = time.time() main() print("--- %s seconds ---" % (time.time() - start_time))
mit
8,554,294,037,166,094,000
21.671053
74
0.631457
false
cjaymes/pyscap
src/scap/model/oval_5/defs/independent/VariableStateElement.py
1
1216
# Copyright 2016 Casey Jaymes # This file is part of PySCAP. # # PySCAP is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # PySCAP is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with PySCAP. If not, see <http://www.gnu.org/licenses/>. import logging from scap.model.oval_5.defs.independent.StateType import StateType logger = logging.getLogger(__name__) class VariableStateElement(StateType): MODEL_MAP = { 'tag_name': 'variable_state', 'elements': [ {'tag_name': 'var_ref', 'class': 'scap.model.oval_5.defs.EntityStateType', 'min': 0, 'max': 1, 'value_pattern': r'(oval:[A-Za-z0-9_\-\.]+:var:[1-9][0-9]*){0,}'}, {'tag_name': 'value', 'class': 'scap.model.oval_5.defs.EntityStateType', 'min': 0, 'max': 1}, ], }
gpl-3.0
-5,975,244,660,325,065,000
39.533333
173
0.683388
false
Kotaimen/stonemason
tests/formatbundle/test_tileformat.py
1
1177
# -*- encoding: utf-8 -*- __author__ = 'kotaimen' __date__ = '2/19/15' import unittest from stonemason.formatbundle import TileFormat, InvalidTileFormat class TestTileFormat(unittest.TestCase): def test_init(self): fmt = TileFormat(format='JPEG') self.assertEqual(fmt.format, 'JPEG') self.assertEqual(fmt.mimetype, 'image/jpeg') self.assertEqual(fmt.extension, '.jpg') self.assertEqual(fmt.parameters, {}) def test_repr(self): self.assertEqual(str(TileFormat('JPEG')), 'TileFormat(JPEG|image/jpeg|.jpg)') def test_init2(self): fmt = TileFormat(format='JPEG', mimetype='image/jpg', extension='.jpeg', parameters={'quality': 80, 'optimized': True}) self.assertEqual(fmt.format, 'JPEG') self.assertEqual(fmt.mimetype, 'image/jpg') self.assertEqual(fmt.extension, '.jpeg') self.assertDictEqual(fmt.parameters, {'quality': 80, 'optimized': True}) def test_initfail(self): self.assertRaises(InvalidTileFormat, TileFormat, format='foobar') if __name__ == '__main__': unittest.main()
mit
-7,065,150,623,025,478,000
30.810811
80
0.610025
false
bhupennewalkar1337/erpnext
erpnext/accounts/doctype/purchase_invoice/purchase_invoice.py
1
26311
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe.utils import cint, formatdate, flt, getdate from frappe import _, throw from erpnext.setup.utils import get_company_currency import frappe.defaults from erpnext.controllers.buying_controller import BuyingController from erpnext.accounts.party import get_party_account, get_due_date from erpnext.accounts.utils import get_account_currency, get_fiscal_year from erpnext.stock.doctype.purchase_receipt.purchase_receipt import update_billed_amount_based_on_po from erpnext.controllers.stock_controller import get_warehouse_account from erpnext.accounts.general_ledger import make_gl_entries, merge_similar_entries, delete_gl_entries from erpnext.accounts.doctype.gl_entry.gl_entry import update_outstanding_amt form_grid_templates = { "items": "templates/form_grid/item_grid.html" } class PurchaseInvoice(BuyingController): def __init__(self, arg1, arg2=None): super(PurchaseInvoice, self).__init__(arg1, arg2) self.status_updater = [{ 'source_dt': 'Purchase Invoice Item', 'target_dt': 'Purchase Order Item', 'join_field': 'po_detail', 'target_field': 'billed_amt', 'target_parent_dt': 'Purchase Order', 'target_parent_field': 'per_billed', 'target_ref_field': 'amount', 'source_field': 'amount', 'percent_join_field': 'purchase_order', 'overflow_type': 'billing' }] def validate(self): if not self.is_opening: self.is_opening = 'No' super(PurchaseInvoice, self).validate() if not self.is_return: self.po_required() self.pr_required() self.validate_supplier_invoice() # validate cash purchase if (self.is_paid == 1): self.validate_cash() self.check_conversion_rate() self.validate_credit_to_acc() self.clear_unallocated_advances("Purchase Invoice Advance", "advances") self.check_for_closed_status() self.validate_with_previous_doc() self.validate_uom_is_integer("uom", "qty") 
self.set_expense_account(for_validate=True) self.set_against_expense_account() self.validate_write_off_account() self.validate_multiple_billing("Purchase Receipt", "pr_detail", "amount", "items") self.validate_fixed_asset() self.validate_fixed_asset_account() self.create_remarks() self.set_status() def validate_cash(self): if not self.cash_bank_account and flt(self.paid_amount): frappe.throw(_("Cash or Bank Account is mandatory for making payment entry")) if flt(self.paid_amount) + flt(self.write_off_amount) \ - flt(self.grand_total) > 1/(10**(self.precision("base_grand_total") + 1)): frappe.throw(_("""Paid amount + Write Off Amount can not be greater than Grand Total""")) def create_remarks(self): if not self.remarks: if self.bill_no and self.bill_date: self.remarks = _("Against Supplier Invoice {0} dated {1}").format(self.bill_no, formatdate(self.bill_date)) else: self.remarks = _("No Remarks") def set_missing_values(self, for_validate=False): if not self.credit_to: self.credit_to = get_party_account("Supplier", self.supplier, self.company) if not self.due_date: self.due_date = get_due_date(self.posting_date, "Supplier", self.supplier, self.company) super(PurchaseInvoice, self).set_missing_values(for_validate) def check_conversion_rate(self): default_currency = get_company_currency(self.company) if not default_currency: throw(_('Please enter default currency in Company Master')) if (self.currency == default_currency and flt(self.conversion_rate) != 1.00) or not self.conversion_rate or (self.currency != default_currency and flt(self.conversion_rate) == 1.00): throw(_("Conversion rate cannot be 0 or 1")) def validate_credit_to_acc(self): account = frappe.db.get_value("Account", self.credit_to, ["account_type", "report_type", "account_currency"], as_dict=True) if account.report_type != "Balance Sheet": frappe.throw(_("Credit To account must be a Balance Sheet account")) if self.supplier and account.account_type != "Payable": frappe.throw(_("Credit To account must be 
a Payable account")) self.party_account_currency = account.account_currency def check_for_closed_status(self): check_list = [] pc_obj = frappe.get_doc('Purchase Common') for d in self.get('items'): if d.purchase_order and not d.purchase_order in check_list and not d.purchase_receipt: check_list.append(d.purchase_order) pc_obj.check_for_closed_status('Purchase Order', d.purchase_order) def validate_with_previous_doc(self): super(PurchaseInvoice, self).validate_with_previous_doc({ "Purchase Order": { "ref_dn_field": "purchase_order", "compare_fields": [["supplier", "="], ["company", "="], ["currency", "="]], }, "Purchase Order Item": { "ref_dn_field": "po_detail", "compare_fields": [["project", "="], ["item_code", "="], ["uom", "="]], "is_child_table": True, "allow_duplicate_prev_row_id": True }, "Purchase Receipt": { "ref_dn_field": "purchase_receipt", "compare_fields": [["supplier", "="], ["company", "="], ["currency", "="]], }, "Purchase Receipt Item": { "ref_dn_field": "pr_detail", "compare_fields": [["project", "="], ["item_code", "="], ["uom", "="]], "is_child_table": True } }) if cint(frappe.db.get_single_value('Buying Settings', 'maintain_same_rate')) and not self.is_return: self.validate_rate_with_reference_doc([ ["Purchase Order", "purchase_order", "po_detail"], ["Purchase Receipt", "purchase_receipt", "pr_detail"] ]) def validate_warehouse(self): if self.update_stock: for d in self.get('items'): if not d.warehouse: frappe.throw(_("Warehouse required at Row No {0}").format(d.idx)) super(PurchaseInvoice, self).validate_warehouse() def set_expense_account(self, for_validate=False): auto_accounting_for_stock = cint(frappe.defaults.get_global_default("auto_accounting_for_stock")) if auto_accounting_for_stock: stock_not_billed_account = self.get_company_default("stock_received_but_not_billed") stock_items = self.get_stock_items() if self.update_stock: self.validate_warehouse() warehouse_account = get_warehouse_account() for item in self.get("items"): # in case 
of auto inventory accounting, # expense account is always "Stock Received But Not Billed" for a stock item # except epening entry, drop-ship entry and fixed asset items if auto_accounting_for_stock and item.item_code in stock_items \ and self.is_opening == 'No' and not item.is_fixed_asset \ and (not item.po_detail or not frappe.db.get_value("Purchase Order Item", item.po_detail, "delivered_by_supplier")): if self.update_stock: item.expense_account = warehouse_account[item.warehouse]["name"] else: item.expense_account = stock_not_billed_account elif not item.expense_account and for_validate: throw(_("Expense account is mandatory for item {0}").format(item.item_code or item.item_name)) def set_against_expense_account(self): against_accounts = [] for item in self.get("items"): if item.expense_account not in against_accounts: against_accounts.append(item.expense_account) self.against_expense_account = ",".join(against_accounts) def po_required(self): if frappe.db.get_value("Buying Settings", None, "po_required") == 'Yes': for d in self.get('items'): if not d.purchase_order: throw(_("Purchse Order number required for Item {0}").format(d.item_code)) def pr_required(self): stock_items = self.get_stock_items() if frappe.db.get_value("Buying Settings", None, "pr_required") == 'Yes': for d in self.get('items'): if not d.purchase_receipt and d.item_code in stock_items: throw(_("Purchase Receipt number required for Item {0}").format(d.item_code)) def validate_write_off_account(self): if self.write_off_amount and not self.write_off_account: throw(_("Please enter Write Off Account")) def check_prev_docstatus(self): for d in self.get('items'): if d.purchase_order: submitted = frappe.db.sql("select name from `tabPurchase Order` where docstatus = 1 and name = %s", d.purchase_order) if not submitted: frappe.throw(_("Purchase Order {0} is not submitted").format(d.purchase_order)) if d.purchase_receipt: submitted = frappe.db.sql("select name from `tabPurchase Receipt` where docstatus 
= 1 and name = %s", d.purchase_receipt) if not submitted: frappe.throw(_("Purchase Receipt {0} is not submitted").format(d.purchase_receipt)) def update_status_updater_args(self): if cint(self.update_stock): self.status_updater.extend([{ 'source_dt': 'Purchase Invoice Item', 'target_dt': 'Purchase Order Item', 'join_field': 'po_detail', 'target_field': 'received_qty', 'target_parent_dt': 'Purchase Order', 'target_parent_field': 'per_received', 'target_ref_field': 'qty', 'source_field': 'qty', 'percent_join_field':'purchase_order', # 'percent_join_field': 'prevdoc_docname', 'overflow_type': 'receipt', 'extra_cond': """ and exists(select name from `tabPurchase Invoice` where name=`tabPurchase Invoice Item`.parent and update_stock = 1)""" }, { 'source_dt': 'Purchase Invoice Item', 'target_dt': 'Purchase Order Item', 'join_field': 'po_detail', 'target_field': 'returned_qty', 'target_parent_dt': 'Purchase Order', # 'target_parent_field': 'per_received', # 'target_ref_field': 'qty', 'source_field': '-1 * qty', # 'percent_join_field': 'prevdoc_docname', # 'overflow_type': 'receipt', 'extra_cond': """ and exists (select name from `tabPurchase Invoice` where name=`tabPurchase Invoice Item`.parent and update_stock=1 and is_return=1)""" } ]) def validate_purchase_receipt_if_update_stock(self): if self.update_stock: for item in self.get("items"): if item.purchase_receipt: frappe.throw(_("Stock cannot be updated against Purchase Receipt {0}") .format(item.purchase_receipt)) def on_submit(self): self.check_prev_docstatus() self.update_status_updater_args() frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.company, self.base_grand_total) if not self.is_return: self.update_against_document_in_jv() self.update_prevdoc_status() self.update_billing_status_for_zero_amount_refdoc("Purchase Order") self.update_billing_status_in_pr() # Updating stock ledger should always be called after updating prevdoc status, # because updating ordered qty in bin 
depends upon updated ordered qty in PO if self.update_stock == 1: self.update_stock_ledger() from erpnext.stock.doctype.serial_no.serial_no import update_serial_nos_after_submit update_serial_nos_after_submit(self, "items") # this sequence because outstanding may get -negative self.make_gl_entries() self.update_project() self.update_fixed_asset() def update_fixed_asset(self): for d in self.get("items"): if d.is_fixed_asset: asset = frappe.get_doc("Asset", d.asset) if self.docstatus==1: asset.purchase_invoice = self.name asset.purchase_date = self.posting_date asset.supplier = self.supplier else: asset.purchase_invoice = None asset.supplier = None asset.flags.ignore_validate_update_after_submit = True asset.save() def make_gl_entries(self, repost_future_gle=True): if not self.grand_total: return gl_entries = self.get_gl_entries() if gl_entries: update_outstanding = "No" if (cint(self.is_paid) or self.write_off_account) else "Yes" make_gl_entries(gl_entries, cancel=(self.docstatus == 2), update_outstanding=update_outstanding, merge_entries=False) if update_outstanding == "No": update_outstanding_amt(self.credit_to, "Supplier", self.supplier, self.doctype, self.return_against if cint(self.is_return) else self.name) if repost_future_gle and cint(self.update_stock) and self.auto_accounting_for_stock: from erpnext.controllers.stock_controller import update_gl_entries_after items, warehouses = self.get_items_and_warehouses() update_gl_entries_after(self.posting_date, self.posting_time, warehouses, items) elif self.docstatus == 2 and cint(self.update_stock) and self.auto_accounting_for_stock: delete_gl_entries(voucher_type=self.doctype, voucher_no=self.name) def get_gl_entries(self, warehouse_account=None): self.auto_accounting_for_stock = \ cint(frappe.defaults.get_global_default("auto_accounting_for_stock")) self.stock_received_but_not_billed = self.get_company_default("stock_received_but_not_billed") self.expenses_included_in_valuation = 
self.get_company_default("expenses_included_in_valuation") self.negative_expense_to_be_booked = 0.0 gl_entries = [] self.make_supplier_gl_entry(gl_entries) self.make_item_gl_entries(gl_entries) self.make_tax_gl_entries(gl_entries) gl_entries = merge_similar_entries(gl_entries) self.make_payment_gl_entries(gl_entries) self.make_write_off_gl_entry(gl_entries) return gl_entries def make_supplier_gl_entry(self, gl_entries): if self.grand_total: # Didnot use base_grand_total to book rounding loss gle grand_total_in_company_currency = flt(self.grand_total * self.conversion_rate, self.precision("grand_total")) gl_entries.append( self.get_gl_dict({ "account": self.credit_to, "party_type": "Supplier", "party": self.supplier, "against": self.against_expense_account, "credit": grand_total_in_company_currency, "credit_in_account_currency": grand_total_in_company_currency \ if self.party_account_currency==self.company_currency else self.grand_total, "against_voucher": self.return_against if cint(self.is_return) else self.name, "against_voucher_type": self.doctype, }, self.party_account_currency) ) def make_item_gl_entries(self, gl_entries): # item gl entries stock_items = self.get_stock_items() expenses_included_in_valuation = self.get_company_default("expenses_included_in_valuation") warehouse_account = get_warehouse_account() for item in self.get("items"): if flt(item.base_net_amount): account_currency = get_account_currency(item.expense_account) if self.update_stock and self.auto_accounting_for_stock and item.item_code in stock_items: val_rate_db_precision = 6 if cint(item.precision("valuation_rate")) <= 6 else 9 # warehouse account warehouse_debit_amount = flt(flt(item.valuation_rate, val_rate_db_precision) * flt(item.qty) * flt(item.conversion_factor), item.precision("base_net_amount")) gl_entries.append( self.get_gl_dict({ "account": item.expense_account, "against": self.supplier, "debit": warehouse_debit_amount, "remarks": self.get("remarks") or _("Accounting Entry for 
Stock"), "cost_center": item.cost_center, "project": item.project }, account_currency) ) # Amount added through landed-cost-voucher if flt(item.landed_cost_voucher_amount): gl_entries.append(self.get_gl_dict({ "account": expenses_included_in_valuation, "against": item.expense_account, "cost_center": item.cost_center, "remarks": self.get("remarks") or _("Accounting Entry for Stock"), "credit": flt(item.landed_cost_voucher_amount), "project": item.project })) # sub-contracting warehouse if flt(item.rm_supp_cost): supplier_warehouse_account = warehouse_account[self.supplier_warehouse]["name"] gl_entries.append(self.get_gl_dict({ "account": supplier_warehouse_account, "against": item.expense_account, "cost_center": item.cost_center, "remarks": self.get("remarks") or _("Accounting Entry for Stock"), "credit": flt(item.rm_supp_cost) }, warehouse_account[self.supplier_warehouse]["account_currency"])) else: gl_entries.append( self.get_gl_dict({ "account": item.expense_account, "against": self.supplier, "debit": flt(item.base_net_amount, item.precision("base_net_amount")), "debit_in_account_currency": (flt(item.base_net_amount, item.precision("base_net_amount")) if account_currency==self.company_currency else flt(item.net_amount, item.precision("net_amount"))), "cost_center": item.cost_center, "project": item.project }, account_currency) ) if self.auto_accounting_for_stock and self.is_opening == "No" and \ item.item_code in stock_items and item.item_tax_amount: # Post reverse entry for Stock-Received-But-Not-Billed if it is booked in Purchase Receipt if item.purchase_receipt: negative_expense_booked_in_pr = frappe.db.sql("""select name from `tabGL Entry` where voucher_type='Purchase Receipt' and voucher_no=%s and account=%s""", (item.purchase_receipt, self.expenses_included_in_valuation)) if not negative_expense_booked_in_pr: gl_entries.append( self.get_gl_dict({ "account": self.stock_received_but_not_billed, "against": self.supplier, "debit": flt(item.item_tax_amount, 
item.precision("item_tax_amount")), "remarks": self.remarks or "Accounting Entry for Stock" }) ) self.negative_expense_to_be_booked += flt(item.item_tax_amount, \ item.precision("item_tax_amount")) def make_tax_gl_entries(self, gl_entries): # tax table gl entries valuation_tax = {} for tax in self.get("taxes"): if tax.category in ("Total", "Valuation and Total") and flt(tax.base_tax_amount_after_discount_amount): account_currency = get_account_currency(tax.account_head) dr_or_cr = "debit" if tax.add_deduct_tax == "Add" else "credit" gl_entries.append( self.get_gl_dict({ "account": tax.account_head, "against": self.supplier, dr_or_cr: tax.base_tax_amount_after_discount_amount, dr_or_cr + "_in_account_currency": tax.base_tax_amount_after_discount_amount \ if account_currency==self.company_currency \ else tax.tax_amount_after_discount_amount, "cost_center": tax.cost_center }, account_currency) ) # accumulate valuation tax if self.is_opening == "No" and tax.category in ("Valuation", "Valuation and Total") and flt(tax.base_tax_amount_after_discount_amount): if self.auto_accounting_for_stock and not tax.cost_center: frappe.throw(_("Cost Center is required in row {0} in Taxes table for type {1}").format(tax.idx, _(tax.category))) valuation_tax.setdefault(tax.cost_center, 0) valuation_tax[tax.cost_center] += \ (tax.add_deduct_tax == "Add" and 1 or -1) * flt(tax.base_tax_amount_after_discount_amount) if self.is_opening == "No" and self.negative_expense_to_be_booked and valuation_tax: # credit valuation tax amount in "Expenses Included In Valuation" # this will balance out valuation amount included in cost of goods sold total_valuation_amount = sum(valuation_tax.values()) amount_including_divisional_loss = self.negative_expense_to_be_booked i = 1 for cost_center, amount in valuation_tax.items(): if i == len(valuation_tax): applicable_amount = amount_including_divisional_loss else: applicable_amount = self.negative_expense_to_be_booked * (amount / total_valuation_amount) 
amount_including_divisional_loss -= applicable_amount gl_entries.append( self.get_gl_dict({ "account": self.expenses_included_in_valuation, "cost_center": cost_center, "against": self.supplier, "credit": applicable_amount, "remarks": self.remarks or "Accounting Entry for Stock" }) ) i += 1 if self.update_stock and valuation_tax: for cost_center, amount in valuation_tax.items(): gl_entries.append( self.get_gl_dict({ "account": self.expenses_included_in_valuation, "cost_center": cost_center, "against": self.supplier, "credit": amount, "remarks": self.remarks or "Accounting Entry for Stock" }) ) def make_payment_gl_entries(self, gl_entries): # Make Cash GL Entries if cint(self.is_paid) and self.cash_bank_account and self.paid_amount: bank_account_currency = get_account_currency(self.cash_bank_account) # CASH, make payment entries gl_entries.append( self.get_gl_dict({ "account": self.credit_to, "party_type": "Supplier", "party": self.supplier, "against": self.cash_bank_account, "debit": self.base_paid_amount, "debit_in_account_currency": self.base_paid_amount \ if self.party_account_currency==self.company_currency else self.paid_amount, "against_voucher": self.return_against if cint(self.is_return) else self.name, "against_voucher_type": self.doctype, }, self.party_account_currency) ) gl_entries.append( self.get_gl_dict({ "account": self.cash_bank_account, "against": self.supplier, "credit": self.base_paid_amount, "credit_in_account_currency": self.base_paid_amount \ if bank_account_currency==self.company_currency else self.paid_amount }, bank_account_currency) ) def make_write_off_gl_entry(self, gl_entries): # writeoff account includes petty difference in the invoice amount # and the amount that is paid if self.write_off_account and flt(self.write_off_amount): write_off_account_currency = get_account_currency(self.write_off_account) gl_entries.append( self.get_gl_dict({ "account": self.credit_to, "party_type": "Supplier", "party": self.supplier, "against": 
self.write_off_account, "debit": self.base_write_off_amount, "debit_in_account_currency": self.base_write_off_amount \ if self.party_account_currency==self.company_currency else self.write_off_amount, "against_voucher": self.return_against if cint(self.is_return) else self.name, "against_voucher_type": self.doctype, }, self.party_account_currency) ) gl_entries.append( self.get_gl_dict({ "account": self.write_off_account, "against": self.supplier, "credit": flt(self.base_write_off_amount), "credit_in_account_currency": self.base_write_off_amount \ if write_off_account_currency==self.company_currency else self.write_off_amount, "cost_center": self.write_off_cost_center }) ) def on_cancel(self): self.check_for_closed_status() self.update_status_updater_args() if not self.is_return: from erpnext.accounts.utils import unlink_ref_doc_from_payment_entries if frappe.db.get_single_value('Accounts Settings', 'unlink_payment_on_cancellation_of_invoice'): unlink_ref_doc_from_payment_entries(self.doctype, self.name) self.update_prevdoc_status() self.update_billing_status_for_zero_amount_refdoc("Purchase Order") self.update_billing_status_in_pr() # Updating stock ledger should always be called after updating prevdoc status, # because updating ordered qty in bin depends upon updated ordered qty in PO if self.update_stock == 1: self.update_stock_ledger() self.make_gl_entries_on_cancel() self.update_project() self.update_fixed_asset() frappe.db.set(self, 'status', 'Cancelled') def update_project(self): project_list = [] for d in self.items: if d.project and d.project not in project_list: project = frappe.get_doc("Project", d.project) project.flags.dont_sync_tasks = True project.update_purchase_costing() project.save() project_list.append(d.project) def validate_supplier_invoice(self): if self.bill_date: if getdate(self.bill_date) > getdate(self.posting_date): frappe.throw(_("Supplier Invoice Date cannot be greater than Posting Date")) if self.bill_no: if 
cint(frappe.db.get_single_value("Accounts Settings", "check_supplier_invoice_uniqueness")): fiscal_year = get_fiscal_year(self.posting_date, company=self.company, as_dict=True) pi = frappe.db.sql('''select name from `tabPurchase Invoice` where bill_no = %(bill_no)s and name != %(name)s and docstatus < 2 and posting_date between %(year_start_date)s and %(year_end_date)s''', { "bill_no": self.bill_no, "name": self.name, "year_start_date": fiscal_year.year_start_date, "year_end_date": fiscal_year.year_end_date }) if pi: pi = pi[0][0] frappe.throw(_("Supplier Invoice No exists in Purchase Invoice {0}".format(pi))) def update_billing_status_in_pr(self, update_modified=True): updated_pr = [] for d in self.get("items"): if d.pr_detail: billed_amt = frappe.db.sql("""select sum(amount) from `tabPurchase Invoice Item` where pr_detail=%s and docstatus=1""", d.pr_detail) billed_amt = billed_amt and billed_amt[0][0] or 0 frappe.db.set_value("Purchase Receipt Item", d.pr_detail, "billed_amt", billed_amt, update_modified=update_modified) updated_pr.append(d.purchase_receipt) elif d.po_detail: updated_pr += update_billed_amount_based_on_po(d.po_detail, update_modified) for pr in set(updated_pr): frappe.get_doc("Purchase Receipt", pr).update_billing_percentage(update_modified=update_modified) def validate_fixed_asset_account(self): for d in self.get('items'): if d.is_fixed_asset: account_type = frappe.db.get_value("Account", d.expense_account, "account_type") if account_type != 'Fixed Asset': frappe.throw(_("Row {0}# Account must be of type 'Fixed Asset'").format(d.idx)) def on_recurring(self, reference_doc): self.due_date = None @frappe.whitelist() def make_debit_note(source_name, target_doc=None): from erpnext.controllers.sales_and_purchase_return import make_return_doc return make_return_doc("Purchase Invoice", source_name, target_doc) @frappe.whitelist() def get_fixed_asset_account(asset, account=None): if account: if frappe.db.get_value("Account", account, "account_type") != 
"Fixed Asset": account=None if not account: asset_category, company = frappe.db.get_value("Asset", asset, ["asset_category", "company"]) account = frappe.db.get_value("Asset Category Account", filters={"parent": asset_category, "company_name": company}, fieldname="fixed_asset_account") return account
gpl-3.0
-4,062,770,884,894,112,000
37.749632
184
0.689559
false
npilon/planterbox
planterbox/decorators.py
1
2059
"""Decorators used when building a package of planterbox features to define steps and hooks Also some private functions used by those decorators. """ from functools import partial import logging import re from six import ( string_types, ) log = logging.getLogger('planterbox') EXAMPLE_TO_FORMAT = re.compile(r'<(.+?)>') FEATURE_NAME = re.compile(r'\.feature(?:\:[\d,]+)?$') def make_step(pattern, multiline, fn): """Inner decorator for making a function usable as a step.""" planterbox_prefix = r'^\s*(?:Given|And|When|Then|But)\s+' planterbox_patterns = getattr(fn, 'planterbox_patterns', []) if multiline: if isinstance(multiline, string_types): pattern = pattern + r'\n(?P<{}>(?:.|\n)+)'.format(multiline) else: pattern = pattern + r'\n((?:.|\n)+)' planterbox_patterns.append( re.compile(planterbox_prefix + pattern, re.IGNORECASE)) fn.planterbox_patterns = planterbox_patterns return fn def step(pattern, multiline=False): """Decorate a function with a pattern so it can be used as a step. Optional arguments: - multiline: If true, this step-pattern will be turned into a multiline pattern. This adds a regular expression to the end that captures all remaining lines as a single group. If a string, that string will be used as the name of the multiline group. """ return partial(make_step, pattern, multiline) def make_hook(timing, stage, fn): """Inner decorator for making a function usable as a hook.""" planterbox_hook_timing = getattr(fn, 'planterbox_hook_timing', set()) planterbox_hook_timing.add((timing, stage)) fn.planterbox_hook_timing = planterbox_hook_timing return fn def hook(timing, stage): """Register a function as a hook to be run before or after """ if timing not in ('before', 'after'): raise ValueError(timing) if stage not in ('feature', 'scenario', 'step', 'error', 'failure'): raise ValueError(stage) return partial(make_hook, timing, stage)
mit
-6,669,727,340,165,458,000
29.731343
78
0.668771
false
rahimnathwani/measure-anything
project/auth/models.py
1
6772
from datetime import datetime import hashlib from werkzeug.security import generate_password_hash, check_password_hash from itsdangerous import TimedJSONWebSignatureSerializer as Serializer from flask import current_app from flask.ext.login import UserMixin, AnonymousUserMixin from .. import db, login_manager class Permission: ADMINISTER = 0x80 class Role(db.Model): __tablename__ = 'roles' id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(64), unique=True) default = db.Column(db.Boolean, default=False, index=True) permissions = db.Column(db.Integer) users = db.relationship('User', backref='role', lazy='dynamic') @staticmethod def insert_roles(): roles = { 'User': (0x00, True), 'Administrator': (0xff, False) } for r in roles: role = Role.query.filter_by(name=r).first() if role is None: role = Role(name=r) role.permissions = roles[r][0] role.default = roles[r][1] db.session.add(role) db.session.commit() def __repr__(self): return '<Role %r>' % self.name class User(UserMixin, db.Model): __tablename__ = 'users' id = db.Column(db.Integer, primary_key=True) email = db.Column(db.String(64), unique=True, index=True) role_id = db.Column(db.Integer, db.ForeignKey('roles.id')) password_hash = db.Column(db.String(128)) confirmed = db.Column(db.Boolean, default=False) name = db.Column(db.String(64)) location = db.Column(db.String(64)) about_me = db.Column(db.Text()) member_since = db.Column(db.DateTime(), default=datetime.utcnow) last_seen = db.Column(db.DateTime(), default=datetime.utcnow) connections = db.relationship('Connection', backref=db.backref('user', lazy='joined'), cascade="all") estimates = db.relationship("Estimate", backref='user') @staticmethod def generate_fake(count=100): from sqlalchemy.exc import IntegrityError from random import seed import forgery_py seed() for i in range(count): u = User(email=forgery_py.internet.email_address(), password=forgery_py.lorem_ipsum.word(), confirmed=True, name=forgery_py.name.full_name(), 
location=forgery_py.address.city(), about_me=forgery_py.lorem_ipsum.sentence(), member_since=forgery_py.date.date(True)) db.session.add(u) try: db.session.commit() except IntegrityError: db.session.rollback() def __init__(self, **kwargs): super(User, self).__init__(**kwargs) if self.role is None: self.role = Role.query.filter_by(default=True).first() @property def password(self): raise AttributeError('password is not a readable attribute') @password.setter def password(self, password): self.password_hash = generate_password_hash(password) def verify_password(self, password): return check_password_hash(self.password_hash, password) def generate_confirmation_token(self, expiration=3600): s = Serializer(current_app.config['SECRET_KEY'], expiration) return s.dumps({'confirm': self.id}) def confirm(self, token): s = Serializer(current_app.config['SECRET_KEY']) try: data = s.loads(token) except: return False if data.get('confirm') != self.id: return False self.confirmed = True db.session.add(self) return True def generate_reset_token(self, expiration=3600): s = Serializer(current_app.config['SECRET_KEY'], expiration) return s.dumps({'reset': self.id}) def reset_password(self, token, new_password): s = Serializer(current_app.config['SECRET_KEY']) try: data = s.loads(token) except: return False if data.get('reset') != self.id: return False self.password = new_password db.session.add(self) return True def generate_email_change_token(self, new_email, expiration=3600): s = Serializer(current_app.config['SECRET_KEY'], expiration) return s.dumps({'change_email': self.id, 'new_email': new_email}) def change_email(self, token): s = Serializer(current_app.config['SECRET_KEY']) try: data = s.loads(token) except: return False if data.get('change_email') != self.id: return False new_email = data.get('new_email') if new_email is None: return False if self.query.filter_by(email=new_email).first() is not None: return False self.email = new_email db.session.add(self) return True def 
can(self, permissions): return self.role is not None and \ (self.role.permissions & permissions) == permissions def is_administrator(self): return self.can(Permission.ADMINISTER) def ping(self): self.last_seen = datetime.utcnow() db.session.add(self) def to_json(self): json_user = { 'member_since': self.member_since, 'last_seen': self.last_seen, } return json_user def generate_auth_token(self, expiration): s = Serializer(current_app.config['SECRET_KEY'], expires_in=expiration) return s.dumps({'id': self.id}).decode('ascii') @staticmethod def verify_auth_token(token): s = Serializer(current_app.config['SECRET_KEY']) try: data = s.loads(token) except: return None return User.query.get(data['id']) def __repr__(self): return '<User %r>' % self.email class AnonymousUser(AnonymousUserMixin): def can(self, permissions): return False def is_administrator(self): return False class Connection(db.Model): __tablename__ = "connections" id = db.Column(db.Integer, primary_key=True) user_id = db.Column(db.Integer, db.ForeignKey('users.id')) oauth_provider = db.Column(db.String(255)) oauth_id = db.Column(db.String(255)) oauth_token = db.Column(db.String(255)) oauth_secret = db.Column(db.String(255)) display_name = db.Column(db.String(255)) full_name = db.Column(db.String(255)) profile_url = db.Column(db.String(512)) image_url = db.Column(db.String(512)) login_manager.anonymous_user = AnonymousUser @login_manager.user_loader def load_user(user_id): return User.query.get(int(user_id))
mit
-7,012,245,458,600,389,000
31.557692
73
0.603219
false
eubr-bigsea/tahiti
migrations/versions/1d7c21b6c7d2_add_keras_core_layer_operation_reshape.py
1
10248
# -*- coding: utf-8 -*- """Add Keras Core Layer Operation Reshape Revision ID: 1d7c21b6c7d2 Revises: 4a4b7df125b7 Create Date: 2018-11-01 10:26:22.659859 """ from alembic import op import sqlalchemy as sa from alembic import context from alembic import op from sqlalchemy import String, Integer, Text from sqlalchemy.orm import sessionmaker from sqlalchemy.sql import table, column, text # revision identifiers, used by Alembic. revision = '1d7c21b6c7d2' down_revision = '4a4b7df125b7' branch_labels = None depends_on = None KERAS_PLATAFORM_ID = 5 def _insert_operation_platform(): tb = table( 'operation_platform', column('operation_id', Integer), column('platform_id', Integer), ) columns = ('operation_id', 'platform_id') data = [ (5015, KERAS_PLATAFORM_ID),# Reshape ] rows = [dict(list(zip(columns, row))) for row in data] op.bulk_insert(tb, rows) def _insert_operation(): tb = table( 'operation', column('id', Integer), column('slug', String), column('enabled', Integer), column('type', String), column('icon', Integer),) columns = ('id', 'slug', 'enabled', 'type', 'icon') data = [ (5015, "reshape", 1, 'ACTION', ''), ] rows = [dict(list(zip(columns, row))) for row in data] op.bulk_insert(tb, rows) def _insert_operation_category(): tb = table( 'operation_category', column('id', Integer), column('type', String), column('order', Integer), column('default_order', Integer), ) columns = ('id', 'type', 'order', 'default_order') data = [ (5015, "subgroup", 8, 8),# Reshape ] rows = [dict(list(zip(columns, row))) for row in data] op.bulk_insert(tb, rows) def _insert_operation_category_operation(): tb = table( 'operation_category_operation', column('operation_id', Integer), column('operation_category_id', Integer)) columns = ('operation_category_id', 'operation_id') data = [ #Core Layers (5010, 5015),# Reshape ] rows = [dict(list(zip(columns, row))) for row in data] op.bulk_insert(tb, rows) def _insert_operation_translation(): tb = table( 'operation_translation', column('id', Integer), 
column('locale', String), column('name', String), column('description', String)) columns = ('id', 'locale', 'name', 'description') data = [ (5015, "en", 'Reshape', ''), ] rows = [dict(list(zip(columns, row))) for row in data] op.bulk_insert(tb, rows) def _insert_operation_port(): tb = table( 'operation_port', column('id', Integer), column('type', String), column('tags', String), column('order', Integer), column('multiplicity', String), column('operation_id', Integer), column('slug', String),) columns = ('id', 'type', 'tags', 'order', 'multiplicity', 'operation_id', 'slug') data = [ #Reshape (5115, 'INPUT', '', 1, 'ONE', 5015, 'input data'), (5215, 'OUTPUT', '', 1, 'ONE', 5015, 'output data'), ] rows = [dict(list(zip(columns, row))) for row in data] op.bulk_insert(tb, rows) def _insert_operation_port_interface_operation_port(): tb = table( 'operation_port_interface_operation_port', column('operation_port_id', Integer), column('operation_port_interface_id', Integer)) columns = ('operation_port_id', 'operation_port_interface_id') data = [ #Reshape (5115, 1), (5215, 1), ] rows = [dict(list(zip(columns, row))) for row in data] op.bulk_insert(tb, rows) def _insert_operation_port_translation(): tb = table( 'operation_port_translation', column('id', Integer), column('locale', String), column('name', String), column('description', String)) columns = ('id', 'locale', 'name', 'description') data = [ #Reshape (5115, "en", 'input data', 'Input data'), (5215, "en", 'output data', 'Output data'), ] rows = [dict(list(zip(columns, row))) for row in data] op.bulk_insert(tb, rows) def _insert_operation_form(): operation_form_table = table( 'operation_form', column('id', Integer), column('enabled', Integer), column('order', Integer), column('category', String), ) columns = ('id', 'enabled', 'order', 'category') data = [ #Reshape - target_shape (5132, 1, 1, 'execution'), #Reshape - input_shape #(5133, 1, 1, 'execution'), ] rows = [dict(list(zip(columns, row))) for row in data] 
op.bulk_insert(operation_form_table, rows) def _insert_operation_form_translation(): tb = table( 'operation_form_translation', column('id', Integer), column('locale', String), column('name', String)) columns = ('id', 'locale', 'name') data = [ #Reshape - target_shape (5132, 'en', 'Execution'), (5132, 'pt', 'Execução'), #Reshape - input_shape #(5133, 'en', 'Execution'), #(5133, 'pt', 'Execução'), ] rows = [dict(list(zip(columns, row))) for row in data] op.bulk_insert(tb, rows) def _insert_operation_operation_form(): tb = table( 'operation_operation_form', column('operation_id', Integer), column('operation_form_id', Integer)) columns = ('operation_id', 'operation_form_id') data = [ (5015, 41), #appearance #Reshape - target_shape (5015, 5132), # own execution form #Reshape - input_shape #(5015, 5133), # own execution form ] rows = [dict(list(zip(columns, row))) for row in data] op.bulk_insert(tb, rows) def _insert_operation_form_field(): tb = table( 'operation_form_field', column('id', Integer), column('name', String), column('type', String), column('required', Integer), column('order', Integer), column('default', Text), column('suggested_widget', String), column('values_url', String), column('values', String), column('scope', String), column('form_id', Integer), ) columns = ('id', 'name', 'type', 'required', 'order', 'default', 'suggested_widget', 'values_url', 'values', 'scope', 'form_id') data = [ #Reshape - target_shape (5132, 'target_shape', 'TEXT', 1, 1, None, 'text', None, None, 'EXECUTION', 5132), #Reshape - input_shape #(5133, 'input_shape', 'TEXT', 0, 2, None, 'text', None, None, 'EXECUTION', 5133), ] rows = [dict(list(zip(columns, row))) for row in data] op.bulk_insert(tb, rows) def _insert_operation_form_field_translation(): tb = table( 'operation_form_field_translation', column('id', Integer), column('locale', String), column('label', String), column('help', String), ) columns = ('id', 'locale', 'label', 'help') data = [ #Reshape - target_shape (5132, 
'en', 'Target shape', 'Tuple of integers. Does not include the batch axis. Ex.: (6,2)'), #Reshape - input_shape #(5133, 'en', 'input_shape', 'Arbitrary, although all dimensions in the input shaped must be fixed. ' # 'Use the keyword argument input_shape (tuple of integers, does not ' # 'include the batch axis) when using this layer as the first ' # 'layer in a model. Ex.: (12,)'), ] rows = [dict(list(zip(columns, row))) for row in data] op.bulk_insert(tb, rows) all_commands = [ (_insert_operation, 'DELETE FROM operation WHERE id = 5015'), (_insert_operation_category, 'DELETE FROM operation_category WHERE id = 5015'), (_insert_operation_translation, 'DELETE FROM operation_translation WHERE id = 5015'), (_insert_operation_category_operation, 'DELETE FROM operation_category_operation WHERE operation_id = 5015'), (_insert_operation_platform, 'DELETE FROM operation_platform WHERE operation_id = 5015 AND platform_id = {}'.format(KERAS_PLATAFORM_ID)), (_insert_operation_port, 'DELETE FROM operation_port WHERE id IN (5115, 5215)'), (_insert_operation_port_interface_operation_port, 'DELETE FROM operation_port_interface_operation_port WHERE operation_port_id IN (5115, 5215)'), (_insert_operation_port_translation, 'DELETE FROM operation_port_translation WHERE id IN (5115, 5215)'), (_insert_operation_form, 'DELETE FROM operation_form WHERE id IN (5132, 5133)'), (_insert_operation_form_field, 'DELETE FROM operation_form_field WHERE id IN (5132, 5133)'), (_insert_operation_form_translation, 'DELETE FROM operation_form_translation WHERE id IN (5132, 5133)'), (_insert_operation_form_field_translation, 'DELETE FROM operation_form_field_translation WHERE id IN (5132, 5133)'), (_insert_operation_operation_form, 'DELETE FROM operation_operation_form WHERE operation_id = 5015'), ] def upgrade(): ctx = context.get_context() session = sessionmaker(bind=ctx.bind)() connection = session.connection() try: for cmd in all_commands: if isinstance(cmd[0], str): connection.execute(cmd[0]) elif 
isinstance(cmd[0], list): for row in cmd[0]: connection.execute(row) else: cmd[0]() except: session.rollback() raise session.commit() def downgrade(): ctx = context.get_context() session = sessionmaker(bind=ctx.bind)() connection = session.connection() try: for cmd in reversed(all_commands): if isinstance(cmd[1], str): connection.execute(cmd[1]) elif isinstance(cmd[1], list): for row in cmd[1]: connection.execute(row) else: cmd[1]() except: session.rollback() raise session.commit()
apache-2.0
7,580,760,651,073,423,000
28.268571
113
0.577314
false
gdestuynder/MozDef
cron/google2mozdef.py
1
8901
#!/usr/bin/env python # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. # Copyright (c) 2014 Mozilla Corporation import sys import logging import requests import json from configlib import getConfig, OptionParser from datetime import datetime from logging.handlers import SysLogHandler from httplib2 import Http from oauth2client.client import SignedJwtAssertionCredentials from apiclient.discovery import build from mozdef_util.utilities.toUTC import toUTC logger = logging.getLogger(sys.argv[0]) logger.level=logging.INFO formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s') class State: def __init__(self, filename): '''Set the filename and populate self.data by calling self.read_stat_file()''' self.filename = filename self.read_state_file() def read_state_file(self): '''Populate self.data by reading and parsing the state file''' try: with open(self.filename, 'r') as f: self.data = json.load(f) except IOError: self.data = {} except ValueError: logger.error("%s state file found but isn't a recognized json format" % self.filename) raise except TypeError: logger.error("%s state file found and parsed but it doesn't contain an iterable object" % self.filename) raise def write_state_file(self): '''Write the self.data value into the state file''' with open(self.filename, 'w') as f: json.dump(self.data, f, sort_keys=True, indent=4, separators=(',', ': ')) def flattenDict(inDict, pre=None, values=True): '''given a dictionary, potentially with multiple sub dictionaries return a period delimited version of the dict with or without values i.e. 
{'something':'value'} becomes something=value {'something':{'else':'value'}} becomes something.else=value ''' pre = pre[:] if pre else [] if isinstance(inDict, dict): for key, value in inDict.iteritems(): if isinstance(value, dict): for d in flattenDict(value, pre + [key], values): yield d if isinstance(value,list): for listItem in value: for i in flattenDict(listItem,pre + [key],values): yield i else: if pre: if values: if isinstance(value, str): yield '.'.join(pre) + '.' + key + '=' + str(value) elif isinstance(value, unicode): yield '.'.join(pre) + '.' + key + '=' + value.encode('ascii', 'ignore') elif value is None: yield '.'.join(pre) + '.' + key + '=None' else: yield '.'.join(pre) + '.' + key else: if values: if isinstance(value, str): yield key + '=' + str(value) elif isinstance(value, unicode): yield key + '=' + value.encode('ascii', 'ignore') elif value is None: yield key + '=None' else: yield key else: yield '-'.join(pre) + '.' + inDict def main(): if options.output=='syslog': logger.addHandler(SysLogHandler(address=(options.sysloghostname,options.syslogport))) else: sh=logging.StreamHandler(sys.stderr) sh.setFormatter(formatter) logger.addHandler(sh) logger.debug('started') state = State(options.state_file_name) try: # capture the time we start running so next time we catch any events # created while we run. 
lastrun=toUTC(datetime.now()).isoformat() # get our credentials mozdefClient=json.loads(open(options.jsoncredentialfile).read()) client_email = mozdefClient['client_email'] private_key=mozdefClient['private_key'] # set the oauth scope we will request scope=[ 'https://www.googleapis.com/auth/admin.reports.audit.readonly', 'https://www.googleapis.com/auth/admin.reports.usage.readonly' ] # authorize our http object # we do this as a 'service account' so it's important # to specify the correct 'sub' option # or you will get access denied even with correct delegations/scope credentials = SignedJwtAssertionCredentials(client_email, private_key, scope=scope, sub=options.impersonate) http = Http() credentials.authorize(http) # build a request to the admin sdk api = build('admin', 'reports_v1', http=http) response = api.activities().list(userKey='all', applicationName='login', startTime=toUTC(state.data['lastrun']).strftime('%Y-%m-%dT%H:%M:%S.000Z'), maxResults=options.recordlimit).execute() # fix up the event craziness to a flatter format events=[] if 'items' in response: for i in response['items']: # flatten the sub dict/lists to pull out the good parts event=dict(category='google') event['tags']=['google','authentication'] event['severity']='INFO' event['summary']='google authentication: ' details=dict() for keyValue in flattenDict(i): # change key/values like: # actor.email=someone@mozilla.com # to actor_email=value key,value =keyValue.split('=') key=key.replace('.','_').lower() details[key]=value # find important keys # and adjust their location/name if 'ipaddress' in details: # it's the source ip details['sourceipaddress']=details['ipaddress'] del details['ipaddress'] if 'id_time' in details: event['timestamp']=details['id_time'] event['utctimestamp']=details['id_time'] if 'events_name' in details: event['summary']+= details['events_name'] + ' ' if 'actor_email' in details: event['summary']+= details['actor_email'] + ' ' event['details']=details events.append(event) # 
post events to mozdef logger.debug('posting {0} google events to mozdef'.format(len(events))) for e in events: requests.post(options.url,data=json.dumps(e)) # record the time we started as # the start time for next time. state.data['lastrun'] = lastrun state.write_state_file() except Exception as e: logger.error("Unhandled exception, terminating: %r" % e) def initConfig(): options.output=getConfig('output','stdout',options.configfile) # output our log to stdout or syslog options.sysloghostname=getConfig('sysloghostname','localhost',options.configfile) # syslog hostname options.syslogport=getConfig('syslogport',514,options.configfile) # syslog port options.url = getConfig('url', 'http://localhost:8080/events', options.configfile) # mozdef event input url to post to options.state_file_name = getConfig('state_file_name','{0}.state'.format(sys.argv[0]),options.configfile) options.recordlimit = getConfig('recordlimit', 1000, options.configfile) # max number of records to request # # See # https://developers.google.com/admin-sdk/reports/v1/guides/delegation # for detailed information on delegating a service account for use in gathering google admin sdk reports # # google's json credential file exported from the project/admin console options.jsoncredentialfile=getConfig('jsoncredentialfile','/path/to/filename.json',options.configfile) # email of admin to impersonate as a service account options.impersonate = getConfig('impersonate', 'someone@yourcompany.com', options.configfile) if __name__ == '__main__': parser=OptionParser() parser.add_option("-c", dest='configfile', default=sys.argv[0].replace('.py', '.conf'), help="configuration file to use") (options,args) = parser.parse_args() initConfig() main()
mpl-2.0
-2,776,831,902,477,656,600
40.593458
125
0.569037
false
bubbleboy14/cantools
cantools/web/dez_server/daemons.py
1
2550
import sys, json, gc, os try: import psutil except ImportError as e: pass # google crap engine (get it if you need it!) from dez.memcache import get_memcache from dez.http.application import HTTPApplication from .routes import static, cb from cantools import config sys.path.insert(0, ".") # for dynamically loading modules A_STATIC = { "dynamic": { "/": "_/dynamic", "/css/": "_/css", "/js/CT/": "js/CT", "/logs/": "logs", "/logs": "logs" }, "static": { "/": "_/static", "/css/": "_/css", "/js/CT/": "js/CT", "/logs/": "logs", "/logs": "logs" }, "production": { "/": "_/production", "/css/": "_/css", "/logs/": "logs", "/logs": "logs" }, } A_CB = { "/admin": "admin", "/_db": "_db" } class CTWebBase(HTTPApplication): def __init__(self, bind_address, port, logger_getter, static=static, cb=cb, whitelist=[]): isprod = config.mode == "production" HTTPApplication.__init__(self, bind_address, port, logger_getter, "dez/cantools", config.ssl.certfile, config.ssl.keyfile, config.ssl.cacerts, isprod, config.web.rollz, isprod, whitelist) self.memcache = get_memcache() self.handlers = {} for key, val in list(static.items()): self.add_static_rule(key, val) for key, val in list(cb.items()): self.add_cb_rule(key, self._handler(key, val)) def _handler(self, rule, target): self.logger.info("setting handler: %s %s"%(rule, target)) def h(req): self.logger.info("triggering handler: %s %s"%(rule, target)) self.controller.trigger_handler(rule, target, req) return h class Web(CTWebBase): def __init__(self, bind_address, port, logger_getter): self.logger = logger_getter("Web") CTWebBase.__init__(self, bind_address, port, logger_getter, whitelist=config.web.whitelist) class Admin(CTWebBase): def __init__(self, bind_address, port, logger_getter): self.logger = logger_getter("Admin") CTWebBase.__init__(self, bind_address, port, logger_getter, A_STATIC[config.mode], A_CB, config.admin.whitelist) self.add_cb_rule("/_report", self.report) def report(self, req): report = json.dumps({ "web": 
self.controller.web.daemon.counter.report(), "admin": self.daemon.counter.report(), "gc": len(gc.get_objects()), "mem": psutil.Process(os.getpid()).memory_percent() }) req.write("HTTP/1.0 200 OK\r\n\r\n%s"%(report,)) req.close()
mit
6,818,281,954,024,664,000
41.5
109
0.59098
false
phtj/eddex
houdini/python_libs/houdini_ea/wrappers.py
1
4084
""" Executor classes used for executing tasks. """ import sys, os import hou from bson.binary import Binary #Current working directory HOU_FOLDER_PATH = os.getcwd() #Helper functions def _load_hip_file(hip_file_name): """Attempt to load a hip file in Houdini """ #Replacing "\\" with "/" is required to avoid errors in evaluating "$HIP" hou_file_path = os.path.join(HOU_FOLDER_PATH, hip_file_name).replace("\\", "/") try: result = hou.hipFile.load(hou_file_path) except hou.LoadWarning as e: print "hou.LoadWarning exception loading hip file" print str(e) raise except hou.OperationFailed as e: print "hou.OperationFailed exception loading hip file" print str(e) raise except Exception as e: print "Exception loading hip file" print str(e) raise except: print "Unrecognised exception loading hip file" raise if result: print "Warnings loading hip file: ", result def _get_hou_node(node_path): """Attempt to get a node from hou """ node = hou.node(node_path) if not node: print "ERROR: Houdini node " + node_path + " does not exist." raise Exception() return node def _cook_hou_node(node, animate = None): """cook a node in a houdini file""" if animate is not None: node.cook(force=True, frame_range=animate) else: node.cook(force=True) def _set_hou_node_parameters(node, prefix, values, start_index=1): """set parameter values of a houdini node""" for i, v in enumerate(values): node.setParms({prefix+str(i+start_index):v}) def _get_hou_node_attributes(node, attribute_names): """get the attribute values of a houdini node (detail attributes)""" results = [] for attribute_name in attribute_names: result = node.geometry().attribValue(attribute_name) results.append(result) return results def _temp_dir(): """Create an empty folder. (If the folder exists, delete it.) 
""" temp_dir = os.path.join(os.getcwd(), "temp") if not os.path.exists(temp_dir): os.mkdir(temp_dir) else: for the_file in os.listdir(temp_dir): file_path = os.path.join(temp_dir, the_file) try: if os.path.isfile(file_path): os.unlink(file_path) except Exception, e: print e return temp_dir # Functions for executing tasks def houdini_develop(ind, hip_file_name, in_path, out_path, animate=None): #get the genotype genotype = ind.get_genotype() #open the hipnc file _load_hip_file(hip_file_name) #set the parameters using the individual's genes genotype_node = _get_hou_node(in_path) _set_hou_node_parameters(genotype_node, "gene_", genotype) #save phenotype to file phen_file_path = os.path.join(_temp_dir(), "temp.bgeo") phenotype_node = _get_hou_node(out_path) phenotype_node.setParms(dict([["file",phen_file_path]])) _cook_hou_node(phenotype_node, animate) # get and save the phenotype with open(phen_file_path, "rb") as f: phenotype = f.read() return Binary(phenotype) def houdini_evaluate(ind, score_names, hip_file_name, in_path, out_path, animate=None): #get the phenotype phenotype = ind.get_phenotype() #write the phenotype to a temporary file phen_file_path = os.path.join(_temp_dir(), "temp.bgeo") with open(phen_file_path, "wb") as f: f.write(phenotype) #open the phenotype hipnc file _load_hip_file(hip_file_name) #load the geometry into the phenotype node phenotype_node = _get_hou_node(in_path) phenotype_node.setParms(dict([["file",phen_file_path]])) #cook the score node score_node = _get_hou_node(out_path) _cook_hou_node(score_node, animate) #get and save all the scores score_values = [] for score_name in score_names: score_value = score_node.geometry().attribValue(score_name) score_values.append(score_value) return score_values
gpl-3.0
4,568,971,612,252,445,000
33.033333
87
0.63859
false
fugitifduck/exabgp
lib/exabgp/protocol/family.py
1
5287
# encoding: utf-8 """ address.py Created by Thomas Mangin on 2010-01-19. Copyright (c) 2009-2015 Exa Networks. All rights reserved. """ from struct import pack from struct import unpack # =================================================================== AFI # http://www.iana.org/assignments/address-family-numbers/ class AFI (int): undefined = 0x00 # internal ipv4 = 0x01 ipv6 = 0x02 l2vpn = 0x19 Family = { ipv4: 0x02, # socket.AF_INET, ipv6: 0x30, # socket.AF_INET6, l2vpn: 0x02, # l2vpn info over ipv4 session } names = { 'ipv4': ipv4, 'ipv6': ipv6, 'l2vpn': l2vpn, } def __str__ (self): if self == 0x01: return "ipv4" if self == 0x02: return "ipv6" if self == 0x19: return "l2vpn" return "unknown afi %d" % self def __repr__ (self): return str(self) def name (self): if self == 0x01: return "inet4" if self == 0x02: return "inet6" if self == 0x19: return "l2vpn" return "unknown afi" def pack (self): return pack('!H',self) @staticmethod def unpack (data): return AFI(unpack('!H',data)[0]) @staticmethod def value (name): if name == "ipv4": return AFI.ipv4 if name == "ipv6": return AFI.ipv6 return None @staticmethod def implemented_safi (afi): if afi == 'ipv4': return ['unicast','multicast','nlri-mpls','mpls-vpn','flow','flow-vpn'] if afi == 'ipv6': return ['unicast','mpls-vpn','flow','flow-vpn'] if afi == 'l2vpn': return ['vpls'] return [] @classmethod def fromString (cls, string): return cls.names.get(string,cls.undefined) # =================================================================== SAFI # http://www.iana.org/assignments/safi-namespace class SAFI (int): undefined = 0 # internal unicast = 1 # [RFC4760] multicast = 2 # [RFC4760] # deprecated = 3 # [RFC4760] nlri_mpls = 4 # [RFC3107] # mcast_vpn = 5 # [draft-ietf-l3vpn-2547bis-mcast-bgp] (TEMPORARY - Expires 2008-06-19) # pseudowire = 6 # [draft-ietf-pwe3-dynamic-ms-pw] (TEMPORARY - Expires 2008-08-23) Dynamic Placement of Multi-Segment Pseudowires # encapsulation = 7 # [RFC5512] # tunel = 64 # [Nalawade] vpls = 65 
# [RFC4761] # bgp_mdt = 66 # [Nalawade] # bgp_4over6 = 67 # [Cui] # bgp_6over4 = 67 # [Cui] # vpn_adi = 69 # [RFC-ietf-l1vpn-bgp-auto-discovery-05.txt] evpn = 70 # [draft-ietf-l2vpn-evpn] mpls_vpn = 128 # [RFC4364] # mcast_bgp_mpls_vpn = 129 # [RFC2547] # rt = 132 # [RFC4684] rtc = 132 # [RFC4684] flow_ip = 133 # [RFC5575] flow_vpn = 134 # [RFC5575] # vpn_ad = 140 # [draft-ietf-l3vpn-bgpvpn-auto] # private = [_ for _ in range(241,254)] # [RFC4760] # unassigned = [_ for _ in range(8,64)] + [_ for _ in range(70,128)] # reverved = [0,3] + [130,131] + [_ for _ in range(135,140)] + [_ for _ in range(141,241)] + [255,] # [RFC4760] names = { 'unicast': unicast, 'multicast': multicast, 'nlri-mpls': nlri_mpls, 'vpls': vpls, 'evpn': evpn, 'mpls-vpn': mpls_vpn, 'rtc': rtc, 'flow': flow_ip, 'flow-vpn': flow_vpn, } def name (self): if self == 0x01: return "unicast" if self == 0x02: return "multicast" if self == 0x04: return "nlri-mpls" if self == 0x46: return "evpn" if self == 0x80: return "mpls-vpn" if self == 0x84: return "rtc" if self == 0x85: return "flow" if self == 0x86: return "flow-vpn" if self == 0x41: return "vpls" return "unknown safi %d" % self def __str__ (self): return self.name() def __repr__ (self): return str(self) def pack (self): return chr(self) @staticmethod def unpack (data): return SAFI(ord(data)) def has_label (self): return self in (self.nlri_mpls,self.mpls_vpn) def has_rd (self): return self in (self.mpls_vpn,) # technically self.flow_vpn and self.vpls has an RD but it is not an NLRI @staticmethod def value (name): if name == "unicast": return 0x01 if name == "multicast": return 0x02 if name == "nlri-mpls": return 0x04 if name == "mpls-vpn": return 0x80 if name == "flow": return 0x85 if name == "flow-vpn": return 0x86 if name == "vpls": return 0x41 return None @classmethod def fromString (cls, string): return cls.names.get(string,cls.undefined) def known_families (): # it can not be a generator families = [ (AFI(AFI.ipv4), SAFI(SAFI.unicast)), 
(AFI(AFI.ipv4), SAFI(SAFI.multicast)), (AFI(AFI.ipv4), SAFI(SAFI.nlri_mpls)), (AFI(AFI.ipv4), SAFI(SAFI.mpls_vpn)), (AFI(AFI.ipv4), SAFI(SAFI.flow_ip)), (AFI(AFI.ipv4), SAFI(SAFI.flow_vpn)), (AFI(AFI.ipv6), SAFI(SAFI.unicast)), (AFI(AFI.ipv6), SAFI(SAFI.mpls_vpn)), (AFI(AFI.ipv6), SAFI(SAFI.flow_ip)), (AFI(AFI.ipv6), SAFI(SAFI.flow_vpn)), (AFI(AFI.l2vpn), SAFI(SAFI.vpls)) ] return families class Family (object): def __init__ (self, afi, safi): self.afi = AFI(afi) self.safi = SAFI(safi) def extensive (self): return 'afi %s safi %s' % (self.afi,self.safi) def __str__ (self): return 'family %s %s' % (self.afi,self.safi)
bsd-3-clause
2,321,827,960,558,510,000
22.923077
142
0.566106
false
DeV1doR/ethereumd-proxy
ethereumd/proxy.py
1
36272
import operator import asyncio import re import logging from enum import IntEnum from aioethereum import create_ethereum_client from aioethereum.errors import BadResponseError from .utils import hex_to_dec, wei_to_ether, ether_to_gwei, ether_to_wei GAS_AMOUNT = 21000 GAS_PRICE = 20 # Gwei DEFAUT_FEE = wei_to_ether(ether_to_gwei(GAS_PRICE) * GAS_AMOUNT) class Category(IntEnum): Blockchain = 0 Control = 1 Generating = 2 Mining = 3 Network = 4 Rawtransactions = 5 Util = 6 Wallet = 7 class Method: _r = {} @classmethod def registry(cls, category): def decorator(fn): cls._r.setdefault('category_%s' % int(category), []) \ .append(fn.__name__) return fn return decorator @classmethod def get_categories(cls): for key, funcs in cls._r.items(): if 'category_' in key: yield (Category(int(key.replace('category_', ''))).name, funcs) class EthereumProxy: def __init__(self, rpc): self._rpc = rpc self._log = logging.getLogger('ethereum-proxy') async def help(self, command=None): """"help ( "command" ) List all commands, or get help for a specified command. Arguments: 1. "command" (string, optional) The command to get help on Result: "text" (string) The help text """ if command: func = getattr(EthereumProxy, command, None) if func: return func.__doc__ return 'help: unknown command: %s' % command result = "" for category, funcs in Method.get_categories(): result += "== %s ==\n" % category for func in funcs: doc = getattr(EthereumProxy, func).__doc__ if not doc: result += func + '\n' else: result += doc.split('\n')[0] + '\n' result += "\n" result = result.rstrip('\n') return result @Method.registry(Category.Util) async def validateaddress(self, address): """validateaddress "address" Return information about the given ethereum address. Arguments: 1. "address" (string, required) The ethereum address to validate Result: { "isvalid" : true|false, (boolean) If the address is valid or not. If not, this is the only property returned. 
"address" : "address", (string) The ethereum address validated "scriptPubKey" : "hex", (string) The hex encoded scriptPubKey generated by the address "ismine" : true|false, (boolean) If the address is yours or not "iswatchonly" : true|false, (boolean) If the address is watchonly "isscript" : true|false, (boolean) If the key is a script "pubkey" : "publickeyhex", (string) The hex value of the raw public key "iscompressed" : true|false, (boolean) If the address is compressed "timestamp" : timestamp, (number, optional) The creation time of the key if available in seconds since epoch (Jan 1 1970 GMT) } Examples: > ethereum-cli validateaddress "0x6cace0528324a8afc2b157ceba3cdd2a27c4e21f" > curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc": "1.0", "id":"curltest", "method": "validateaddress", "params": ["0x6cace0528324a8afc2b157ceba3cdd2a27c4e21f"] }' http://127.0.0.01:9500/ """ if not isinstance(address, (bytes, str, bytearray)): return { 'isvalid': False } address = '0x{0}'.format(address) if len(address) == 40 else address if len(address) != 42: return { 'isvalid': False } elif re.match(r"^((0x)|(0X))?[0-9a-fA-F]{40}", address): return { 'isvalid': True, 'address': address, 'scriptPubKey': 'hex', 'ismine': (True if address in (await self._rpc.eth_accounts()) else False), 'iswatchonly': False, # TODO 'isscript': False, 'pubkey': address, 'iscompressed': False, 'timestamp': None, # TODO } else: return { 'isvalid': False } @Method.registry(Category.Wallet) async def listsinceblock(self, blockhash, target_confirmations=1, include_watchonly=False): """listsinceblock ( "blockhash" target_confirmations include_watchonly) Get all transactions in blocks since block [blockhash] Arguments: 1. "blockhash" (string, required) The block hash to list transactions since 2. target_confirmations: (numeric, optional) The confirmations required, must be 1 or more 3. 
include_watchonly: (bool, optional, default=false) Include transactions to watch-only addresses (see 'importaddress') Result: { "transactions": [ "address":"address", (string) The ethereum address of the transaction. Not present for move transactions (category = move). "category":"send|receive", (string) The transaction category. 'send' has negative amounts, 'receive' has positive amounts. "amount": x.xxx, (numeric) The amount in BTC. This is negative for the 'send' category, and for the 'move' category for moves outbound. It is positive for the 'receive' category, and for the 'move' category for inbound funds. "vout" : n, (numeric) the vout value "fee": x.xxx, (numeric) The amount of the fee in BTC. This is negative and only available for the 'send' category of transactions. "confirmations": n, (numeric) The number of confirmations for the transaction. Available for 'send' and 'receive' category of transactions. When it's < 0, it means the transaction conflicted that many blocks ago. "blockhash": "hashvalue", (string) The block hash containing the transaction. Available for 'send' and 'receive' category of transactions. "blockindex": n, (numeric) The index of the transaction in the block that includes it. Available for 'send' and 'receive' category of transactions. "blocktime": xxx, (numeric) The block time in seconds since epoch (1 Jan 1970 GMT). "txid": "transactionid", (string) The transaction id. Available for 'send' and 'receive' category of transactions. "time": xxx, (numeric) The transaction time in seconds since epoch (Jan 1 1970 GMT). "timereceived": xxx, (numeric) The time received in seconds since epoch (Jan 1 1970 GMT). Available for 'send' and 'receive' category of transactions. "abandoned": xxx, (bool) 'true' if the transaction has been abandoned (inputs are respendable). Only available for the 'send' category of transactions. "comment": "...", (string) If a comment is associated with the transaction. 
"label" : "label" (string) A comment for the address/transaction, if any "to": "...", (string) If a comment to is associated with the transaction. ], "lastblock": "lastblockhash" (string) The hash of the last block } Examples: > ethereum-cli listsinceblock > ethereum-cli listsinceblock "0x2a7f92d11cf8194f2bc8976e0532a9d7735e60e99e3339cb2316bd4c5b4137ce" > curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc": "1.0", "id":"curltest", "method": "listsinceblock", "params": ["0x2a7f92d11cf8194f2bc8976e0532a9d7735e60e99e3339cb2316bd4c5b4137ce"] }' http://127.0.0.01:9500/ """ # TODO: Optimization?? # TODO: Correct return data if target_confirmations < 1: raise BadResponseError('Invalid parameter', code=-8) transactions = [] latest_block, from_block, addresses = await asyncio.gather( self._rpc.eth_getBlockByNumber(), self._rpc.eth_getBlockByHash(blockhash), self._rpc.eth_accounts() ) if target_confirmations == 1: lst_hash = await self.getbestblockhash() else: need_height = hex_to_dec(latest_block['number']) + 1 - \ target_confirmations lst_hash = (await self._rpc.eth_getBlockByNumber(need_height))['hash'] if not from_block: return { 'transactions': transactions, 'lastblock': lst_hash, } start_height = hex_to_dec(from_block['number']) + 1 end_height = hex_to_dec(latest_block['number']) def _fetch_block_transacs(addresses, block, tr): category = None if tr['from'] in addresses: address = tr['to'] category = 'send' elif tr['to'] in addresses: address = tr['to'] category = 'receive' if category: return { 'address': address, 'category': category, 'amount': wei_to_ether(hex_to_dec(tr['value'])), 'vout': 1, 'fee': (hex_to_dec(tr['gasPrice']) * wei_to_ether(hex_to_dec(tr['gas']))), 'confirmations': (end_height + 1 - hex_to_dec(tr['blockNumber'])), 'blockhash': tr['blockHash'], 'blockindex': None, # TODO 'blocktime': hex_to_dec(block['timestamp']), 'txid': tr['hash'], 'time': hex_to_dec(block['timestamp']), 'timereceived': None, # TODO 'abandoned': False, # TODO 
'comment': None, # TODO 'label': None, # TODO 'to': None, # TODO } blocks = [from_block] blocks.extend(filter(lambda b: b is not None, await asyncio.gather(*( self._rpc.eth_getBlockByNumber(height) for height in range(start_height, end_height))))) blocks.append(latest_block) for block in blocks: for tr in block['transactions']: if not ( tr['to'] in addresses and tr['from'] in addresses ): fetched_tr = _fetch_block_transacs(addresses, block, tr) if fetched_tr: transactions.append(fetched_tr) return { 'transactions': transactions, 'lastblock': lst_hash, } @Method.registry(Category.Wallet) async def walletpassphrase(self, address, passphrase, timeout): """walletpassphrase "passphrase" timeout Stores the wallet decryption key in memory for 'timeout' seconds. This is needed prior to performing transactions related to private keys such as sending ether Arguments: 1. "address" (string, required) Address of account. 2. "passphrase" (string, required) The wallet passphrase 3. timeout (numeric, required) The time to keep the decryption key in seconds. Note: Issuing the walletpassphrase command while the wallet is already unlocked will set a new unlock time that overrides the old one. Examples: unlock the wallet for 60 seconds > ethereum-cli walletpassphrase "0x6cace0528324a8afc2b157ceba3cdd2a27c4e21f" "my pass phrase" 60 Lock the wallet again (before 60 seconds) > ethereum-cli walletlock "0x6cace0528324a8afc2b157ceba3cdd2a27c4e21f" As json rpc call > curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc": "1.0", "id":"curltest", "method": "walletpassphrase", "params": ["0x6cace0528324a8afc2b157ceba3cdd2a27c4e21f", "my pass phrase", 60] }' http://127.0.0.01:9500/ """ return await self._rpc.personal_unlockAccount(address, passphrase, timeout) @Method.registry(Category.Wallet) async def walletlock(self, address): """walletlock Removes the wallet encryption key from memory, locking the wallet. 
After calling this method, you will need to call walletpassphrase again before being able to call any methods which require the wallet to be unlocked. Arguments: 1. "address" (string, required) Address of account. Examples: Clear the passphrase since we are done before 2 minutes is up > ethereum-cli walletlock "0x6cace0528324a8afc2b157ceba3cdd2a27c4e21f" As json rpc call > curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc": "1.0", "id":"curltest", "method": "walletlock", "params": ["0x6cace0528324a8afc2b157ceba3cdd2a27c4e21f"] }' http://127.0.0.01:9500/ """ return await self._rpc.personal_lockAccount(address) @Method.registry(Category.Blockchain) async def getblockhash(self, height): """getblockhash height Returns hash of block in best-block-chain at height provided. Arguments: 1. height (numeric, required) The height index Result: "hash" (string) The block hash Examples: > ethereum-cli getblockhash 1000 > curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc": "1.0", "id":"curltest", "method": "getblockhash", "params": [1000] }' http://127.0.0.01:9500/ """ if height < 0: raise BadResponseError('Block height out of range', code=-8) block = await self._rpc.eth_getBlockByNumber(height) if block is None: raise BadResponseError('Block height out of range', code=-8) return block['hash'] @Method.registry(Category.Blockchain) async def getdifficulty(self): """getdifficulty Returns the proof-of-work difficulty as a multiple of the minimum difficulty. Result: n.nnn (numeric) the proof-of-work difficulty as a multiple of the minimum difficulty. 
Examples: > ethereum-cli getdifficulty > curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc": "1.0", "id":"curltest", "method": "getdifficulty", "params": [] }' http://127.0.0.01:9500/ """ return await self._rpc.eth_hashrate() @Method.registry(Category.Util) async def estimatefee(self, nblocks=1): """estimatefee nblocks Estimates the approximate fee needed for a transaction to begin confirmation within nblocks blocks. Arguments: 1. nblocks (numeric, required) DEPRECATED. Result: n (numeric) estimated fee-per-kilobyte Example: > ethereum-cli estimatefee """ gas = await self._paytxfee_to_etherfee() return wei_to_ether(gas['gas_amount'] * gas['gas_price']) @Method.registry(Category.Wallet) async def getbalance(self, account=None, minconf=1, include_watchonly=True): """getbalance ( "account" minconf include_watchonly ) If account is not specified, returns the server's total available balance. If account is specified (DEPRECATED), returns the balance in the account. Note that the account "" is not the same as leaving the parameter out. The server total may be different to the balance in the default "" account. Arguments: 1. "account" (string, optional) DEPRECATED. The account string may be given as a specific account name to find the balance associated with wallet keys in a named account, or as the empty string ("") to find the balance associated with wallet keys not in any named account, or as "*" to find the balance associated with all wallet keys regardless of account. When this option is specified, it calculates the balance in a different way than when it is not specified, and which can count spends twice when there are conflicting pending transactions (such as those created by the bumpfee command), temporarily resulting in low or even negative balances. In general, account balance calculation is not considered reliable and has resulted in confusing outcomes, so it is recommended to avoid passing this argument. 2. 
minconf (numeric, optional, default=1) Only include transactions confirmed at least this many times. 3. include_watchonly (bool, optional, default=false) DEPRECATED. Also include balance in watch-only addresses (see 'importaddress') Result: amount (numeric) The total amount in BTC received for this account. Examples: The total amount in the wallet > ethereum-cli getbalance The total amount in the wallet at least 5 blocks confirmed > ethereum-cli getbalance "*" 6 As a json rpc call > curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc": "1.0", "id":"curltest", "method": "getbalance", "params": ["*", 6] }' http://127.0.0.01:9500/ """ # NOTE: minconf nt work curently async def _get_balance(address): balance = (await self._rpc.eth_getBalance(address)) or 0 if not isinstance(balance, (int, float)): balance = hex_to_dec(balance) return wei_to_ether(balance) if account: return await _get_balance(account) addresses = await self._rpc.eth_accounts() return sum(await asyncio.gather(*(_get_balance(address) for address in addresses))) @Method.registry(Category.Wallet) async def settxfee(self, amount): """settxfee amount Set the transaction fee for transactions only. Overwrites the paytxfee parameter. Arguments: 1. amount (numeric or string, required) The transaction fee in ether Result true|false (boolean) Returns true if successful Examples: > ethereum-cli settxfee 0.00042 > curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc": "1.0", "id":"curltest", "method": "settxfee", "params": [0.00042] }' http://127.0.0.01:9500/ """ if isinstance(amount, (int, float)) and amount <= 0: raise BadResponseError('Amount out of range', code=-3) try: self._paytxfee = float(amount) except Exception: return False else: return True @Method.registry(Category.Wallet) async def listaccounts(self, minconf=1, include_watchonly=True): """listaccounts ( minconf include_watchonly) Arguments: 1. 
minconf (numeric, optional, default=1) Only include transactions with at least this many confirmations 2. include_watchonly (bool, optional, default=false) DEPRECATED. Include balances in watch-only addresses (see 'importaddress') Result: { (json object where keys are account names, and values are numeric balances "account": x.xxx, (numeric) The property name is the account name, and the value is the total balance for the account. ... } Examples: List account balances where there at least 1 confirmation > ethereum-cli listaccounts List account balances including zero confirmation transactions > ethereum-cli listaccounts 0 List account balances for 6 or more confirmations > ethereum-cli listaccounts 6 As json rpc call > curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc": "1.0", "id":"curltest", "method": "listaccounts", "params": [6] }' http://127.0.0.01:9500/ """ # NOTE: minconf nt work curently addresses = await self._rpc.eth_accounts() accounts = {} for i, address in enumerate(addresses): # account = 'Account #{0}'.format(i) balance = (await self._rpc.eth_getBalance(address)) or 0 if not isinstance(balance, (int, float)): balance = hex_to_dec(balance) accounts[address] = wei_to_ether(balance) return accounts @Method.registry(Category.Wallet) async def gettransaction(self, txid, include_watchonly=False): """gettransaction "txid" ( include_watchonly ) Get detailed information about in-wallet transaction <txid> Arguments: 1. "txid" (string, required) The transaction id 2. "include_watchonly" (bool, optional, default=false) DEPRECATED. Whether to include watch-only addresses in balance calculation and details[] Result: { "amount" : x.xxx, (numeric) The transaction amount in BTC "fee": x.xxx, (numeric) The amount of the fee in BTC. This is negative and only available for the 'send' category of transactions. 
"confirmations" : n, (numeric) The number of confirmations "blockhash" : "hash", (string) The block hash "blockindex" : xx, (numeric) The index of the transaction in the block that includes it "blocktime" : ttt, (numeric) The time in seconds since epoch (1 Jan 1970 GMT) "txid" : "transactionid", (string) The transaction id. "time" : ttt, (numeric) The transaction time in seconds since epoch (1 Jan 1970 GMT) "timereceived" : ttt, (numeric) The time received in seconds since epoch (1 Jan 1970 GMT) "details" : [ { "address" : "address", (string) The ethereum address involved in the transaction "category" : "send|receive", (string) The category, either 'send' or 'receive' "amount" : x.xxx, (numeric) The amount in BTC "label" : "label", (string) A comment for the address/transaction, if any "vout" : n, (numeric) the vout value "fee": x.xxx, (numeric) The amount of the fee in BTC. This is negative and only available for the 'send' category of transactions. "abandoned": xxx (bool) 'true' if the transaction has been abandoned (inputs are respendable). Only available for the 'send' category of transactions. } ,... 
], "hex" : "data" (string) Raw data for transaction } Examples: > ethereum-cli gettransaction "0xa4cb352eaff243fc962db84c1ab9e180bf97857adda51e2a417bf8015f05def3" > ethereum-cli gettransaction "0xa4cb352eaff243fc962db84c1ab9e180bf97857adda51e2a417bf8015f05def3" true > curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc": "1.0", "id":"curltest", "method": "gettransaction", "params": ["0xa4cb352eaff243fc962db84c1ab9e180bf97857adda51e2a417bf8015f05def3"] }' http://127.0.0.01:9500/ """ # TODO: Make workable include_watchonly flag transaction, addresses = await asyncio.gather( self._rpc.eth_getTransactionByHash(txid), self._rpc.eth_accounts() ) if transaction is None: raise BadResponseError('Invalid or non-wallet transaction id', code=-5) trans_info = { 'amount': wei_to_ether(hex_to_dec(transaction['value'])), 'blockhash': transaction['blockHash'], 'blockindex': None, 'blocktime': None, 'confirmations': 0, 'trusted': None, 'walletconflicts': [], 'txid': transaction['hash'], 'time': None, 'timereceived': None, 'details': [], 'hex': transaction['input'], 'fee': DEFAUT_FEE, } if hex_to_dec(transaction['blockHash']) != 0: block = await self.getblock(transaction['blockHash']) trans_info['confirmations'] = block['confirmations'] else: trans_info['confirmations'] = 0 if transaction['to'] in addresses: trans_info['details'].append({ 'address': transaction['to'], 'category': 'receive', 'amount': trans_info['amount'], 'label': '', 'vout': 1 }) if transaction['from'] in addresses: from_ = { 'address': transaction['to'], 'category': 'send', 'amount': operator.neg(trans_info['amount']), 'vout': 1, 'abandoned': False, 'fee': DEFAUT_FEE, } if hex_to_dec(transaction['blockHash']): tr_hash, tr_receipt = await asyncio.gather( self._rpc.eth_getTransactionByHash(transaction['hash']), self._rpc.eth_getTransactionReceipt(transaction['hash']) ) from_['fee'] = (hex_to_dec(tr_hash['gasPrice']) * wei_to_ether( hex_to_dec(tr_receipt['gasUsed']))) trans_info['details'].append(from_) 
return trans_info @Method.registry(Category.Wallet) async def getnewaddress(self, passphrase): """getnewaddress ( "passphrase" ) Returns a new Ethereum address for receiving payments. Arguments: 1. "passphrase" (string, required) The password for new account. If not provided, the default password "" is used. Result: "passphrase" (string) The passphrase for address Examples: > ethereum-cli getnewaddress "passphrase" > curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc": "1.0", "id":"curltest", "method": "getnewaddress", "params": ["passphrase"] }' http://127.0.0.01:9500/ """ return await self._rpc.personal_newAccount(passphrase) @Method.registry(Category.Wallet) async def sendfrom(self, fromaccount, toaddress, amount, minconf=1, comment="", comment_to=""): """sendfrom "fromaccount" "toaddress" amount ( minconf "comment" "comment_to" ) DEPRECATED (use sendtoaddress). Sent an amount from an account to a ethereum address. Requires wallet passphrase to be set with walletpassphrase call. Arguments: 1. "fromaccount" (string, required) The name of the account to send funds from. May be the default account using "". Specifying an account does not influence coin selection, but it does associate the newly created transaction with the account, so the account's balance computation and transaction history can reflect the spend. 2. "toaddress" (string, required) The ethereum address to send funds to. 3. amount (numeric or string, required) The amount in BTC (transaction fee is added on top). 4. minconf (numeric, optional, default=1) Only use funds with at least this many confirmations. 5. "comment" (string, optional) A comment used to store what the transaction is for. This is not part of the transaction, just kept in your wallet. 6. "comment_to" (string, optional) An optional comment to store the name of the person or organization to which you're sending the transaction. This is not part of the transaction, it is just kept in your wallet. 
Result: "txid" (string) The transaction id. Examples: Send 0.01 ETH from the coinbase account to the address, must have at least 1 confirmation > ethereum-cli sendfrom "" "0xc729d1e61e94e0029865d759327667a6abf0cdc5" 0.01 Send 0.01 from the tabby account to the given address, funds must have at least 6 confirmations > ethereum-cli sendfrom "tabby" "0xc729d1e61e94e0029865d759327667a6abf0cdc5" 0.01 6 "donation" "seans outpost" As a json rpc call > curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc": "1.0", "id":"curltest", "method": "sendfrom", "params": ["0xc729d1e61e94e0029865d759327667a6abf0cdc5", 0.01, 6, "donation", "seans outpost"] }' http://127.0.0.01:9500/ """ # TODO: add default fromaccount if empty as coinbase # TODO: Add amount and address validation # TODO: Add minconf logic gas = await self._paytxfee_to_etherfee() try: return await self._rpc.eth_sendTransaction( fromaccount, # from ??? toaddress, # to gas['gas_amount'], # gas amount gas['gas_price'], # gas price ether_to_wei(float(amount)), # value ) except BadResponseError as e: if ( e.code == -32000 and 'gas * price + value' in e.msg ): raise BadResponseError('Insufficient funds', code=-6) raise @Method.registry(Category.Wallet) async def sendtoaddress(self, address, amount, comment="", comment_to="", subtractfeefromamount=False): """sendtoaddress "address" amount ( "comment" "comment_to" subtractfeefromamount ) Send an amount to a given address from coinbase. Arguments: 1. "address" (string, required) The ethereum address to send to. 2. "amount" (numeric or string, required) The amount in ETH to send. eg 0.1 3. "comment" (string, optional) DEPRECATED. A comment used to store what the transaction is for. This is not part of the transaction, just kept in your wallet. 4. "comment_to" (string, optional) DEPRECATED. A comment to store the name of the person or organization to which you're sending the transaction. This is not part of the transaction, just kept in your wallet. 5. 
subtractfeefromamount (boolean, optional, default=false) The fee will be deducted from the amount being sent. The recipient will receive less ether than you enter in the amount field. Result: "txid" (string) The transaction id. Examples: > ethereum-cli sendtoaddress "0xc729d1e61e94e0029865d759327667a6abf0cdc5" 0.1 > ethereum-cli sendtoaddress "0xc729d1e61e94e0029865d759327667a6abf0cdc5" 0.1 "donation" "seans outpost" > ethereum-cli sendtoaddress "0xc729d1e61e94e0029865d759327667a6abf0cdc5" 0.1 "" "" true > curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc": "1.0", "id":"curltest", "method": "sendtoaddress", "params": ["0xc729d1e61e94e0029865d759327667a6abf0cdc5", 0.1, "donation", "seans outpost"] }' http://127.0.0.01:9500/ """ # TODO: Add subtractfeefromamount logic # TODO: Add amount and address validation return await self.sendfrom((await self._rpc.eth_coinbase()), address, amount) @Method.registry(Category.Blockchain) async def getblockcount(self): """getblockcount Returns the number of blocks in the longest blockchain. Result: n (numeric) The current block count Examples: > ethereum-cli getblockcount > curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc": "1.0", "id":"curltest", "method": "getblockcount", "params": [] }' http://127.0.0.01:9500/ """ # TODO: What happen when no blocks in db? return await self._rpc.eth_blockNumber() @Method.registry(Category.Blockchain) async def getbestblockhash(self): """getbestblockhash Returns the hash of the best (tip) block in the longest blockchain. Result: "hex" (string) the block hash hex encoded Examples: > ethereum-cli getbestblockhash > curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc": "1.0", "id":"curltest", "method": "getbestblockhash", "params": [] }' http://127.0.0.01:9500/ """ # TODO: What happen when no blocks in db? 
block = await self._rpc.eth_getBlockByNumber(tx_objects=False) if block is None: raise BadResponseError('Block not found', code=-5) return block['hash'] @Method.registry(Category.Blockchain) async def getblock(self, blockhash, verbose=True): """getblock "blockhash" ( verbose ) If verbose is false, returns a string that is serialized, hex-encoded data for block 'hash'. If verbose is true, returns an Object with information about block <hash>. Arguments: 1. "blockhash" (string, required) The block hash 2. verbose (boolean, optional, default=true) true for a json object, false for the hex encoded data Result (for verbose = true): { "hash" : "hash", (string) the block hash (same as provided) "confirmations" : n, (numeric) The number of confirmations, or -1 if the block is not on the main chain "size" : n, (numeric) The block size "strippedsize" : n, (numeric) The block size excluding witness data "weight" : n (numeric) The block weight "height" : n, (numeric) The block height or index "version" : n, (numeric) The block version "versionHex" : "00000000", (string) The block version formatted in hexadecimal "merkleroot" : "xxxx", (string) The merkle root "tx" : [ (array of string) The transaction ids "transactionid" (string) The transaction id ,... ], "time" : ttt, (numeric) The block time in seconds since epoch (Jan 1 1970 GMT) "mediantime" : ttt, (numeric) The median block time in seconds since epoch (Jan 1 1970 GMT) "nonce" : n, (numeric) The nonce "bits" : "1d00ffff", (string) The bits "difficulty" : x.xxx, (numeric) The difficulty "chainwork" : "xxxx", (string) Expected number of hashes required to produce the chain up to this block (in hex) "previousblockhash" : "hash", (string) The hash of the previous block "nextblockhash" : "hash" (string) The hash of the next block } Result (for verbose=false): "data" (string) A string that is serialized, hex-encoded data for block 'hash'. 
Examples: > ethereum-cli getblock "0x8b22f9aa6c27231fb4acc587300abadd259f501ba99ef18d11e9e4dfa741eb39" > curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc": "1.0", "id":"curltest", "method": "getblock", "params": ["0x8b22f9aa6c27231fb4acc587300abadd259f501ba99ef18d11e9e4dfa741eb39"] }' http://127.0.0.01:9500/ """ block = await self._rpc.eth_getBlockByHash(blockhash, False) if block is None: raise BadResponseError('Block not found', code=-5) if not verbose: return block['hash'] next_block, confirmations = await asyncio.gather( self._rpc.eth_getBlockByNumber( hex_to_dec(block['number']) + 1, False), self._get_confirmations(block), ) data = { 'hash': block['hash'], 'confirmations': (await self._get_confirmations(block)), 'strippedsize': None, 'size': None, 'weight': None, 'height': None, 'version': None, 'versionHex': None, 'merkleroot': None, 'tx': block['transactions'], 'time': hex_to_dec(block['timestamp']), 'mediantime': None, 'nonce': hex_to_dec(block['nonce']), 'bits': None, 'difficulty': hex_to_dec(block['totalDifficulty']), 'chainwork': None, 'previousblockhash': block['parentHash'] } if next_block: data['nextblockhash'] = next_block['hash'] return data # UTILS METHODS async def _paytxfee_to_etherfee(self): try: gas_price = ether_to_wei(self._paytxfee / GAS_AMOUNT) except AttributeError: gas_price = await self._rpc.eth_gasPrice() finally: return { 'gas_amount': GAS_AMOUNT, 'gas_price': gas_price, } async def _calculate_confirmations(self, response): return (await self._rpc.eth_blockNumber() - hex_to_dec(response['number'])) async def _get_confirmations(self, block): last_block_number = await self._rpc.eth_blockNumber() if not last_block_number: raise RuntimeError('Blockchain not synced.') if not block['number']: return 0 return (last_block_number - hex_to_dec(block['number'])) async def create_ethereumd_proxy(uri, timeout=60, *, loop=None): rpc = await create_ethereum_client(uri, timeout, loop=loop) return EthereumProxy(rpc)
mit
1,286,945,215,647,437,800
42.335723
236
0.611381
false