docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Start a subscription for profit and loss events for single positions. Returns a :class:`.PnLSingle` object that is kept live updated. The result can also be queried from :meth:`.pnlSingle`. https://interactivebrokers.github.io/tws-api/pnl.html Args: account: Subscribe to this account. modelCode: Filter for this account model. conId: Filter for this contract ID.
def reqPnLSingle(
        self, account: str, modelCode: str, conId: int) -> PnLSingle:
    """Subscribe to live-updated profit-and-loss for a single position.

    The returned :class:`.PnLSingle` is kept updated; it can also be
    queried later via :meth:`.pnlSingle`.

    Args:
        account: Subscribe to this account.
        modelCode: Filter for this account model.
        conId: Filter for this contract ID.
    """
    subKey = (account, modelCode, conId)
    # Only one live subscription per (account, modelCode, conId) is allowed.
    assert subKey not in self.wrapper.pnlSingleKey2ReqId
    reqId = self.client.getReqId()
    self.wrapper.pnlSingleKey2ReqId[subKey] = reqId
    pnl = PnLSingle(account, modelCode, conId)
    self.wrapper.pnlSingles[reqId] = pnl
    self.client.reqPnLSingle(reqId, account, modelCode, conId)
    return pnl
104,845
Cancel PnLSingle subscription for the given account, modelCode and conId. Args: account: Cancel for this account name. modelCode: Cancel for this account model. conId: Cancel for this contract ID.
def cancelPnLSingle(
        self, account: str, modelCode: str, conId: int):
    """Cancel the PnLSingle subscription for the given key.

    Args:
        account: Cancel for this account name.
        modelCode: Cancel for this account model.
        conId: Cancel for this contract ID.
    """
    subKey = (account, modelCode, conId)
    reqId = self.wrapper.pnlSingleKey2ReqId.pop(subKey, None)
    if not reqId:
        self._logger.error(
            'cancelPnLSingle: No subscription for '
            f'account {account}, modelCode {modelCode}, conId {conId}')
        return
    self.client.cancelPnLSingle(reqId)
    self.wrapper.pnlSingles.pop(reqId, None)
104,846
Get a list of contract details that match the given contract. If the returned list is empty then the contract is not known; if the list has multiple values then the contract is ambiguous. The fully qualified contract is available in the ContractDetails.contract attribute. This method is blocking. https://interactivebrokers.github.io/tws-api/contract_details.html Args: contract: The contract to get details for.
def reqContractDetails(self, contract: Contract) -> List[ContractDetails]:
    """Blocking fetch of contract details matching *contract*.

    An empty result means the contract is unknown; multiple results mean
    it is ambiguous. The fully qualified contract is in
    ``ContractDetails.contract``.

    Args:
        contract: The contract to get details for.
    """
    coro = self.reqContractDetailsAsync(contract)
    return self._run(coro)
104,847
Request contract descriptions of contracts that match a pattern. This method is blocking. https://interactivebrokers.github.io/tws-api/matching_symbols.html Args: pattern: The first few letters of the ticker symbol, or for longer strings a character sequence matching a word in the security name.
def reqMatchingSymbols(self, pattern: str) -> List[ContractDescription]:
    """Blocking fetch of contract descriptions matching *pattern*.

    Args:
        pattern: First letters of the ticker symbol, or for longer
            strings a sequence matching a word in the security name.
    """
    coro = self.reqMatchingSymbolsAsync(pattern)
    return self._run(coro)
104,848
Request price increments rule. https://interactivebrokers.github.io/tws-api/minimum_increment.html Args: marketRuleId: ID of market rule. The market rule IDs for a contract can be obtained via :meth:`.reqContractDetails` from :class:`.ContractDetails`.marketRuleIds, which contains a comma separated string of market rule IDs.
def reqMarketRule(self, marketRuleId: int) -> PriceIncrement:
    """Blocking fetch of the price-increment rule for *marketRuleId*.

    Args:
        marketRuleId: Market rule ID, obtainable via
            :meth:`.reqContractDetails` from ``ContractDetails.marketRuleIds``.
    """
    coro = self.reqMarketRuleAsync(marketRuleId)
    return self._run(coro)
104,849
Request realtime 5 second bars. https://interactivebrokers.github.io/tws-api/realtime_bars.html Args: contract: Contract of interest. barSize: Must be 5. whatToShow: Specifies the source for constructing bars. Can be 'TRADES', 'MIDPOINT', 'BID' or 'ASK'. useRTH: If True then only show data from within Regular Trading Hours, if False then show all data. realTimeBarsOptions: Unknown.
def reqRealTimeBars(
        self, contract: Contract, barSize: int,
        whatToShow: str, useRTH: bool,
        realTimeBarsOptions: List[TagValue] = None) -> RealTimeBarList:
    """Subscribe to realtime 5-second bars; returns a live RealTimeBarList.

    Args:
        contract: Contract of interest.
        barSize: Must be 5.
        whatToShow: Bar source: 'TRADES', 'MIDPOINT', 'BID' or 'ASK'.
        useRTH: If True, restrict data to Regular Trading Hours.
        realTimeBarsOptions: Unknown.
    """
    reqId = self.client.getReqId()
    barList = RealTimeBarList()
    barList.reqId = reqId
    barList.contract = contract
    barList.barSize = barSize
    barList.whatToShow = whatToShow
    barList.useRTH = useRTH
    barList.realTimeBarsOptions = realTimeBarsOptions
    self.wrapper.startSubscription(reqId, barList, contract)
    self.client.reqRealTimeBars(
        reqId, contract, barSize, whatToShow, useRTH, realTimeBarsOptions)
    return barList
104,850
Cancel the realtime bars subscription. Args: bars: The bar list that was obtained from ``reqRealTimeBars``.
def cancelRealTimeBars(self, bars: RealTimeBarList):
    """Cancel a realtime bars subscription.

    Args:
        bars: The bar list obtained from ``reqRealTimeBars``.
    """
    # Cancel with TWS first, then drop the local subscription bookkeeping.
    self.client.cancelRealTimeBars(bars.reqId)
    self.wrapper.endSubscription(bars)
104,851
Cancel the update subscription for the historical bars. Args: bars: The bar list that was obtained from ``reqHistoricalData`` with a keepUpToDate subscription.
def cancelHistoricalData(self, bars: BarDataList):
    """Cancel the update subscription for historical bars.

    Args:
        bars: The bar list obtained from ``reqHistoricalData`` with a
            keepUpToDate subscription.
    """
    # Cancel with TWS first, then drop the local subscription bookkeeping.
    self.client.cancelHistoricalData(bars.reqId)
    self.wrapper.endSubscription(bars)
104,853
Get the datetime of earliest available historical data for the contract. Args: contract: Contract of interest. useRTH: If True then only show data from within Regular Trading Hours, if False then show all data. formatDate: If set to 2 then the result is returned as a timezone-aware datetime.datetime with UTC timezone.
def reqHeadTimeStamp(
        self, contract: Contract, whatToShow: str,
        useRTH: bool, formatDate: int = 1) -> datetime.datetime:
    """Blocking fetch of the earliest available historical data time.

    Args:
        contract: Contract of interest.
        useRTH: If True, restrict to Regular Trading Hours.
        formatDate: If 2, return a timezone-aware UTC datetime.
    """
    coro = self.reqHeadTimeStampAsync(
        contract, whatToShow, useRTH, formatDate)
    return self._run(coro)
104,855
Unsubscribe from realtime streaming tick data. Args: contract: The exact contract object that was used to subscribe with.
def cancelMktData(self, contract: Contract):
    """Unsubscribe from realtime streaming tick data.

    Args:
        contract: The exact contract object used to subscribe with.
    """
    tick = self.ticker(contract)
    reqId = self.wrapper.endTicker(tick, 'mktData')
    if not reqId:
        self._logger.error(
            'cancelMktData: '
            f'No reqId found for contract {contract}')
        return
    self.client.cancelMktData(reqId)
104,857
Subscribe to tick-by-tick data and return the Ticker that holds the ticks in ticker.tickByTicks. https://interactivebrokers.github.io/tws-api/tick_data.html Args: contract: Contract of interest. tickType: One of 'Last', 'AllLast', 'BidAsk' or 'MidPoint'. numberOfTicks: Number of ticks or 0 for unlimited. ignoreSize: Ignore bid/ask ticks that only update the size.
def reqTickByTickData(
        self, contract: Contract, tickType: str,
        numberOfTicks: int = 0, ignoreSize: bool = False) -> Ticker:
    """Subscribe to tick-by-tick data; ticks collect in ``ticker.tickByTicks``.

    Args:
        contract: Contract of interest.
        tickType: One of 'Last', 'AllLast', 'BidAsk' or 'MidPoint'.
        numberOfTicks: Number of ticks or 0 for unlimited.
        ignoreSize: Ignore bid/ask ticks that only update the size.
    """
    reqId = self.client.getReqId()
    tick = self.wrapper.startTicker(reqId, contract, tickType)
    self.client.reqTickByTickData(
        reqId, contract, tickType, numberOfTicks, ignoreSize)
    return tick
104,858
Unsubscribe from tick-by-tick data Args: contract: The exact contract object that was used to subscribe with.
def cancelTickByTickData(self, contract: Contract, tickType: str):
    """Unsubscribe from tick-by-tick data.

    Args:
        contract: The exact contract object used to subscribe with.
        tickType: The tick type used to subscribe with.
    """
    ticker = self.ticker(contract)
    reqId = self.wrapper.endTicker(ticker, tickType)
    if reqId:
        self.client.cancelTickByTickData(reqId)
    else:
        # Bug fix: the log message previously said 'cancelMktData'
        # (copy-paste error), misattributing the failure.
        self._logger.error(
            f'cancelTickByTickData: No reqId found for contract {contract}')
104,859
Unsubscribe from market depth data. Args: contract: The exact contract object that was used to subscribe with.
def cancelMktDepth(self, contract: Contract, isSmartDepth=False):
    """Unsubscribe from market depth data.

    Args:
        contract: The exact contract object used to subscribe with.
    """
    tick = self.ticker(contract)
    reqId = self.wrapper.endTicker(tick, 'mktDepth')
    if not reqId:
        self._logger.error(
            f'cancelMktDepth: No reqId found for contract {contract}')
        return
    self.client.cancelMktDepth(reqId, isSmartDepth)
104,861
Request histogram data. This method is blocking. https://interactivebrokers.github.io/tws-api/histograms.html Args: contract: Contract to query. useRTH: If True then only show data from within Regular Trading Hours, if False then show all data. period: Period of which data is being requested, for example '3 days'.
def reqHistogramData(
        self, contract: Contract,
        useRTH: bool, period: str) -> List[HistogramData]:
    """Blocking fetch of histogram data.

    Args:
        contract: Contract to query.
        useRTH: If True, restrict to Regular Trading Hours.
        period: Period of data requested, e.g. '3 days'.
    """
    coro = self.reqHistogramDataAsync(contract, useRTH, period)
    return self._run(coro)
104,862
Do a blocking market scan by starting a subscription and canceling it after the initial list of results are in. This method is blocking. https://interactivebrokers.github.io/tws-api/market_scanners.html Args: subscription: Basic filters. scannerSubscriptionOptions: Unknown. scannerSubscriptionFilterOptions: Advanced generic filters.
def reqScannerData(
        self, subscription: ScannerSubscription,
        scannerSubscriptionOptions: List[TagValue] = None,
        scannerSubscriptionFilterOptions:
        List[TagValue] = None) -> ScanDataList:
    """Blocking market scan: subscribe, collect the first results, cancel.

    Args:
        subscription: Basic filters.
        scannerSubscriptionOptions: Unknown.
        scannerSubscriptionFilterOptions: Advanced generic filters.
    """
    coro = self.reqScannerDataAsync(
        subscription, scannerSubscriptionOptions,
        scannerSubscriptionFilterOptions)
    return self._run(coro)
104,864
Subscribe to market scan data. https://interactivebrokers.github.io/tws-api/market_scanners.html Args: subscription: What to scan for. scannerSubscriptionOptions: Unknown. scannerSubscriptionFilterOptions: Unknown.
def reqScannerSubscription(
        self, subscription: ScannerSubscription,
        scannerSubscriptionOptions: List[TagValue] = None,
        scannerSubscriptionFilterOptions:
        List[TagValue] = None) -> ScanDataList:
    """Subscribe to market scan data; returns a live ScanDataList.

    Args:
        subscription: What to scan for.
        scannerSubscriptionOptions: Unknown.
        scannerSubscriptionFilterOptions: Unknown.
    """
    reqId = self.client.getReqId()
    scanList = ScanDataList()
    scanList.reqId = reqId
    scanList.subscription = subscription
    scanList.scannerSubscriptionOptions = scannerSubscriptionOptions
    scanList.scannerSubscriptionFilterOptions = (
        scannerSubscriptionFilterOptions)
    self.wrapper.startSubscription(reqId, scanList)
    self.client.reqScannerSubscription(
        reqId, subscription, scannerSubscriptionOptions,
        scannerSubscriptionFilterOptions)
    return scanList
104,865
Cancel market data subscription. https://interactivebrokers.github.io/tws-api/market_scanners.html Args: dataList: The scan data list that was obtained from :meth:`.reqScannerSubscription`.
def cancelScannerSubscription(self, dataList: ScanDataList):
    """Cancel a market scan subscription.

    Args:
        dataList: The scan data list obtained from
            :meth:`.reqScannerSubscription`.
    """
    # Cancel with TWS first, then drop the local subscription bookkeeping.
    self.client.cancelScannerSubscription(dataList.reqId)
    self.wrapper.endSubscription(dataList)
104,866
Calculate the volatility given the option price. This method is blocking. https://interactivebrokers.github.io/tws-api/option_computations.html Args: contract: Option contract. optionPrice: Option price to use in calculation. underPrice: Price of the underlier to use in calculation implVolOptions: Unknown
def calculateImpliedVolatility(
        self, contract: Contract,
        optionPrice: float, underPrice: float,
        implVolOptions: List[TagValue] = None) -> OptionComputation:
    """Blocking calculation of implied volatility from the option price.

    Args:
        contract: Option contract.
        optionPrice: Option price to use in calculation.
        underPrice: Price of the underlier to use in calculation.
        implVolOptions: Unknown.
    """
    coro = self.calculateImpliedVolatilityAsync(
        contract, optionPrice, underPrice, implVolOptions)
    return self._run(coro)
104,867
Calculate the option price given the volatility. This method is blocking. https://interactivebrokers.github.io/tws-api/option_computations.html Args: contract: Option contract. volatility: Option volatility to use in calculation. underPrice: Price of the underlier to use in calculation implVolOptions: Unknown
def calculateOptionPrice(
        self, contract: Contract,
        volatility: float, underPrice: float,
        optPrcOptions=None) -> OptionComputation:
    """Blocking calculation of option price from the volatility.

    Args:
        contract: Option contract.
        volatility: Option volatility to use in calculation.
        underPrice: Price of the underlier to use in calculation.
        optPrcOptions: Unknown.
    """
    coro = self.calculateOptionPriceAsync(
        contract, volatility, underPrice, optPrcOptions)
    return self._run(coro)
104,868
Get the option chain. This method is blocking. https://interactivebrokers.github.io/tws-api/options.html Args: underlyingSymbol: Symbol of underlier contract. futFopExchange: Exchange (only for ``FuturesOption``, otherwise leave blank). underlyingSecType: The type of the underlying security, like 'STK' or 'FUT'. underlyingConId: conId of the underlying contract.
def reqSecDefOptParams(
        self, underlyingSymbol: str, futFopExchange: str,
        underlyingSecType: str, underlyingConId: int) -> List[OptionChain]:
    """Blocking fetch of the option chain.

    Args:
        underlyingSymbol: Symbol of underlier contract.
        futFopExchange: Exchange (only for ``FuturesOption``, else blank).
        underlyingSecType: Type of the underlying security ('STK', 'FUT', ...).
        underlyingConId: conId of the underlying contract.
    """
    coro = self.reqSecDefOptParamsAsync(
        underlyingSymbol, futFopExchange,
        underlyingSecType, underlyingConId)
    return self._run(coro)
104,869
Get the body of a news article. This method is blocking. https://interactivebrokers.github.io/tws-api/news.html Args: providerCode: Code indicating news provider, like 'BZ' or 'FLY'. articleId: ID of the specific article. newsArticleOptions: Unknown.
def reqNewsArticle(
        self, providerCode: str, articleId: str,
        newsArticleOptions: List[TagValue] = None) -> NewsArticle:
    """Blocking fetch of a news article body.

    Args:
        providerCode: News provider code, like 'BZ' or 'FLY'.
        articleId: ID of the specific article.
        newsArticleOptions: Unknown.
    """
    coro = self.reqNewsArticleAsync(
        providerCode, articleId, newsArticleOptions)
    return self._run(coro)
104,871
Replaces Financial Advisor's settings. Args: faDataType: See :meth:`.requestFA`. xml: The XML-formatted configuration string.
def replaceFA(self, faDataType: int, xml: str):
    """Replace the Financial Advisor's settings.

    Args:
        faDataType: See :meth:`.requestFA`.
        xml: The XML-formatted configuration string.
    """
    self.client.replaceFA(faDataType, xml)
104,873
Schedule the callback to be run at the given time with the given arguments. Args: time: Time to run callback. If given as :py:class:`datetime.time` then use today as date. callback: Callable scheduled to run. args: Arguments for to call callback with.
def schedule(
        time: Union[datetime.time, datetime.datetime],
        callback: Callable, *args):
    """Schedule *callback(*args)* to run once at the given time.

    Args:
        time: When to run; a plain :py:class:`datetime.time` uses today
            as the date.
        callback: Callable scheduled to run.
        args: Arguments to call the callback with.
    """
    target = _fillDate(time)
    # Delay relative to "now" in the target's own timezone.
    secondsAhead = (
        target - datetime.datetime.now(target.tzinfo)).total_seconds()
    asyncio.get_event_loop().call_later(secondsAhead, callback, *args)
104,912
Iterator that waits periodically until certain time points are reached while yielding those time points. Args: start: Start time, can be specified as datetime.datetime, or as datetime.time in which case today is used as the date end: End time, can be specified as datetime.datetime, or as datetime.time in which case today is used as the date step (float): The number of seconds of each period
def timeRange(
        start: datetime.time, end: datetime.time,
        step: float) -> Iterator[datetime.datetime]:
    """Yield periodic time points, waiting until each one is reached.

    Args:
        start: Start time; a plain datetime.time uses today as the date.
        end: End time; a plain datetime.time uses today as the date.
        step (float): The number of seconds of each period.
    """
    assert step > 0
    period = datetime.timedelta(seconds=step)
    current = _fillDate(start)
    stop = _fillDate(end)
    # Skip time points that have already passed.
    while current < datetime.datetime.now():
        current += period
    while current <= stop:
        waitUntil(current)
        yield current
        current += period
104,913
Run combined Qt5/asyncio event loop. Args: qtLib: Name of Qt library to use, can be 'PyQt5' or 'PySide2'. period: Period in seconds to poll Qt.
def useQt(qtLib: str = 'PyQt5', period: float = 0.01):
    """Run a combined Qt5/asyncio event loop.

    Args:
        qtLib: Name of Qt library to use, can be 'PyQt5' or 'PySide2'.
        period: Period in seconds to poll Qt.
    """
    def qt_step():
        # Re-arm on the asyncio loop, then give Qt one slice of time.
        loop.call_later(period, qt_step)
        if not stack:
            # Lazily create a single (QEventLoop, QTimer) pair; it is
            # reused across every step via the stack.
            qloop = QEventLoop()
            timer = QTimer()
            timer.timeout.connect(qloop.quit)
            stack.append((qloop, timer))
        qloop, timer = stack.pop()
        # Zero-timeout timer quits the Qt loop as soon as pending Qt
        # events have been processed.
        timer.start(0)
        qloop.exec_()
        timer.stop()
        stack.append((qloop, timer))
    if qtLib not in ('PyQt5', 'PySide2'):
        raise RuntimeError(f'Unknown Qt library: {qtLib}')
    if qtLib == 'PyQt5':
        from PyQt5.Qt import QApplication, QTimer, QEventLoop
    else:
        from PySide2.QtWidgets import QApplication
        from PySide2.QtCore import QTimer, QEventLoop
    global qApp
    # Reuse an existing QApplication if one is already running.
    qApp = QApplication.instance() or QApplication(sys.argv)
    loop = asyncio.get_event_loop()
    stack: list = []
    qt_step()
104,917
Stock contract. Args: symbol: Symbol name. exchange: Destination exchange. currency: Underlying currency.
def __init__(
        self, symbol: str = '', exchange: str = '', currency: str = '',
        **kwargs):
    """Stock contract.

    Args:
        symbol: Symbol name.
        exchange: Destination exchange.
        currency: Underlying currency.
    """
    Contract.__init__(
        self, secType='STK', symbol=symbol,
        exchange=exchange, currency=currency, **kwargs)
105,081
Foreign exchange currency pair. Args: pair: Shortcut for specifying symbol and currency, like 'EURUSD'. exchange: Destination exchange. symbol: Base currency. currency: Quote currency.
def __init__(
        self, pair: str = '', exchange: str = 'IDEALPRO',
        symbol: str = '', currency: str = '', **kwargs):
    """Foreign exchange currency pair.

    Args:
        pair: Shortcut for specifying symbol and currency, like 'EURUSD'.
        exchange: Destination exchange.
        symbol: Base currency.
        currency: Quote currency.
    """
    if pair:
        # 'EURUSD' splits into base 'EUR' and quote 'USD'; explicit
        # symbol/currency arguments take precedence over the pair.
        assert len(pair) == 6
        symbol = symbol or pair[:3]
        currency = currency or pair[3:]
    Contract.__init__(
        self, 'CASH', symbol=symbol,
        exchange=exchange, currency=currency, **kwargs)
105,083
To check if word is present in the keyword_trie_dict Args: word : string word that you want to check Returns: status : bool If word is present as it is in keyword_trie_dict then we return True, else False Examples: >>> keyword_processor.add_keyword('Big Apple') >>> 'Big Apple' in keyword_processor >>> # True
def __contains__(self, word):
    """Return True if *word* is stored verbatim in the keyword trie.

    Args:
        word: string, the word to check.

    Examples:
        >>> keyword_processor.add_keyword('Big Apple')
        >>> 'Big Apple' in keyword_processor
        >>> # True
    """
    if not self.case_sensitive:
        word = word.lower()
    node = self.keyword_trie_dict
    matched = 0
    for ch in word:
        if ch not in node:
            break
        node = node[ch]
        matched += 1
    # A hit requires consuming the whole word AND landing on a terminal node.
    return matched == len(word) and self._keyword in node
105,111
To add keyword to the dictionary pass the keyword and the clean name it maps to. Args: keyword : string keyword that you want to identify clean_name : string clean term for that keyword that you would want to get back in return or replace if not provided, keyword will be used as the clean name also. Examples: >>> keyword_processor['Big Apple'] = 'New York'
def __setitem__(self, keyword, clean_name=None):
    """Add *keyword* to the trie, mapped to *clean_name*.

    If *clean_name* is not given, the keyword doubles as its own clean
    name. Returns True if the keyword was newly added. An existing
    keyword's clean name is NOT overwritten.

    Examples:
        >>> keyword_processor['Big Apple'] = 'New York'
    """
    if not clean_name and keyword:
        clean_name = keyword
    if not (keyword and clean_name):
        return False
    if not self.case_sensitive:
        keyword = keyword.lower()
    node = self.keyword_trie_dict
    for letter in keyword:
        node = node.setdefault(letter, {})
    if self._keyword in node:
        return False
    node[self._keyword] = clean_name
    self._terms_in_trie += 1
    return True
105,112
To remove keyword from the dictionary pass the keyword and the clean name it maps to. Args: keyword : string keyword that you want to remove if it's present Examples: >>> keyword_processor.add_keyword('Big Apple') >>> del keyword_processor['Big Apple']
def __delitem__(self, keyword):
    """Remove *keyword* from the trie if present; prunes now-empty nodes.

    Examples:
        >>> keyword_processor.add_keyword('Big Apple')
        >>> del keyword_processor['Big Apple']
    """
    status = False
    if keyword:
        if not self.case_sensitive:
            keyword = keyword.lower()
        current_dict = self.keyword_trie_dict
        # Track (letter, parent-node) pairs along the path for pruning.
        character_trie_list = []
        for letter in keyword:
            if letter in current_dict:
                character_trie_list.append((letter, current_dict))
                current_dict = current_dict[letter]
            else:
                # if character is not found, break out of the loop
                current_dict = None
                break
        # remove the characters from trie dict if there are no other keywords with them
        if current_dict and self._keyword in current_dict:
            # we found a complete match for input keyword.
            character_trie_list.append((self._keyword, current_dict))
            character_trie_list.reverse()
            # Walk back up, deleting nodes that held only this path.
            for key_to_remove, dict_pointer in character_trie_list:
                if len(dict_pointer.keys()) == 1:
                    dict_pointer.pop(key_to_remove)
                else:
                    # more than one key means more than 1 path.
                    # Delete not required path and keep the other
                    dict_pointer.pop(key_to_remove)
                    break
            # successfully removed keyword
            status = True
            self._terms_in_trie -= 1
    return status
105,113
To add keywords from a list Args: keyword_list (list(str)): List of keywords to add Examples: >>> keyword_processor.add_keywords_from_list(["java", "python"]) Raises: AttributeError: If `keyword_list` is not a list.
def add_keywords_from_list(self, keyword_list):
    """Add every keyword in *keyword_list*.

    Args:
        keyword_list (list(str)): List of keywords to add.

    Raises:
        AttributeError: If `keyword_list` is not a list.
    """
    if not isinstance(keyword_list, list):
        raise AttributeError("keyword_list should be a list")
    for kw in keyword_list:
        self.add_keyword(kw)
105,117
To remove keywords present in list Args: keyword_list (list(str)): List of keywords to remove Examples: >>> keyword_processor.remove_keywords_from_list(["java", "python"]) Raises: AttributeError: If `keyword_list` is not a list.
def remove_keywords_from_list(self, keyword_list):
    """Remove every keyword in *keyword_list* that is present.

    Args:
        keyword_list (list(str)): List of keywords to remove.

    Raises:
        AttributeError: If `keyword_list` is not a list.
    """
    if not isinstance(keyword_list, list):
        raise AttributeError("keyword_list should be a list")
    for kw in keyword_list:
        self.remove_keyword(kw)
105,118
Normalize datetime intervals. Given a pair of datetime.date or datetime.datetime objects, returns a 2-tuple of tz-aware UTC datetimes spanning the same interval. For datetime.date objects, the returned interval starts at 00:00:00.0 on the first date and ends at 00:00:00.0 on the second. Naive datetimes are upgraded to UTC. Timezone-aware datetimes are normalized to the UTC tzdata. Params: - start: A date or datetime - end: A date or datetime
def _normalize_interval(start, end, value):
    """Return (start, end) as tz-aware UTC datetimes spanning the interval.

    date objects become midnight-anchored datetimes; naive datetimes are
    assumed UTC; aware datetimes are converted to UTC.
    NOTE(review): ``value`` is accepted but unused in this function.

    Params:
      - start: A date or datetime
      - end: A date or datetime
    """
    if not isinstance(start, datetime):
        # Dates: expand to 00:00:00.0 on each day.
        # NOTE(review): only `start` is type-checked; assumes `end` is the
        # same kind as `start` — confirm with callers.
        start = datetime.combine(start, START_OF_DAY)
        end = datetime.combine(end, START_OF_DAY)
    if start.tzinfo is None:
        # Naive datetimes are upgraded to UTC.
        start = pytz.UTC.localize(start)
        end = pytz.UTC.localize(end)
    else:
        # Aware datetimes are normalized to UTC.
        start = start.astimezone(pytz.UTC)
        end = end.astimezone(pytz.UTC)
    return start, end
106,213
Performs a keyboard key press without the release. This will put that key in a held down state. NOTE: For some reason, this does not seem to cause key repeats like would happen if a keyboard key was held down on a text field. Args: key (str): The key to be pressed down. The valid names are listed in pyautogui.KEY_NAMES. Returns: None
def _keyDown(key):
    """X11: press *key* down without releasing it (held-down state).

    Args:
        key (str): The key to be pressed down; valid names are in
            pyautogui.KEY_NAMES. An int is treated as a raw X keycode.

    Returns:
        None
    """
    # Bug fix: handle raw integer keycodes BEFORE the mapping lookup.
    # Previously `key not in keyboardMapping` ran first, so an int keycode
    # absent from the mapping returned early and the int branch was dead.
    if isinstance(key, int):
        fake_input(_display, X.KeyPress, key)
        _display.sync()
        return
    if key not in keyboardMapping or keyboardMapping[key] is None:
        return
    needsShift = pyautogui.isShiftCharacter(key)
    if needsShift:
        fake_input(_display, X.KeyPress, keyboardMapping['shift'])
    fake_input(_display, X.KeyPress, keyboardMapping[key])
    if needsShift:
        fake_input(_display, X.KeyRelease, keyboardMapping['shift'])
    _display.sync()
106,250
Performs a keyboard key release (without the press down beforehand). Args: key (str): The key to be released up. The valid names are listed in pyautogui.KEY_NAMES. Returns: None
def _keyUp(key):
    """X11: release *key* (without a prior press).

    Args:
        key (str): The key to be released up; valid names are in
            pyautogui.KEY_NAMES. An int is treated as a raw X keycode.

    Returns:
        None
    """
    # Bug fix: accept raw integer keycodes even when absent from
    # keyboardMapping; previously the mapping check ran first and such
    # ints returned early without releasing anything.
    if isinstance(key, int):
        keycode = key
    elif key not in keyboardMapping or keyboardMapping[key] is None:
        return
    else:
        keycode = keyboardMapping[key]
    fake_input(_display, X.KeyRelease, keycode)
    _display.sync()
106,251
Return Window object if 'title' or its part found in visible windows titles, else return None Return only 1 window found first Args: title: unicode string exact (bool): True if search only exact match
def getWindow(title, exact=False):
    """Return the first visible Window matching *title*, else None.

    Args:
        title: unicode string.
        exact (bool): True to require an exact title match; otherwise a
            substring match is also tried.
    """
    titles = getWindows()
    hwnd = titles.get(title, None)
    if not hwnd and not exact:
        # Fall back to a substring search over all window titles.
        for name, handle in titles.items():
            if title in name:
                hwnd = handle
                break
    return Window(hwnd) if hwnd else None
106,253
Returns the current xy coordinates of the mouse cursor as a two-integer tuple. Args: x (int, None, optional) - If not None, this argument overrides the x in the return value. y (int, None, optional) - If not None, this argument overrides the y in the return value. Returns: (x, y) tuple of the current xy coordinates of the mouse cursor.
def position(x=None, y=None):
    """Return the mouse cursor position as an (x, y) Point.

    Args:
        x (int, None, optional): If not None, overrides the reported x.
        y (int, None, optional): If not None, overrides the reported y.
    """
    curX, curY = platformModule._position()
    curX = int(curX) if x is None else int(x)
    curY = int(curY) if y is None else int(y)
    return Point(curX, curY)
106,261
Returns whether the given xy coordinates are on the screen or not. Args: Either the arguments are two separate values, first arg for x and second for y, or there is a single argument of a sequence with two values, the first x and the second y. Example: onScreen(x, y) or onScreen([x, y]) Returns: bool: True if the xy coordinates are on the screen at its current resolution, otherwise False.
def onScreen(x, y=None):
    """Return True if the xy coordinates are within the screen resolution.

    Accepts either two separate values or a single two-value sequence:
    onScreen(x, y) or onScreen([x, y]).
    """
    x, y = _unpackXY(x, y)
    width, height = platformModule._size()
    return 0 <= int(x) < width and 0 <= int(y) < height
106,262
Performs a keyboard key press without the release. This will put that key in a held down state. NOTE: For some reason, this does not seem to cause key repeats like would happen if a keyboard key was held down on a text field. Args: key (str): The key to be pressed down. The valid names are listed in KEYBOARD_KEYS. Returns: None
def keyDown(key, pause=None, _pause=True):
    """Press *key* down without releasing it (held-down state).

    Args:
        key (str): The key to press down; valid names are in KEYBOARD_KEYS.

    Returns:
        None
    """
    if len(key) > 1:
        # Multi-character key names ('enter', 'shift', ...) are
        # case-insensitive.
        key = key.lower()
    _failSafeCheck()
    platformModule._keyDown(key)
    _autoPause(pause, _pause)
106,275
Performs a keyboard key release (without the press down beforehand). Args: key (str): The key to be released up. The valid names are listed in KEYBOARD_KEYS. Returns: None
def keyUp(key, pause=None, _pause=True):
    """Release *key* (without a press down beforehand).

    Args:
        key (str): The key to release; valid names are in KEYBOARD_KEYS.

    Returns:
        None
    """
    if len(key) > 1:
        # Multi-character key names ('enter', 'shift', ...) are
        # case-insensitive.
        key = key.lower()
    _failSafeCheck()
    platformModule._keyUp(key)
    _autoPause(pause, _pause)
106,276
Performs a keyboard key press without the release. This will put that key in a held down state. NOTE: For some reason, this does not seem to cause key repeats like would happen if a keyboard key was held down on a text field. Args: key (str): The key to be pressed down. The valid names are listed in pyautogui.KEY_NAMES. Returns: None
def _keyDown(key):
    """Win32: press *key* down (no release), applying needed modifiers.

    Args:
        key (str): The key to press down; valid names are in
            pyautogui.KEY_NAMES.

    Returns:
        None
    """
    if key not in keyboardMapping or keyboardMapping[key] is None:
        return
    if type(key) == int:
        fake_input(_display, X.KeyPress, key)
        _display.sync()
        return
    needsShift = pyautogui.isShiftCharacter(key)
    # keyboardMapping values pack modifier flags into the high byte:
    # bit 0 = shift, bit 1 = ctrl, bit 2 = alt; low byte is the VK code.
    mods, vkCode = divmod(keyboardMapping[key], 0x100)
    # Press required modifiers first (alt, ctrl, then shift).
    for apply_mod, vk_mod in [
            (mods & 4, 0x12), (mods & 2, 0x11),
            (mods & 1 or needsShift, 0x10)]:  # HANKAKU not supported! mods & 8
        if apply_mod:
            ctypes.windll.user32.keybd_event(vk_mod, 0, 0, 0)
    ctypes.windll.user32.keybd_event(vkCode, 0, 0, 0)
    # Release the modifiers in reverse order, leaving only *key* held.
    for apply_mod, vk_mod in [
            (mods & 1 or needsShift, 0x10), (mods & 2, 0x11),
            (mods & 4, 0x12)]:  # HANKAKU not supported! mods & 8
        if apply_mod:
            ctypes.windll.user32.keybd_event(vk_mod, 0, KEYEVENTF_KEYUP, 0)
106,282
Send the mouse down event to Windows by calling the mouse_event() win32 function. Args: x (int): The x position of the mouse event. y (int): The y position of the mouse event. button (str): The mouse button, either 'left', 'middle', or 'right' Returns: None
def _mouseDown(x, y, button):
    """Send a mouse button-down event via the win32 mouse_event() function.

    Args:
        x (int): The x position of the mouse event.
        y (int): The y position of the mouse event.
        button (str): The mouse button: 'left', 'middle', or 'right'.

    Returns:
        None
    """
    # Dedup: the three per-button branches were identical except for the
    # event constant; use a dispatch table instead.
    events = {
        'left': MOUSEEVENTF_LEFTDOWN,
        'middle': MOUSEEVENTF_MIDDLEDOWN,
        'right': MOUSEEVENTF_RIGHTDOWN,
    }
    assert button in events, "button argument not in ('left', 'middle', 'right')"
    try:
        _sendMouseEvent(events[button], x, y)
    except (PermissionError, OSError):
        # TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60
        pass
106,284
Send the mouse up event to Windows by calling the mouse_event() win32 function. Args: x (int): The x position of the mouse event. y (int): The y position of the mouse event. button (str): The mouse button, either 'left', 'middle', or 'right' Returns: None
def _mouseUp(x, y, button):
    """Send a mouse button-up event via the win32 mouse_event() function.

    Args:
        x (int): The x position of the mouse event.
        y (int): The y position of the mouse event.
        button (str): The mouse button: 'left', 'middle', or 'right'.

    Returns:
        None
    """
    # Dedup: the three per-button branches were identical except for the
    # event constant; use a dispatch table instead.
    events = {
        'left': MOUSEEVENTF_LEFTUP,
        'middle': MOUSEEVENTF_MIDDLEUP,
        'right': MOUSEEVENTF_RIGHTUP,
    }
    assert button in events, "button argument not in ('left', 'middle', 'right')"
    try:
        _sendMouseEvent(events[button], x, y)
    except (PermissionError, OSError):
        # TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60
        pass
106,285
Send the mouse click event to Windows by calling the mouse_event() win32 function. Args: button (str): The mouse button, either 'left', 'middle', or 'right' x (int): The x position of the mouse event. y (int): The y position of the mouse event. Returns: None
def _click(x, y, button):
    """Send a mouse click event via the win32 mouse_event() function.

    Args:
        x (int): The x position of the mouse event.
        y (int): The y position of the mouse event.
        button (str): The mouse button: 'left', 'middle', or 'right'.

    Returns:
        None
    """
    # Dedup: the three per-button branches were identical except for the
    # event constant; use a dispatch table instead.
    events = {
        'left': MOUSEEVENTF_LEFTCLICK,
        'middle': MOUSEEVENTF_MIDDLECLICK,
        'right': MOUSEEVENTF_RIGHTCLICK,
    }
    assert button in events, "button argument not in ('left', 'middle', 'right')"
    try:
        _sendMouseEvent(events[button], x, y)
    except (PermissionError, OSError):
        # TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60
        pass
106,286
The helper function that actually makes the call to the mouse_event() win32 function. Args: ev (int): The win32 code for the mouse event. Use one of the MOUSEEVENTF_* constants for this argument. x (int): The x position of the mouse event. y (int): The y position of the mouse event. dwData (int): The argument for mouse_event()'s dwData parameter. So far this is only used by mouse scrolling. Returns: None
def _sendMouseEvent(ev, x, y, dwData=0):
    """Make the actual call to the win32 mouse_event() function.

    Args:
        ev (int): The win32 code for the mouse event; one of the
            MOUSEEVENTF_* constants.
        x (int): The x position of the mouse event.
        y (int): The y position of the mouse event.
        dwData (int): mouse_event()'s dwData parameter; so far only used
            by mouse scrolling.

    Returns:
        None
    """
    # Fix: identity comparison with None (`!= None` -> `is not None`).
    assert x is not None and y is not None, 'x and y cannot be set to None'
    # NOTE: SendInput was not working for mouse events here, so the older
    # mouse_event win32 function is used instead (see
    # https://stackoverflow.com/questions/13564851 for background).
    width, height = _size()
    # mouse_event takes absolute coordinates normalized to a 0..65535 grid.
    convertedX = 65536 * x // width + 1
    convertedY = 65536 * y // height + 1
    ctypes.windll.user32.mouse_event(
        ev, ctypes.c_long(convertedX), ctypes.c_long(convertedY), dwData, 0)
106,287
Send the mouse vertical scroll event to Windows by calling the mouse_event() win32 function. Args: clicks (int): The amount of scrolling to do. A positive value is the mouse wheel moving forward (scrolling up), a negative value is backwards (down). x (int): The x position of the mouse event. y (int): The y position of the mouse event. Returns: None
def _scroll(clicks, x=None, y=None):
    """Send a vertical scroll event via the win32 mouse_event() function.

    Args:
        clicks (int): Amount of scrolling; positive scrolls up (wheel
            forward), negative scrolls down.
        x (int): The x position of the mouse event.
        y (int): The y position of the mouse event.

    Returns:
        None
    """
    startx, starty = _position()
    width, height = _size()
    # Default to the current cursor position; clamp given coordinates
    # onto the screen.
    x = startx if x is None else max(0, min(x, width - 1))
    y = starty if y is None else max(0, min(y, height - 1))
    try:
        _sendMouseEvent(MOUSEEVENTF_WHEEL, x, y, dwData=clicks)
    except (PermissionError, OSError):
        # TODO: We need to figure out how to prevent these errors, see https://github.com/asweigart/pyautogui/issues/60
        pass
106,288
Helper function for creating `FunctionCall`s with `Arguments`. Args: function: The value to store for the action function. arguments: The values to store for the arguments of the action. Can either be an `Arguments` object, a `dict`, or an iterable. If a `dict` or an iterable is provided, the values will be unpacked into an `Arguments` object. Returns: A new `FunctionCall` instance.
def all_arguments(cls, function, arguments):
    """Create a `FunctionCall` with `Arguments`.

    Args:
        function: The value to store for the action function.
        arguments: An `Arguments` object, a `dict` (unpacked as
            keywords), or an iterable (unpacked as positionals).

    Returns:
        A new `FunctionCall` instance.
    """
    if not isinstance(arguments, Arguments):
        arguments = (Arguments(**arguments) if isinstance(arguments, dict)
                     else Arguments(*arguments))
    return cls(function, arguments)
106,478
Initialize the runconfig with the various directories needed. Args: replay_dir: Where to find replays. Might not be accessible to SC2. data_dir: Where SC2 should find the data and battle.net cache. tmp_dir: The temporary directory. None is system default. cwd: Where to set the current working directory. env: What to pass as the environment variables.
def __init__(self, replay_dir, data_dir, tmp_dir, cwd=None, env=None):
    """Initialize the runconfig with the various directories needed.

    Args:
        replay_dir: Where to find replays. Might not be accessible to SC2.
        data_dir: Where SC2 should find the data and battle.net cache.
        tmp_dir: The temporary directory. None is system default.
        cwd: Where to set the current working directory.
        env: What to pass as the environment variables.
    """
    self.replay_dir = replay_dir
    self.data_dir = data_dir
    self.tmp_dir = tmp_dir
    self.cwd = cwd
    self.env = env
106,505
Save a replay to a directory, returning the path to the replay. Args: replay_data: The result of controller.save_replay(), ie the binary data. replay_dir: Where to save the replay. This can be absolute or relative. prefix: Optional prefix for the replay filename. Returns: The full path where the replay is saved. Raises: ValueError: If the prefix contains the path separator.
def save_replay(self, replay_data, replay_dir, prefix=None):
    """Save a replay to a directory, returning the saved path.

    Args:
        replay_data: The result of controller.save_replay(), ie binary data.
        replay_dir: Where to save the replay; absolute or relative.
        prefix: Optional prefix for the replay filename.

    Returns:
        The full path where the replay is saved.

    Raises:
        ValueError: If the prefix contains the path separator.
    """
    if not prefix:
        replay_filename = ""
    elif os.path.sep in prefix:
        raise ValueError("Prefix '%s' contains '%s', use replay_dir instead." % (
            prefix, os.path.sep))
    else:
        replay_filename = prefix + "_"
    # Timestamped, filesystem-safe name (':' is not portable in filenames).
    stamp = datetime.datetime.utcnow().replace(microsecond=0)
    replay_filename += "%s.SC2Replay" % stamp.isoformat("-").replace(":", "-")
    replay_dir = self.abs_replay_path(replay_dir)
    if not gfile.Exists(replay_dir):
        gfile.MakeDirs(replay_dir)
    replay_path = os.path.join(replay_dir, replay_filename)
    with gfile.Open(replay_path, "wb") as f:
        f.write(replay_data)
    return replay_path
106,509
Create a game for the agents to join. Args: map_name: The map to use.
def create_game(self, map_name):
    """Create a game for the agents to join.

    Args:
        map_name: The map to use.
    """
    map_inst = maps.get(map_name)
    map_data = map_inst.data(self._run_config)
    if map_name not in self._saved_maps:
        # Push the map to every controller once, then remember it.
        for controller in self._controllers:
            controller.save_map(map_inst.path, map_data)
        self._saved_maps.add(map_name)
    # Form the create game message.
    create = sc_pb.RequestCreateGame(
        local_map=sc_pb.LocalMap(map_path=map_inst.path),
        disable_fog=False)
    # Set up for two agents.
    for _ in range(self._num_agents):
        create.player_setup.add(type=sc_pb.Participant)
    # Create the game.
    self._controllers[0].create_game(create)
106,549
Create a game, one remote agent vs the specified bot. Args: map_name: The map to use. bot_difficulty: The difficulty of the bot to play against. bot_race: The race for the bot. bot_first: Whether the bot should be player 1 (else is player 2).
def create_game( self, map_name, bot_difficulty=sc_pb.VeryEasy, bot_race=sc_common.Random, bot_first=False): self._controller.ping() # Form the create game message. map_inst = maps.get(map_name) map_data = map_inst.data(self._run_config) if map_name not in self._saved_maps: self._controller.save_map(map_inst.path, map_data) self._saved_maps.add(map_name) create = sc_pb.RequestCreateGame( local_map=sc_pb.LocalMap(map_path=map_inst.path, map_data=map_data), disable_fog=False) # Set up for one bot, one agent. if not bot_first: create.player_setup.add(type=sc_pb.Participant) create.player_setup.add( type=sc_pb.Computer, race=bot_race, difficulty=bot_difficulty) if bot_first: create.player_setup.add(type=sc_pb.Participant) # Create the game. self._controller.create_game(create)
106,552
Apply actions, step the world forward, and return observations. Args: actions: A list of actions meeting the action spec, one per agent. step_mul: If specified, use this rather than the environment's default. Returns: A tuple of TimeStep namedtuples, one per agent.
def step(self, actions, step_mul=None):
    """Apply actions, step the world forward, and return observations.

    Args:
        actions: A list of actions meeting the action spec, one per agent.
        step_mul: If specified, use this rather than the environment's default.

    Returns:
        A tuple of TimeStep namedtuples, one per agent.
    """
    if self._state == environment.StepType.LAST:
        # The previous episode ended; start a fresh one instead of stepping.
        return self.reset()

    # When available-action checking is disabled, pass skip_available so the
    # transform doesn't validate against the current observation.
    skip = not self._ensure_available_actions
    self._parallel.run(
        (c.act, f.transform_action(o.observation, a, skip_available=skip))
        for c, f, o, a in zip(
            self._controllers, self._features, self._obs, actions))

    self._state = environment.StepType.MID
    return self._step(step_mul)
106,570
Forwards ports such that multiplayer works between machines. Args: remote_host: Where to ssh to. local_host: "127.0.0.1" or "::1". local_listen_ports: Which ports to listen on locally to forward remotely. remote_listen_ports: Which ports to listen on remotely to forward locally. Returns: The ssh process. Raises: ValueError: if it can't find ssh.
def forward_ports(remote_host, local_host, local_listen_ports,
                  remote_listen_ports):
    """Forwards ports such that multiplayer works between machines.

    Args:
        remote_host: Where to ssh to.
        local_host: "127.0.0.1" or "::1".
        local_listen_ports: Which ports to listen on locally to forward remotely.
        remote_listen_ports: Which ports to listen on remotely to forward locally.

    Returns:
        The ssh process.

    Raises:
        ValueError: if it can't find ssh.
    """
    # IPv6 literals must be bracketed in the forwarding spec.
    if ":" in local_host and not local_host.startswith("["):
        local_host = "[%s]" % local_host

    ssh = whichcraft.which("ssh") or whichcraft.which("plink")
    if not ssh:
        raise ValueError("Couldn't find an ssh client.")

    args = [ssh, remote_host]
    # -L entries first (local listeners), then -R entries (remote listeners).
    for flag, ports in (("-L", local_listen_ports), ("-R", remote_listen_ports)):
        for port in ports:
            args += [flag, "%s:%s:%s:%s" % (local_host, port, local_host, port)]

    logging.info("SSH port forwarding: %s", " ".join(args))
    return subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            stdin=subprocess.PIPE,
                            close_fds=(os.name == "posix"))
106,602
Initializes the TestEnvironment. The `next_observation` is initialized to be reward = 0., discount = 1., and an appropriately sized observation of all zeros. `episode_length` is set to `float('inf')`. Args: num_agents: The number of agents. observation_spec: The observation specs for each player. action_spec: The action specs for each player.
def __init__(self, num_agents, observation_spec, action_spec):
    """Initializes the TestEnvironment.

    The `next_timestep` entries start as reward = 0., discount = 1., with an
    appropriately sized all-zeros observation. `episode_length` starts at
    `float('inf')`.

    Args:
        num_agents: The number of agents.
        observation_spec: The observation specs for each player.
        action_spec: The action specs for each player.
    """
    self._num_agents = num_agents
    self._observation_spec = observation_spec
    self._action_spec = action_spec
    self._episode_steps = 0

    timesteps = []
    for agent_index, obs_spec in enumerate(observation_spec):
        timesteps.append(environment.TimeStep(
            step_type=environment.StepType.MID,
            reward=0.,
            discount=1.,
            observation=self._default_observation(obs_spec, agent_index)))
    self.next_timestep = timesteps

    self.episode_length = float('inf')
106,607
A surface to display on screen. Args: surf: The actual pygame.Surface (or subsurface). surf_type: A SurfType, used to tell how to treat clicks in that area. surf_rect: Rect of the surface relative to the window. world_to_surf: Convert a world point to a pixel on the surface. world_to_obs: Convert a world point to a pixel in the observation. draw: A function that draws onto the surface.
def __init__(self, surf, surf_type, surf_rect, world_to_surf, world_to_obs,
             draw):
    """A surface to display on screen.

    Args:
        surf: The actual pygame.Surface (or subsurface).
        surf_type: A SurfType, used to tell how to treat clicks in that area.
        surf_rect: Rect of the surface relative to the window.
        world_to_surf: Convert a world point to a pixel on the surface.
        world_to_obs: Convert a world point to a pixel in the observation.
        draw: A function that draws onto the surface.
    """
    self.draw = draw
    self.world_to_obs = world_to_obs
    self.world_to_surf = world_to_surf
    self.surf_rect = surf_rect
    self.surf_type = surf_type
    self.surf = surf
106,632
Take the game info and the static data needed to set up the game. This must be called before render or get_actions for each game or restart. Args: game_info: A `sc_pb.ResponseGameInfo` object for this game. static_data: A `StaticData` object for this game. Raises: ValueError: if there is nothing to render.
def init(self, game_info, static_data):
    """Take the game info and the static data needed to set up the game.

    This must be called before render or get_actions for each game or restart.

    Args:
        game_info: A `sc_pb.ResponseGameInfo` object for this game.
        static_data: A `StaticData` object for this game.

    Raises:
        ValueError: if there is nothing to render.
    """
    self._game_info = game_info
    self._static_data = static_data

    if not game_info.HasField("start_raw"):
        raise ValueError("Raw observations are required for the renderer.")

    self._map_size = point.Point.build(game_info.start_raw.map_size)

    if game_info.options.HasField("feature_layer"):
        fl_opts = game_info.options.feature_layer
        self._feature_screen_px = point.Point.build(fl_opts.resolution)
        self._feature_minimap_px = point.Point.build(fl_opts.minimap_resolution)
        self._feature_camera_width_world_units = fl_opts.width
        self._render_rgb = False
    else:
        self._feature_screen_px = self._feature_minimap_px = None
    if game_info.options.HasField("render"):
        # Note: when both feature layers and rgb render are available, this
        # later assignment makes rgb rendering take precedence.
        render_opts = game_info.options.render
        self._rgb_screen_px = point.Point.build(render_opts.resolution)
        self._rgb_minimap_px = point.Point.build(render_opts.minimap_resolution)
        self._render_rgb = True
    else:
        self._rgb_screen_px = self._rgb_minimap_px = None

    if not self._feature_screen_px and not self._rgb_screen_px:
        raise ValueError("Nothing to render.")

    try:
        self.init_window()
        self._initialized = True
    except pygame.error as e:
        # Keep going headless: the game itself can still run without a window.
        self._initialized = False
        logging.error("-" * 60)
        logging.error("Failed to initialize pygame: %s", e)
        logging.error("Continuing without pygame.")
        logging.error("If you're using ssh and have an X server, try ssh -X.")
        logging.error("-" * 60)

    # Reset per-game UI state.
    self._obs = sc_pb.ResponseObservation()
    self._queued_action = None
    self._queued_hotkey = ""
    self._select_start = None
    self._alerts = {}
    self._past_actions = []
    self._help = False
106,641
Create and send a specific request, and return the response. For example: send(ping=sc_pb.RequestPing()) => sc_pb.ResponsePing Args: **kwargs: A single kwarg with the name and value to fill in to Request. Returns: The Response corresponding to your request.
def send(self, **kwargs):
    """Create and send a specific request, and return the response.

    For example: send(ping=sc_pb.RequestPing()) => sc_pb.ResponsePing

    Args:
        **kwargs: A single kwarg with the name and value to fill in to Request.

    Returns:
        The Response corresponding to your request.
    """
    assert len(kwargs) == 1, "Must make a single request."
    # The single kwarg name doubles as the response field name.
    name = next(iter(kwargs))
    response = self.send_req(sc_pb.Request(**kwargs))
    return getattr(response, name)
106,688
Initialize a Features instance matching the specified interface format. Args: agent_interface_format: See the documentation for `AgentInterfaceFormat`. map_size: The size of the map in world units, needed for feature_units. Raises: ValueError: if agent_interface_format isn't specified. ValueError: if map_size isn't specified when use_feature_units or use_camera_position is.
def __init__(self, agent_interface_format=None, map_size=None):
    """Initialize a Features instance matching the specified interface format.

    Args:
        agent_interface_format: See the documentation for
            `AgentInterfaceFormat`.
        map_size: The size of the map in world units, needed for feature_units.

    Raises:
        ValueError: if agent_interface_format isn't specified.
    """
    if not agent_interface_format:
        raise ValueError("Please specify agent_interface_format")

    self._agent_interface_format = agent_interface_format
    aif = self._agent_interface_format
    # Camera setup is only needed when unit- or camera-relative features are
    # requested; init_camera presumably validates map_size itself.
    if (aif.use_feature_units
            or aif.use_camera_position
            or aif.use_raw_units):
        self.init_camera(
            aif.feature_dimensions,
            map_size,
            aif.camera_width_world_units)

    self._valid_functions = _init_valid_functions(
        aif.action_dimensions)
106,709
Transform an SC2-style action into an agent-style action. This should be the inverse of `transform_action`. Args: action: a `sc_pb.Action` to be transformed. Returns: A corresponding `actions.FunctionCall`. Raises: ValueError: if it doesn't know how to transform this action.
def reverse_action(self, action):
    """Transform an SC2-style action into an agent-style action.

    This should be the inverse of `transform_action`.

    Args:
        action: a `sc_pb.Action` to be transformed.

    Returns:
        A corresponding `actions.FunctionCall`.

    Raises:
        ValueError: if it doesn't know how to transform this action.
    """
    FUNCTIONS = actions.FUNCTIONS  # pylint: disable=invalid-name
    aif = self._agent_interface_format

    def func_call_ability(ability_id, cmd_type, *args):
        """Get the function id for a specific ability id and action type."""
        if ability_id not in actions.ABILITY_IDS:
            logging.warning("Unknown ability_id: %s. This is probably dance or "
                            "cheer, or some unknown new or map specific ability. "
                            "Treating it as a no-op.", ability_id)
            return FUNCTIONS.no_op()

        # Collapse specific abilities onto their general form when hiding.
        if aif.hide_specific_actions:
            general_id = next(iter(actions.ABILITY_IDS[ability_id])).general_id
            if general_id:
                ability_id = general_id

        for func in actions.ABILITY_IDS[ability_id]:
            if func.function_type is cmd_type:
                return FUNCTIONS[func.id](*args)
        raise ValueError("Unknown ability_id: %s, type: %s. Likely a bug." % (
            ability_id, cmd_type.__name__))

    if action.HasField("action_ui"):
        act_ui = action.action_ui
        if act_ui.HasField("multi_panel"):
            return FUNCTIONS.select_unit(act_ui.multi_panel.type - 1,
                                         act_ui.multi_panel.unit_index)
        if act_ui.HasField("control_group"):
            return FUNCTIONS.select_control_group(
                act_ui.control_group.action - 1,
                act_ui.control_group.control_group_index)
        if act_ui.HasField("select_idle_worker"):
            return FUNCTIONS.select_idle_worker(act_ui.select_idle_worker.type - 1)
        if act_ui.HasField("select_army"):
            return FUNCTIONS.select_army(act_ui.select_army.selection_add)
        if act_ui.HasField("select_warp_gates"):
            return FUNCTIONS.select_warp_gates(
                act_ui.select_warp_gates.selection_add)
        if act_ui.HasField("select_larva"):
            return FUNCTIONS.select_larva()
        if act_ui.HasField("cargo_panel"):
            return FUNCTIONS.unload(act_ui.cargo_panel.unit_index)
        if act_ui.HasField("production_panel"):
            return FUNCTIONS.build_queue(act_ui.production_panel.unit_index)
        if act_ui.HasField("toggle_autocast"):
            return func_call_ability(act_ui.toggle_autocast.ability_id,
                                     actions.autocast)

    if (action.HasField("action_feature_layer") or
            action.HasField("action_render")):
        act_sp = actions.spatial(action, aif.action_space)
        if act_sp.HasField("camera_move"):
            coord = point.Point.build(act_sp.camera_move.center_minimap)
            return FUNCTIONS.move_camera(coord)
        if act_sp.HasField("unit_selection_point"):
            select_point = act_sp.unit_selection_point
            coord = point.Point.build(select_point.selection_screen_coord)
            return FUNCTIONS.select_point(select_point.type - 1, coord)
        if act_sp.HasField("unit_selection_rect"):
            select_rect = act_sp.unit_selection_rect
            # TODO(tewalds): After looking at some replays we should decide if
            # this is good enough. Maybe we need to simulate multiple actions or
            # merge the selection rects into a bigger one.
            tl = point.Point.build(select_rect.selection_screen_coord[0].p0)
            br = point.Point.build(select_rect.selection_screen_coord[0].p1)
            return FUNCTIONS.select_rect(select_rect.selection_add, tl, br)
        if act_sp.HasField("unit_command"):
            cmd = act_sp.unit_command
            queue = int(cmd.queue_command)
            if cmd.HasField("target_screen_coord"):
                coord = point.Point.build(cmd.target_screen_coord)
                return func_call_ability(cmd.ability_id, actions.cmd_screen,
                                         queue, coord)
            elif cmd.HasField("target_minimap_coord"):
                coord = point.Point.build(cmd.target_minimap_coord)
                return func_call_ability(cmd.ability_id, actions.cmd_minimap,
                                         queue, coord)
            else:
                return func_call_ability(cmd.ability_id, actions.cmd_quick,
                                         queue)

    if action.HasField("action_raw") or action.HasField("action_render"):
        raise ValueError("Unknown action:\n%s" % action)

    return FUNCTIONS.no_op()
106,716
Decorate a function/method to check its timings. To use the function's name: @sw.decorate def func(): pass To name it explicitly: @sw.decorate("name") def random_func_name(): pass Args: name_or_func: the name or the function to decorate. Returns: If a name is passed, returns this as a decorator, otherwise returns the decorated function.
def decorate(self, name_or_func):
    """Decorate a function/method to check its timings.

    To use the function's name:
        @sw.decorate
        def func():
            pass

    To name it explicitly:
        @sw.decorate("name")
        def random_func_name():
            pass

    Args:
        name_or_func: the name or the function to decorate.

    Returns:
        If a name is passed, returns this as a decorator, otherwise returns
        the decorated function.
    """
    # Allow disabling all timing via the environment.
    if os.environ.get("SC2_NO_STOPWATCH"):
        if callable(name_or_func):
            return name_or_func
        return lambda func: func

    def wrap(name, func):
        @functools.wraps(func)
        def timed(*args, **kwargs):
            with self(name):
                return func(*args, **kwargs)
        return timed

    if callable(name_or_func):
        # Used as a bare decorator: time under the function's own name.
        return wrap(name_or_func.__name__, name_or_func)
    # Used with an explicit name: return the actual decorator.
    return lambda func: wrap(name_or_func, func)
106,758
Wrapper for _log_counter_per_token. Args: token: The token for which to look up the count. Returns: The number of times this function has been called with *token* as an argument (starting at 0)
def _GetNextLogCountPerToken(token):
    """Wrapper for _log_counter_per_token.

    Args:
        token: The token for which to look up the count.

    Returns:
        The number of times this function has been called with *token* as an
        argument (starting at 0).
    """
    global _log_counter_per_token  # pylint: disable=global-variable-not-assigned
    # Missing tokens start at -1 so the first call yields 0.
    count = _log_counter_per_token.get(token, -1) + 1
    _log_counter_per_token[token] = count
    return count
107,664
Log 'msg % args' at level 'level' once per 'n' times. Logs the 1st call, (N+1)st call, (2N+1)st call, etc. Not threadsafe. Args: level: The level at which to log. msg: The message to be logged. n: The number of times this should be called before it is logged. *args: The args to be substituted into the msg.
def log_every_n(level, msg, n, *args):
    """Log 'msg % args' at level 'level' once per 'n' times.

    Logs the 1st call, (N+1)st call, (2N+1)st call, etc. Not threadsafe.

    Args:
        level: The level at which to log.
        msg: The message to be logged.
        n: The number of times this should be called before it is logged.
        *args: The args to be substituted into the msg.
    """
    # The counter is keyed by call site, so each log statement counts itself.
    call_count = _GetNextLogCountPerToken(_GetFileAndLine())
    should_log = call_count % n == 0
    log_if(level, msg, should_log, *args)
107,665
A generic function to load mnist-like dataset. Parameters: ---------- shape : tuple The shape of digit images. path : str The path that the data is downloaded to. name : str The dataset name you want to use (the default is 'mnist'). url : str The url of dataset (the default is 'http://yann.lecun.com/exdb/mnist/').
def _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/'):
    """A generic function to load an mnist-like dataset.

    Args:
        shape (tuple): The shape of digit images.
        path (str): The path that the data is downloaded to.
        name (str): The dataset name to use (default 'mnist').
        url (str): The url of the dataset
            (default 'http://yann.lecun.com/exdb/mnist/').

    Returns:
        Tuple of (X_train, y_train, X_val, y_val, X_test, y_test) numpy arrays.
    """
    path = os.path.join(path, name)

    # Define functions for loading mnist-like data's images and labels.
    # For convenience, they also download the requested files if needed.
    def load_mnist_images(path, filename):
        filepath = maybe_download_and_extract(filename, path, url)
        logging.info(filepath)
        # Read the inputs in Yann LeCun's binary format.
        with gzip.open(filepath, 'rb') as f:
            data = np.frombuffer(f.read(), np.uint8, offset=16)
        # The inputs are vectors now, we reshape them to monochrome 2D images,
        # following the shape convention: (examples, channels, rows, columns)
        data = data.reshape(shape)
        # The inputs come as bytes, we convert them to float32 in range [0,1].
        # (Actually to range [0, 255/256], for compatibility to the version
        # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)
        return data / np.float32(256)

    def load_mnist_labels(path, filename):
        filepath = maybe_download_and_extract(filename, path, url)
        # Read the labels in Yann LeCun's binary format.
        with gzip.open(filepath, 'rb') as f:
            data = np.frombuffer(f.read(), np.uint8, offset=8)
        # The labels are vectors of integers now, that's exactly what we want.
        return data

    # Download and read the training and test set images and labels.
    logging.info("Load or Download {0} > {1}".format(name.upper(), path))
    X_train = load_mnist_images(path, 'train-images-idx3-ubyte.gz')
    y_train = load_mnist_labels(path, 'train-labels-idx1-ubyte.gz')
    X_test = load_mnist_images(path, 't10k-images-idx3-ubyte.gz')
    y_test = load_mnist_labels(path, 't10k-labels-idx1-ubyte.gz')

    # We reserve the last 10000 training examples for validation.
    X_train, X_val = X_train[:-10000], X_train[-10000:]
    y_train, y_val = y_train[:-10000], y_train[-10000:]

    # We just return all the arrays in order, as expected in main().
    # (It doesn't matter how we do this as long as we can read them again.)
    X_train = np.asarray(X_train, dtype=np.float32)
    y_train = np.asarray(y_train, dtype=np.int32)
    X_val = np.asarray(X_val, dtype=np.float32)
    y_val = np.asarray(y_val, dtype=np.int32)
    X_test = np.asarray(X_test, dtype=np.float32)
    y_test = np.asarray(y_test, dtype=np.int32)
    return X_train, y_train, X_val, y_val, X_test, y_test
107,679
Return zero-filled state tensor(s). Args: batch_size: int, float, or unit Tensor representing the batch size. Returns: tensor of shape '[batch_size x shape[0] x shape[1] x num_features] filled with zeros
def zero_state(self, batch_size, dtype=LayersConfig.tf_dtype):
    """Return zero-filled state tensor(s).

    Args:
        batch_size: int, float, or unit Tensor representing the batch size.

    Returns:
        tensor of shape '[batch_size x shape[0] x shape[1] x num_features]
        filled with zeros
    """
    shape = self.shape
    num_features = self.num_features
    # TODO : TypeError: 'NoneType' object is not subscriptable
    # NOTE(review): the trailing dimension is num_features * 2 — presumably the
    # cell and hidden states are concatenated; confirm against callers.
    zeros = tf.zeros([batch_size, shape[0], shape[1], num_features * 2], dtype=dtype)
    return zeros
107,872
Perform random distortions on an image. Args: image: A float32 Tensor of shape [height, width, 3] with values in [0, 1). thread_id: Preprocessing thread id used to select the ordering of color distortions. There should be a multiple of 2 preprocessing threads. Returns: distorted_image: A float32 Tensor of shape [height, width, 3] with values in [0, 1].
def distort_image(image, thread_id):
    """Perform random distortions on an image.

    Args:
        image: A float32 Tensor of shape [height, width, 3] with values in
            [0, 1).
        thread_id: Preprocessing thread id used to select the ordering of color
            distortions. There should be a multiple of 2 preprocessing threads.

    Returns:
        distorted_image: A float32 Tensor of shape [height, width, 3] with
        values in [0, 1].
    """
    # Randomly flip horizontally.
    with tf.name_scope("flip_horizontal"):
        image = tf.image.random_flip_left_right(image)

    # Distort the colors in one of two orderings, chosen by thread id, so that
    # different preprocessing threads apply different augmentation orders.
    with tf.name_scope("distort_color"):
        if thread_id % 2 == 0:
            image = tf.image.random_brightness(image, max_delta=32. / 255.)
            image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
            image = tf.image.random_hue(image, max_delta=0.032)
            image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
        else:
            image = tf.image.random_brightness(image, max_delta=32. / 255.)
            image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
            image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
            image = tf.image.random_hue(image, max_delta=0.032)

        # The random_* ops do not necessarily clamp.
        image = tf.clip_by_value(image, 0.0, 1.0)
    return image
108,001
Uses RMSProp to compute step from gradients. Args: grads: numpy array of gradients. cache: numpy array of same shape as `grads` as RMSProp cache decay_rate: How fast to decay cache Returns: A tuple of step: numpy array of the same shape as `grads` giving the step. Note that this does not yet take the learning rate into account. cache: Updated RMSProp cache.
def _rmsprop(self, grads, cache=None, decay_rate=0.95):
    """Uses RMSProp to compute step from gradients.

    Args:
        grads: numpy array of gradients.
        cache: numpy array of same shape as `grads` as RMSProp cache.
        decay_rate: How fast to decay cache.

    Returns:
        A tuple of
        step: numpy array of the same shape as `grads` giving the step.
            Note that this does not yet take the learning rate into account.
        cache: Updated RMSProp cache.
    """
    cache = np.zeros_like(grads) if cache is None else cache
    # Exponential moving average of squared gradients.
    cache = decay_rate * cache + (1 - decay_rate) * np.square(grads)
    # K.epsilon() guards against division by zero.
    step = -grads / np.sqrt(cache + K.epsilon())
    return step, cache
108,259
Import a module path and create an api doc from it Args: string (str): string with line breaks to write to file. filename (str): filename without the .md out_path (str): The output directory
def to_md_file(string, filename, out_path="."):
    """Write a markdown string to `<out_path>/<filename>.md`.

    Args:
        string (str): string with line breaks to write to file.
        filename (str): filename without the .md extension.
        out_path (str): The output directory.
    """
    target = os.path.join(out_path, "%s.md" % filename)
    with open(target, "w") as out:
        out.write(string)
    print("wrote {}.md.".format(filename))
108,266
Initializes the markdown api generator. Args: src_root: The root folder name containing all the sources. Ex: src github_link: The base github link. Should include branch name. Ex: https://github.com/raghakot/keras-vis/tree/master All source links are generated with this prefix.
def __init__(self, src_root, github_link):
    """Initializes the markdown api generator.

    Args:
        src_root: The root folder name containing all the sources.
            Ex: src
        github_link: The base github link. Should include branch name.
            Ex: https://github.com/raghakot/keras-vis/tree/master
            All source links are generated with this prefix.
    """
    self.github_link = github_link
    self.src_root = src_root
108,267
Takes a function (or method) and documents it. Args: clsname (str, optional): class name to prepend to funcname. depth (int, optional): number of ### to append to function name
def func2md(self, func, clsname=None, names=None, depth=3):
    """Takes a function (or method) and documents it.

    Args:
        func: the function or method to document.
        clsname (str, optional): class name to prepend to funcname.
        names (list, optional): names to use instead of func.__name__.
        depth (int, optional): number of ### to append to function name.

    Returns:
        The rendered markdown string for this function.
    """
    section = "#" * depth
    if names is None:
        names = [func.__name__]

    funcname = ", ".join(names)
    # Backtick-escape private names so underscores don't become emphasis.
    escfuncname = ", ".join(
        ["`%s`" % funcname if funcname.startswith("_") else funcname
         for funcname in names])
    header = "%s%s" % ("%s." % clsname if clsname else "", escfuncname)

    path = self.get_src_path(func)
    doc = self.doc2md(func)

    # Rebuild the signature from the argspec: positional args first, then
    # keyword args (those covered by defaults), then *varargs / **kwargs.
    args, kwargs = [], []
    spec = getargspec(func)
    vargsname, kwargsname = spec.varargs, spec.keywords
    vargs = list(make_iter(spec.args)) if spec.args else []
    defaults = list(make_iter(spec.defaults)) if spec.defaults else []
    while vargs:
        if vargs and vargs[0] == "self":
            args.append(vargs.pop(0))
        elif len(vargs) > len(defaults):
            # More names than remaining defaults: still a positional arg.
            args.append(vargs.pop(0))
        else:
            default = defaults.pop(0)
            if isinstance(default, str):
                default = "\"%s\"" % default
            else:
                default = "%s" % str(default)
            kwargs.append((vargs.pop(0), default))

    if args:
        args = ", ".join("%s" % arg for arg in args)
    if kwargs:
        kwargs = ", ".join("%s=%s" % kwarg for kwarg in kwargs)
        if args:
            kwargs = ", " + kwargs
    if vargsname:
        vargsname = "*%s" % vargsname
        if args or kwargs:
            vargsname = ", " + vargsname
    if kwargsname:
        kwargsname = "**%s" % kwargsname
        if args or kwargs or vargsname:
            kwargsname = ", " + kwargsname

    _FUNCDEF = "{funcname}({args}{kwargs}{vargs}{vkwargs})"
    funcdef = _FUNCDEF.format(funcname=funcname, args=args or "",
                              kwargs=kwargs or "", vargs=vargsname or "",
                              vkwargs=kwargsname or "")

    # split the function definition if it is too long
    lmax = 90
    if len(funcdef) > lmax:
        # wrap in the args list
        split = funcdef.split("(", 1)
        # we gradually build the string again
        rest = split[1]
        args = rest.split(", ")
        funcname = "(".join(split[:1]) + "("
        lline = len(funcname)
        parts = []
        for arg in args:
            larg = len(arg)
            if larg > lmax - 5:
                # not much to do if arg is so long
                parts.append(arg)
            elif lline + larg > lmax:
                # the next arg is too long, break the line
                parts.append("\\\n " + arg)
                lline = 0
            else:
                parts.append(arg)
                lline += len(parts[-1])
        funcdef = funcname + ", ".join(parts)

    # build the signature
    string = FUNC_TEMPLATE.format(section=section,
                                  header=header,
                                  funcdef=funcdef,
                                  path=path,
                                  doc=doc if doc else "*No documentation found.*")
    return string
108,270
Searches for the nearest penultimate `Conv` or `Pooling` layer. Args: model: The `keras.models.Model` instance. layer_idx: The layer index within `model.layers`. penultimate_layer_idx: The pre-layer to `layer_idx`. If set to None, the nearest penultimate `Conv` or `Pooling` layer is used. Returns: The penultimate layer.
def _find_penultimate_layer(model, layer_idx, penultimate_layer_idx):
    """Searches for the nearest penultimate `Conv` or `Pooling` layer.

    Args:
        model: The `keras.models.Model` instance.
        layer_idx: The layer index within `model.layers`.
        penultimate_layer_idx: The pre-layer to `layer_idx`. If set to None,
            the nearest penultimate `Conv` or `Pooling` layer is used.

    Returns:
        The penultimate layer.

    Raises:
        ValueError: if no suitable layer can be found or the given index is
            not before `layer_idx`.
    """
    if penultimate_layer_idx is None:
        # Walk backwards from just before layer_idx looking for a Conv/Pool.
        for idx, layer in utils.reverse_enumerate(model.layers[:layer_idx - 1]):
            if isinstance(layer, Wrapper):
                # Unwrap wrapped layers (e.g. TimeDistributed) before checking.
                layer = layer.layer
            if isinstance(layer, (_Conv, _Pooling1D, _Pooling2D, _Pooling3D)):
                penultimate_layer_idx = idx
                break

    if penultimate_layer_idx is None:
        raise ValueError('Unable to determine penultimate `Conv` or `Pooling` '
                         'layer for layer_idx: {}'.format(layer_idx))

    # Handle negative indexing otherwise the next check can fail.
    if layer_idx < 0:
        layer_idx = len(model.layers) + layer_idx
    if penultimate_layer_idx > layer_idx:
        raise ValueError('`penultimate_layer_idx` needs to be before `layer_idx`')

    return model.layers[penultimate_layer_idx]
108,273
Creates a copy of model by modifying all activations to use a custom op to modify the backprop behavior. Args: model: The `keras.models.Model` instance. backprop_modifier: One of `{'guided', 'rectified'}` Returns: A copy of model with modified activations for backwards pass.
def modify_model_backprop(model, backprop_modifier):
    """Creates a copy of model by modifying all activations to use a custom op
    to modify the backprop behavior.

    Args:
        model: The `keras.models.Model` instance.
        backprop_modifier: One of `{'guided', 'rectified'}`

    Returns:
        A copy of model with modified activations for backwards pass.

    Raises:
        ValueError: if `backprop_modifier` is not a supported modifier.
    """
    # The general strategy is as follows:
    # - Save original model so that upstream callers don't see unexpected results with their models.
    # - Call backend specific function that registers the custom op and loads the model under modified context manager.
    # - Maintain cache to save this expensive process on subsequent calls.
    # - Load model with custom context modifying backprop behavior.
    #
    # The reason for this round about way is because the graph needs to be rebuild when any of its layer builder
    # functions are changed. This is very complicated to do in Keras and makes the implementation very tightly bound
    # with keras internals. By saving and loading models, we dont have to worry about future compatibility.
    #
    # The only exception to this is the way advanced activations are handled which makes use of some keras internal
    # knowledge and might break in the future.

    # ADD on 22 Jul 2018:
    # In fact, it has broken. Currently, advanced activations are not supported.

    # 0. Retrieve from cache if previously computed.
    modified_model = _MODIFIED_MODEL_CACHE.get((model, backprop_modifier))
    if modified_model is not None:
        return modified_model

    model_path = os.path.join(tempfile.gettempdir(), next(tempfile._get_candidate_names()) + '.h5')
    try:
        # 1. Save original model
        model.save(model_path)

        # 2. Register modifier and load modified model under custom context.
        modifier_fn = _BACKPROP_MODIFIERS.get(backprop_modifier)
        if modifier_fn is None:
            raise ValueError("'{}' modifier is not supported".format(backprop_modifier))
        modifier_fn(backprop_modifier)

        # 3. Create graph under custom context manager.
        with tf.get_default_graph().gradient_override_map({'Relu': backprop_modifier}):
            # This should rebuild graph with modifications.
            modified_model = load_model(model_path)

            # Cache to improve subsequent call performance.
            _MODIFIED_MODEL_CACHE[(model, backprop_modifier)] = modified_model
            return modified_model
    finally:
        # Always clean up the temporary save file.
        os.remove(model_path)
108,280
Normalizes the `output_tensor` with respect to `input_tensor` dimensions. This makes regularizer weight factor more or less uniform across various input image dimensions. Args: input_tensor: An tensor of shape: `(samples, channels, image_dims...)` if `image_data_format= channels_first` or `(samples, image_dims..., channels)` if `image_data_format=channels_last`. output_tensor: The tensor to normalize. Returns: The normalized tensor.
def normalize(input_tensor, output_tensor):
    """Normalizes the `output_tensor` with respect to `input_tensor`
    dimensions.

    This makes regularizer weight factor more or less uniform across various
    input image dimensions.

    Args:
        input_tensor: An tensor of shape: `(samples, channels, image_dims...)`
            if `image_data_format=channels_first` or
            `(samples, image_dims..., channels)` if
            `image_data_format=channels_last`.
        output_tensor: The tensor to normalize.

    Returns:
        The normalized tensor.
    """
    # Divide by the product of all non-batch dimensions.
    dims = utils.get_img_shape(input_tensor)[1:]
    return output_tensor / np.prod(dims)
108,284
Builds a L-p norm function. This regularizer encourages the intensity of pixels to stay bounded. i.e., prevents pixels from taking on very large values. Args: img_input: 4D image input tensor to the model of shape: `(samples, channels, rows, cols)` if data_format='channels_first' or `(samples, rows, cols, channels)` if data_format='channels_last'. p: The pth norm to use. If p = float('inf'), infinity-norm will be used.
def __init__(self, img_input, p=6.):
    """Builds a L-p norm function.

    This regularizer encourages the intensity of pixels to stay bounded, i.e.
    it prevents pixels from taking on very large values.

    Args:
        img_input: 4D image input tensor to the model of shape:
            `(samples, channels, rows, cols)` if data_format='channels_first'
            or `(samples, rows, cols, channels)` if
            data_format='channels_last'.
        p: The pth norm to use. If p = float('inf'), infinity-norm will be
            used.
    """
    super(LPNorm, self).__init__()
    if p < 1:
        raise ValueError('p value should range between [1, inf)')
    self.p = p
    self.img = img_input
    self.name = "L-{} Norm Loss".format(p)
108,287
Updates `kwargs` with dict of `defaults` Args: defaults: A dictionary of keys and values **kwargs: The kwargs to update. Returns: The updated kwargs.
def add_defaults_to_kwargs(defaults, **kwargs):
    """Updates `kwargs` with dict of `defaults`.

    Args:
        defaults: A dictionary of keys and values.
        **kwargs: The kwargs to update.

    Returns:
        The updated kwargs (explicit kwargs win over defaults).
    """
    # The input dict is copied, never mutated.
    return {**defaults, **kwargs}
108,291
Helper utility to retrieve the callable function associated with a string identifier. Args: identifier: The identifier. Could be a string or function. module_globals: The global objects of the module. module_name: The module name Returns: The callable associated with the identifier.
def get_identifier(identifier, module_globals, module_name):
    """Helper utility to retrieve the callable function associated with a
    string identifier.

    Args:
        identifier: The identifier. Could be a string or function.
        module_globals: The global objects of the module.
        module_name: The module name (used only in the error message).

    Returns:
        The callable associated with the identifier.

    Raises:
        ValueError: if the identifier is an unknown name or is neither a
            string nor a callable.
    """
    # `six.string_types` is a Python 2/3 shim; on Python 3 it is just `(str,)`,
    # so check `str` directly and drop the third-party dependency.
    if isinstance(identifier, str):
        fn = module_globals.get(identifier)
        if fn is None:
            raise ValueError('Unknown {}: {}'.format(module_name, identifier))
        return fn
    elif callable(identifier):
        return identifier
    else:
        raise ValueError('Could not interpret identifier')
108,292
Applies modifications to the model layers to create a new Graph. For example, simply changing `model.layers[idx].activation = new activation` does not change the graph. The entire graph needs to be updated with modified inbound and outbound tensors because of change in layer building function. Args: model: The `keras.models.Model` instance. Returns: The modified model with changes applied. Does not mutate the original `model`.
def apply_modifications(model, custom_objects=None):
    """Applies modifications to the model layers to create a new Graph.

    Simply changing `model.layers[idx].activation = new activation` does not
    change the graph; the entire graph needs to be updated with modified
    inbound and outbound tensors. This is done via a save/load round-trip:
    setting an activation on a Keras layer doesn't rebuild the graph, and
    patching inbound/outbound nodes by hand is doubly complicated in Keras 2.x
    where layers may have multiple inbound and outbound nodes.

    Args:
        model: The `keras.models.Model` instance.
        custom_objects: Optional mapping passed through to `load_model`.

    Returns:
        The modified model with changes applied. Does not mutate the original
        `model`.
    """
    tmp_path = os.path.join(
        tempfile.gettempdir(), next(tempfile._get_candidate_names()) + '.h5')
    try:
        model.save(tmp_path)
        return load_model(tmp_path, custom_objects=custom_objects)
    finally:
        os.remove(tmp_path)
108,293
Creates a uniformly distributed random array with the given `mean` and `std`. Args: shape: The desired shape mean: The desired mean (Default value = 128) std: The desired std (Default value = 20) Returns: Random numpy array of given `shape` uniformly distributed with desired `mean` and `std`.
def random_array(shape, mean=128., std=20.):
    """Creates a uniformly distributed random array with the given `mean` and
    `std`.

    Args:
        shape: The desired shape.
        mean: The desired mean (Default value = 128).
        std: The desired std (Default value = 20).

    Returns:
        Random numpy array of given `shape` uniformly distributed with desired
        `mean` and `std`.
    """
    samples = np.random.random(shape)
    # Standardize to mean 0, std 1 (epsilon avoids division by zero) ...
    samples = (samples - np.mean(samples)) / (np.std(samples) + K.epsilon())
    # ... then rescale to the requested statistics.
    return samples * std + mean
108,294
Looks up the layer index corresponding to `layer_name` from `model`. Args: model: The `keras.models.Model` instance. layer_name: The name of the layer to lookup. Returns: The layer index if found. Raises an exception otherwise.
def find_layer_idx(model, layer_name):
    """Looks up the layer index corresponding to `layer_name` from `model`.

    Args:
        model: The `keras.models.Model` instance.
        layer_name: The name of the layer to lookup.

    Returns:
        The layer index if found. Raises an exception otherwise.
    """
    for idx, layer in enumerate(model.layers):
        if layer.name == layer_name:
            return idx
    raise ValueError("No layer with name '{}' within the model".format(layer_name))
108,295
Utility function to scale the `input_array` to `input_range` throwing away high frequency artifacts. Args: input_array: An N-dim numpy array. input_range: Specifies the input range as a `(min, max)` tuple to rescale the `input_array`. Returns: The rescaled `input_array`.
def deprocess_input(input_array, input_range=(0, 255)):
    """Utility function to scale the `input_array` to `input_range` throwing
    away high frequency artifacts.

    Args:
        input_array: An N-dim numpy array.
        input_range: Specifies the input range as a `(min, max)` tuple to
            rescale the `input_array`.

    Returns:
        The rescaled `input_array`.
    """
    # Work on a copy; center on 0 with std ~0.1.
    arr = input_array.copy()
    arr -= arr.mean()
    arr /= (arr.std() + K.epsilon())
    arr *= 0.1

    # Shift to [0, 1] and clip.
    arr += 0.5
    arr = np.clip(arr, 0, 1)

    # Finally map onto the requested range.
    lo, hi = input_range[0], input_range[1]
    return (hi - lo) * arr + lo
108,296
Utility function to stitch images together with a `margin`. Args: images: The array of 2D images to stitch. margin: The black border margin size between images (Default value = 5) cols: Max number of image cols. New row is created when number of images exceed the column size. (Default value = 5) Returns: A single numpy image array comprising of input images.
def stitch_images(images, margin=5, cols=5):
    """Utility function to stitch images together with a `margin`.

    Args:
        images: The array of 2D images to stitch.
        margin: The black border margin size between images
            (Default value = 5).
        cols: Max number of image cols. New row is created when number of
            images exceed the column size. (Default value = 5)

    Returns:
        A single numpy image array comprising of input images, or None when
        `images` is empty.
    """
    if not images:
        return None

    h, w, c = images[0].shape
    n_rows = int(math.ceil(len(images) / cols))
    n_cols = min(len(images), cols)

    # Overall canvas size includes one margin strip between adjacent cells.
    canvas = np.zeros(
        (n_rows * h + (n_rows - 1) * margin,
         n_cols * w + (n_cols - 1) * margin,
         c),
        dtype=images[0].dtype)

    for idx, img in enumerate(images):
        row, col = divmod(idx, cols)
        top = (h + margin) * row
        left = (w + margin) * col
        canvas[top:top + h, left:left + w, :] = img

    return canvas
108,297
Returns image shape in a backend agnostic manner. Args: img: An image tensor of shape: `(channels, image_dims...)` if data_format='channels_first' or `(image_dims..., channels)` if data_format='channels_last'. Returns: Tuple containing image shape information in `(samples, channels, image_dims...)` order.
def get_img_shape(img):
    """Returns image shape in a backend agnostic manner.

    Args:
        img: An image tensor of shape: `(channels, image_dims...)` if
            data_format='channels_first' or `(image_dims..., channels)` if
            data_format='channels_last'.

    Returns:
        Tuple containing image shape information in
        `(samples, channels, image_dims...)` order.
    """
    if isinstance(img, np.ndarray):
        shape = img.shape
    else:
        shape = K.int_shape(img)

    if K.image_data_format() == 'channels_last':
        # Move the trailing channel axis right after the batch axis.
        shape = tuple(shape)
        shape = (shape[0], shape[-1]) + shape[1:-1]
    return shape
108,298
Utility function to load an image from disk. Args: path: The image file path. grayscale: True to convert to grayscale image (Default value = False) target_size: (w, h) to resize. (Default value = None) Returns: The loaded numpy image.
def load_img(path, grayscale=False, target_size=None):
    """Utility function to load an image from disk.

    Args:
        path: The image file path.
        grayscale: True to convert to grayscale image (Default value = False).
        target_size: (w, h) to resize. (Default value = None)

    Returns:
        The loaded numpy image.
    """
    image = io.imread(path, grayscale)
    if not target_size:
        return image
    # preserve_range keeps pixel values instead of rescaling to [0, 1].
    return transform.resize(image, target_size, preserve_range=True).astype('uint8')
108,299
Utility function to return the image net label for the final `dense` layer output index. Args: indices: Could be a single value or an array of indices whose labels should be looked up. Returns: Image net label corresponding to the image category.
def lookup_imagenet_labels(indices):
    """Utility function to return the image net label for the final `dense`
    layer output index.

    Args:
        indices: Could be a single value or an array of indices whose labels
            should be looked up.

    Returns:
        Image net label corresponding to the image category.
    """
    global _CLASS_INDEX
    if _CLASS_INDEX is None:
        # Lazily load the class index mapping once per process.
        index_path = os.path.join(
            os.path.dirname(__file__),
            '../../resources/imagenet_class_index.json')
        with open(index_path) as f:
            _CLASS_INDEX = json.load(f)

    return [_CLASS_INDEX[str(idx)][1] for idx in listify(indices)]
108,300
Draws text over the image. Requires PIL. Args: img: The image to use. text: The text string to overlay. position: The text (x, y) position. (Default value = (10, 10)) font: The ttf or open type font to use. (Default value = 'FreeSans.ttf') font_size: The text font size. (Default value = 12) color: The (r, g, b) values for text color. (Default value = (0, 0, 0)) Returns: Image overlayed with text.
def draw_text(img, text, position=(10, 10), font='FreeSans.ttf', font_size=14, color=(0, 0, 0)):
    """Overlay `text` onto a copy of `img`. Requires PIL.

    Args:
        img: The image to use.
        text: The text string to overlay.
        position: The text (x, y) position (Default value = (10, 10)).
        font: The ttf or open type font to use (Default value = 'FreeSans.ttf').
        font_size: The text font size (Default value = 14).
        color: The (r, g, b) text color (Default value = (0, 0, 0)).

    Returns:
        A new numpy image with the text overlaid; the input is not mutated.
    """
    _check_pil()

    matches = _find_font_file(font)
    if matches:
        pil_font = ImageFont.truetype(matches[0], font_size)
    else:
        logger.warn("Failed to lookup font '{}', falling back to default".format(font))
        pil_font = ImageFont.load_default()

    # Draw on a PIL copy so the caller's array is left untouched.
    canvas = Image.fromarray(img)
    ImageDraw.Draw(canvas).text(position, text, fill=color, font=pil_font)
    return np.asarray(canvas)
108,301
Normalizes the numpy array to (min_value, max_value) Args: array: The numpy array min_value: The min value in normalized array (Default value = 0) max_value: The max value in normalized array (Default value = 1) Returns: The array normalized to range between (min_value, max_value)
def normalize(array, min_value=0., max_value=1.):
    """Rescale `array` linearly into the range (min_value, max_value).

    Args:
        array: The numpy array.
        min_value: The min value of the normalized array (Default value = 0).
        max_value: The max value of the normalized array (Default value = 1).

    Returns:
        The array normalized to the range (min_value, max_value).
    """
    lo = np.min(array)
    hi = np.max(array)
    # K.epsilon() guards against division by zero for constant arrays.
    unit = (array - lo) / (hi - lo + K.epsilon())
    return (max_value - min_value) * unit + min_value
108,302
Determines the number of filters within the given `layer`. Args: layer: The keras layer to use. Returns: Total number of filters within `layer`. For `keras.layers.Dense` layer, this is the total number of outputs.
def get_num_filters(layer):
    """Determine the number of filters within the given `layer`.

    Args:
        layer: The keras layer to use.

    Returns:
        Total number of filters within `layer`. For a `keras.layers.Dense`
        layer, this is the total number of outputs.
    """
    output = layer.output
    if K.ndim(output) == 2:
        # Dense-style outputs have no channel axis; use the output dimension.
        return K.int_shape(output)[-1]
    axis = 1 if K.image_data_format() == 'channels_first' else -1
    return K.int_shape(output)[axis]
108,304
Overlays `array1` onto `array2` with `alpha` blending. Args: array1: The first numpy array. array2: The second numpy array. alpha: The alpha value of `array1` as overlayed onto `array2`. This value needs to be between [0, 1], with 0 being `array2` only to 1 being `array1` only (Default value = 0.5). Returns: The `array1`, overlayed with `array2` using `alpha` blending.
def overlay(array1, array2, alpha=0.5):
    """Alpha-blend `array1` over `array2`.

    Args:
        array1: The first numpy array.
        array2: The second numpy array.
        alpha: Weight of `array1` in the blend, in [0, 1]: 0 yields `array2`
            only, 1 yields `array1` only (Default value = 0.5).

    Returns:
        The blended array, cast back to `array1`'s dtype.

    Raises:
        ValueError: If `alpha` is outside [0, 1] or the shapes differ.
    """
    if alpha < 0. or alpha > 1.:
        raise ValueError("`alpha` needs to be between [0, 1]")
    if array1.shape != array2.shape:
        raise ValueError('`array1` and `array2` must have the same shapes')

    # The weighted sum is promoted to float; cast back to the input dtype.
    blended = alpha * array1 + (1. - alpha) * array2
    return blended.astype(array1.dtype)
108,305
Retrieve the Engine-level model params from a Swarm model Args: modelID - Engine-level model ID of the Swarm model Returns: JSON-encoded string containing Model Params
def getSwarmModelParams(modelID):
  """Retrieve the Engine-level model params from a Swarm model.

  Args:
    modelID: Engine-level model ID of the Swarm model.

  Returns:
    JSON-encoded string containing the model params
    (keys `modelConfig` and `inferenceArgs`).
  """
  # TODO: the use of nupic.frameworks.opf.helpers.loadExperimentDescriptionScriptFromDir when
  # retrieving module params results in a leakage of pf_base_descriptionNN and
  # pf_descriptionNN module imports for every call to getSwarmModelParams, so
  # the leakage is unlimited when getSwarmModelParams is called by a
  # long-running process. An alternate solution is to execute the guts of
  # this function's logic in a separate process (via multiprocessing module).

  cjDAO = ClientJobsDAO.get()

  # Fetch the job ID and the generated experiment description for this model.
  (jobID, description) = cjDAO.modelsGetFields(
      modelID,
      ["jobId", "genDescription"])

  (baseDescription,) = cjDAO.jobGetFields(jobID, ["genBaseDescription"])

  # Construct a directory with base.py and description.py for loading model
  # params, and use nupic.frameworks.opf.helpers to extract model params from
  # those files
  descriptionDirectory = tempfile.mkdtemp()
  try:
    baseDescriptionFilePath = os.path.join(descriptionDirectory, "base.py")
    with open(baseDescriptionFilePath, mode="wb") as f:
      f.write(baseDescription)

    descriptionFilePath = os.path.join(descriptionDirectory, "description.py")
    with open(descriptionFilePath, mode="wb") as f:
      f.write(description)

    expIface = helpers.getExperimentDescriptionInterfaceFromModule(
        helpers.loadExperimentDescriptionScriptFromDir(descriptionDirectory))

    return json.dumps(
        dict(
            modelConfig=expIface.getModelDescription(),
            inferenceArgs=expIface.getModelControl().get("inferenceArgs", None)))
  finally:
    # Always clean up the temporary description directory, even on error.
    shutil.rmtree(descriptionDirectory, ignore_errors=True)
108,310
[private] Create the default database connection policy instance Parameters: ---------------------------------------------------------------- retval: The default database connection policy instance
def _createDefaultPolicy(cls):
  """[private] Create the default database connection policy instance.

  Returns: the default database connection policy instance.
  """
  log = _getLogger(cls)

  log.debug(
      "Creating database connection policy: platform=%r; pymysql.VERSION=%r",
      platform.system(), pymysql.VERSION)

  if platform.system() == "Java":
    # NOTE: PooledDB doesn't seem to work under Jython
    # NOTE: not appropriate for multi-threaded applications.
    # TODO: this was fixed in Webware DBUtils r8228, so once
    #       we pick up a realease with this fix, we should use
    #       PooledConnectionPolicy for both Jython and Python.
    return SingleSharedConnectionPolicy()

  return PooledConnectionPolicy()
108,315
Get a Connection instance. Parameters: ---------------------------------------------------------------- retval: A ConnectionWrapper instance. NOTE: Caller is responsible for calling the ConnectionWrapper instance's release() method or use it in a context manager expression (with ... as:) to release resources.
def acquireConnection(self):
  """Get a Connection instance backed by the single shared connection.

  Returns: A ConnectionWrapper instance. NOTE: Caller is responsible for
  calling the ConnectionWrapper instance's release() method or using it in
  a context manager expression (with ... as:) to release resources.
  """
  self._logger.debug("Acquiring connection")

  # Check connection and attempt to re-establish it if it died (this is
  # what PooledDB does)
  self._conn._ping_check()

  return ConnectionWrapper(dbConn=self._conn,
                           cursor=self._conn.cursor(),
                           releaser=self._releaseConnection,
                           logger=self._logger)
108,322
Get a connection from the pool. Parameters: ---------------------------------------------------------------- retval: A ConnectionWrapper instance. NOTE: Caller is responsible for calling the ConnectionWrapper instance's release() method or use it in a context manager expression (with ... as:) to release resources.
def acquireConnection(self):
  """Get a connection from the pool.

  Returns: A ConnectionWrapper instance. NOTE: Caller is responsible for
  calling the ConnectionWrapper instance's release() method or using it in
  a context manager expression (with ... as:) to release resources.
  """
  self._logger.debug("Acquiring connection")

  # shareable=False gives this caller exclusive use of the connection.
  conn = self._pool.connection(shareable=False)

  return ConnectionWrapper(dbConn=conn,
                           cursor=conn.cursor(),
                           releaser=self._releaseConnection,
                           logger=self._logger)
108,325
Create a Connection instance. Parameters: ---------------------------------------------------------------- retval: A ConnectionWrapper instance. NOTE: Caller is responsible for calling the ConnectionWrapper instance's release() method or use it in a context manager expression (with ... as:) to release resources.
def acquireConnection(self):
  """Create a new SteadyDB Connection instance.

  Returns: A ConnectionWrapper instance. NOTE: Caller is responsible for
  calling the ConnectionWrapper instance's release() method or using it in
  a context manager expression (with ... as:) to release resources.
  """
  self._logger.debug("Acquiring connection")

  conn = SteadyDB.connect(**_getCommonSteadyDBArgsDict())

  return ConnectionWrapper(dbConn=conn,
                           cursor=conn.cursor(),
                           releaser=self._releaseConnection,
                           logger=self._logger)
108,328
The range of connected synapses for column. This is used to calculate the inhibition radius. This variation of the function only supports a 1 dimensional column topology. Parameters: ---------------------------- :param columnIndex: The index identifying a column in the permanence, potential and connectivity matrices
def _avgConnectedSpanForColumn1D(self, columnIndex):
  """The range of connected synapses for a column (1D input topology only).

  Used to calculate the inhibition radius.

  Parameters:
  ----------------------------
  :param columnIndex: The index identifying a column in the permanence,
                      potential and connectivity matrices.
  """
  assert(self._inputDimensions.size == 1)
  indices = self._connectedSynapses[columnIndex].nonzero()[0]
  if indices.size == 0:
    return 0
  # Span covers everything between the first and last connected input,
  # inclusive.
  return max(indices) - min(indices) + 1
108,372
The range of connectedSynapses per column, averaged for each dimension. This value is used to calculate the inhibition radius. This variation of the function only supports a 2 dimensional column topology. Parameters: ---------------------------- :param columnIndex: The index identifying a column in the permanence, potential and connectivity matrices
def _avgConnectedSpanForColumn2D(self, columnIndex):
  """The range of connected synapses per column, averaged over dimensions
  (2D input topology only). Used to calculate the inhibition radius.

  Parameters:
  ----------------------------
  :param columnIndex: The index identifying a column in the permanence,
                      potential and connectivity matrices.
  """
  assert(self._inputDimensions.size == 2)
  flat = self._connectedSynapses[columnIndex]
  (rows, cols) = flat.reshape(self._inputDimensions).nonzero()
  if rows.size == 0 and cols.size == 0:
    return 0
  # Average the row-wise and column-wise extents of the connected inputs.
  spans = [rows.max() - rows.min() + 1,
           cols.max() - cols.min() + 1]
  return numpy.average(spans)
108,373
Runs the OPF Model Parameters: ------------------------------------------------------------------------- retval: (completionReason, completionMsg) where completionReason is one of the ClientJobsDAO.CMPL_REASON_XXX equates.
def run(self):
  """Runs the OPF Model.

  Returns:
    (completionReason, completionMsg) where completionReason is one of the
    ClientJobsDAO.CMPL_REASON_XXX equates.
  """
  # -----------------------------------------------------------------------
  # Load the experiment's description.py module
  descriptionPyModule = helpers.loadExperimentDescriptionScriptFromDir(
      self._experimentDir)
  expIface = helpers.getExperimentDescriptionInterfaceFromModule(
      descriptionPyModule)
  expIface.normalizeStreamSources()

  modelDescription = expIface.getModelDescription()
  self._modelControl = expIface.getModelControl()

  # -----------------------------------------------------------------------
  # Create the input data stream for this task
  streamDef = self._modelControl['dataset']

  from nupic.data.stream_reader import StreamReader
  readTimeout = 0

  self._inputSource = StreamReader(streamDef, isBlocking=False,
                                   maxTimeout=readTimeout)

  # -----------------------------------------------------------------------
  # Get field statistics from the input source
  fieldStats = self._getFieldStats()

  # -----------------------------------------------------------------------
  # Construct the model instance
  self._model = ModelFactory.create(modelDescription)
  self._model.setFieldStatistics(fieldStats)
  self._model.enableLearning()
  self._model.enableInference(self._modelControl.get("inferenceArgs", None))

  # -----------------------------------------------------------------------
  # Instantiate the metrics
  self.__metricMgr = MetricsManager(self._modelControl.get('metrics',None),
                                    self._model.getFieldInfo(),
                                    self._model.getInferenceType())

  self.__loggedMetricPatterns = self._modelControl.get("loggedMetrics", [])

  self._optimizedMetricLabel = self.__getOptimizedMetricLabel()
  self._reportMetricLabels = matchPatterns(self._reportKeyPatterns,
                                           self._getMetricLabels())

  # -----------------------------------------------------------------------
  # Initialize periodic activities (e.g., for model result updates)
  self._periodic = self._initPeriodicActivities()

  # -----------------------------------------------------------------------
  # Create our top-level loop-control iterator.
  # -1 means "run until the input source is exhausted".
  numIters = self._modelControl.get('iterationCount', -1)

  # Are we asked to turn off learning for a certain # of iterations near the
  # end?
  learningOffAt = None
  iterationCountInferOnly = self._modelControl.get('iterationCountInferOnly', 0)
  if iterationCountInferOnly == -1:
    # Special value: run the entire task in inference-only mode.
    self._model.disableLearning()
  elif iterationCountInferOnly > 0:
    assert numIters > iterationCountInferOnly, "when iterationCountInferOnly " \
      "is specified, iterationCount must be greater than " \
      "iterationCountInferOnly."
    learningOffAt = numIters - iterationCountInferOnly

  self.__runTaskMainLoop(numIters, learningOffAt=learningOffAt)

  # -----------------------------------------------------------------------
  # Perform final operations for model
  self._finalize()

  return (self._cmpReason, None)
108,445
Main loop of the OPF Model Runner. Parameters: ----------------------------------------------------------------------- numIters: Maximum number of records to process (-1 for no limit) learningOffAt: If not None, learning is turned off when we reach this iteration number
def __runTaskMainLoop(self, numIters, learningOffAt=None):
  """Main loop of the OPF Model Runner.

  Parameters:
  -----------------------------------------------------------------------
  numIters:       Maximum number of records to process (-1 for no limit)
  learningOffAt:  If not None, learning is turned off when we reach this
                  iteration number
  """

  ## Reset sequence states in the model, so it starts looking for a new
  ## sequence
  self._model.resetSequenceStates()

  self._currentRecordIndex = -1
  while True:

    # If killed by a terminator, stop running
    if self._isKilled:
      break

    # If job stops or hypersearch ends, stop running
    if self._isCanceled:
      break

    # If the process is about to be killed, set as orphaned
    if self._isInterrupted.isSet():
      self.__setAsOrphaned()
      break

    # If model is mature, stop running ONLY IF we are not the best model
    # for the job. Otherwise, keep running so we can keep returning
    # predictions to the user
    if self._isMature:
      if not self._isBestModel:
        self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED
        break
      else:
        self._cmpReason = self._jobsDAO.CMPL_REASON_EOF

    # Turn off learning?
    if learningOffAt is not None \
          and self._currentRecordIndex == learningOffAt:
      self._model.disableLearning()

    # Read input record. Note that any failure here is a critical JOB failure
    # and results in the job being immediately canceled and marked as
    # failed. The runModelXXX code in hypesearch.utils, if it sees an
    # exception of type utils.JobFailException, will cancel the job and
    # copy the error message into the job record.
    try:
      inputRecord = self._inputSource.getNextRecordDict()
      if self._currentRecordIndex < 0:
        # First read succeeded; switch to a short timeout for later reads.
        self._inputSource.setTimeout(10)
    except Exception, e:
      raise utils.JobFailException(ErrorCodes.streamReading, str(e.args),
                                   traceback.format_exc())

    if inputRecord is None:
      # EOF
      self._cmpReason = self._jobsDAO.CMPL_REASON_EOF
      break

    if inputRecord:
      # Process input record
      self._currentRecordIndex += 1

      result = self._model.run(inputRecord=inputRecord)

      # Compute metrics.
      result.metrics = self.__metricMgr.update(result)
      # If there are None, use defaults. see MetricsManager.getMetrics()
      # TODO remove this when JAVA API server is gone
      if not result.metrics:
        result.metrics = self.__metricMgr.getMetrics()

      # Write the result to the output cache. Don't write encodings, if they
      # were computed
      if InferenceElement.encodings in result.inferences:
        result.inferences.pop(InferenceElement.encodings)
      result.sensorInput.dataEncodings = None
      self._writePrediction(result)

      # Run periodic activities
      self._periodic.tick()

      if numIters >= 0 and self._currentRecordIndex >= numIters-1:
        break

    else:
      # Input source returned an empty record.
      #
      # NOTE: This is okay with Stream-based Source (when it times out
      # waiting for next record), but not okay with FileSource, which should
      # always return either with a valid record or None for EOF.
      raise ValueError("Got an empty record from FileSource: %r" %
                       inputRecord)
108,446
Delete the stored checkpoint for the specified modelID. This function is called if the current model is now the best model, making the old model's checkpoint obsolete Parameters: ----------------------------------------------------------------------- modelID: The modelID for the checkpoint to delete. This is NOT the unique checkpointID
def __deleteModelCheckpoint(self, modelID): checkpointID = \ self._jobsDAO.modelsGetFields(modelID, ['modelCheckpointId'])[0] if checkpointID is None: return try: shutil.rmtree(os.path.join(self._experimentDir, str(self._modelCheckpointGUID))) except: self._logger.warn("Failed to delete model checkpoint %s. "\ "Assuming that another worker has already deleted it", checkpointID) return self._jobsDAO.modelSetFields(modelID, {'modelCheckpointId':None}, ignoreUnchanged=True) return
108,449
Get the label for the metric being optimized. This function also caches the label in the instance variable self._optimizedMetricLabel Parameters: ----------------------------------------------------------------------- metricLabels: A sequence of all the labels being computed for this model Returns: The label for the metric being optmized over
def __getOptimizedMetricLabel(self):
  """Get the label for the metric being optimized.

  Returns: The label for the metric being optimized over.

  Raises: Exception if the optimization pattern matches zero metrics or
  more than one metric.
  """
  matches = matchPatterns([self._optimizeKeyPattern],
                          self._getMetricLabels())

  if not matches:
    raise Exception("None of the generated metrics match the specified "
                    "optimization pattern: %s. Available metrics are %s" % \
                    (self._optimizeKeyPattern, self._getMetricLabels()))

  if len(matches) > 1:
    raise Exception("The specified optimization pattern '%s' matches more "
                    "than one metric: %s" % (self._optimizeKeyPattern, matches))

  return matches[0]
108,451
Deletes the output cache associated with the given modelID. This actually clears up the resources associated with the cache, rather than deleting all the records in the cache Parameters: ----------------------------------------------------------------------- modelID: The id of the model whose output cache is being deleted
def __deleteOutputCache(self, modelID):
  """Release the output cache resources associated with the given modelID.

  This frees the resources backing the cache rather than deleting the
  records stored in it.

  Parameters:
  -----------------------------------------------------------------------
  modelID: The id of the model whose output cache is being deleted.
  """
  # Only tear down the logger and cache when they belong to this runner.
  if modelID != self._modelID or self._predictionLogger is None:
    return

  self._predictionLogger.close()
  del self.__predictionCache
  self._predictionLogger = None
  self.__predictionCache = None
108,459
Creates and returns a PeriodicActivityMgr instance initialized with our periodic activities Parameters: ------------------------------------------------------------------------- retval: a PeriodicActivityMgr instance
def _initPeriodicActivities(self):
  """Create and return a PeriodicActivityMgr instance initialized with our
  periodic activities.

  Returns: a PeriodicActivityMgr instance.
  """
  # Activity to update the metrics for this model in the models table
  updateModelDBResults = PeriodicActivityRequest(
      repeating=True, period=100, cb=self._updateModelDBResults)

  updateJobResults = PeriodicActivityRequest(
      repeating=True, period=100, cb=self.__updateJobResultsPeriodic)

  checkCancelation = PeriodicActivityRequest(
      repeating=True, period=50, cb=self.__checkCancelation)

  checkMaturity = PeriodicActivityRequest(
      repeating=True, period=10, cb=self.__checkMaturity)

  # Do an initial update of the job record after 2 iterations to make
  # sure that it is populated with something without having to wait too long
  updateJobResultsFirst = PeriodicActivityRequest(
      repeating=False, period=2, cb=self.__updateJobResultsPeriodic)

  activities = [updateModelDBResults,
                updateJobResultsFirst,
                updateJobResults,
                checkCancelation]

  if self._isMaturityEnabled:
    activities.append(checkMaturity)

  return PeriodicActivityMgr(requestedActivities=activities)
108,460