from dalek_debug import DalekPrint, DalekDebugOn, DalekDebugSetOutputDevice
import time
import dalek_spi
import RPi.GPIO as GPIO  # Import GPIO drivers

dalek_spi.init()


#####################################################

# This is just for playing with the bits to see if it works :)
# Do not leave code here that you need.

#####################################################
DalekDebugOn()
# DalekDebugSetOutputDevice("scrollphat")
# DalekPrint("hello phil from the main module")

DalekPrint("Spin Left 56", "SL")
DalekPrint("Spin Left {}".format(666), "KKK")
DalekPrint("Turn Right 56")
DalekPrint("\n... Shutting Down...\n")
DalekPrint("\n\nReturning to Main Menu\n\n", "HM")
DalekPrint("", "PSS")
# while True:

#     piSensors = dalek_spi.readDevice1Data()
#     DalekPrint(piSensors['frontPing'])
#     DalekPrint(piSensors['compass'])
#     dalek_spi.test()

#     mag = dalek_spi.readDevice1Data()
#     DalekPrint(mag)
#     time.sleep(.2)
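
# A minimal sketch of the sensor polling loop the commented-out block above
# hints at. It assumes dalek_spi.readDevice1Data() returns a dict with
# 'frontPing' and 'compass' keys, as the comments suggest; the real dalek_spi
# API is not shown here, so treat the call and the keys as assumptions.
def poll_sensors():
    while True:
        pi_sensors = dalek_spi.readDevice1Data()
        DalekPrint(pi_sensors['frontPing'])
        DalekPrint(pi_sensors['compass'])
        time.sleep(.2)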
# coding=utf-8
"""
Distributed Applications - Project 3 - client.py
Group: 20
Students: 43551 45802 43304
"""
import json, requests, pprint, signal, subprocess, webbrowser, sys, hashlib


actions = ["ADD", "SHOW", "REMOVE", "UPDATE"]
genero = ["pop", "rock", "indy", "trance", "metal"]
rates = {"M": 1, "MM": 2, "S": 3, "B": 4, "MB": 5}

s = requests.session()
s.cert = ('./certs/cliente.crt', './certs/cliente.key')
s.verify = './certs/root.pem'
session_token = None

## TODO: If there is time, implement the HELP command
## TODO: Client connects -> ch

def handler(signum, frame):
    print ""
    print 'closing program...'
    sys.exit()

# Control + z and Control + c handlers
signal.signal(signal.SIGTSTP, handler)
signal.signal(signal.SIGINT, handler)


def login():
    global session_token
    url = 'https://localhost:5000/login'
    if sys.platform == 'darwin':  # in case of OS X
        subprocess.Popen(['open', url])
    else:
        webbrowser.open_new_tab(url)

    tokenchecksum = raw_input("Enter here the token code given to you on the website:\n")
    token = tokenchecksum[:40]
    checksum = tokenchecksum[40:]
    if hashlib.sha256(token).hexdigest() != checksum:
        print "Token is incorrect, restart the client and try again"
        sys.exit(-1)
    else:
        session_token = token


while True:
    try:
        if session_token is None:
            print "Login is required:"
            login()
            s.cookies.set("token", session_token)
            continue

        msg = raw_input("Command: ").split(" ")
        data = {}
        url = 'https://localhost:5000'
        if msg[0] in actions:

            if msg[0] == "ADD":
                if msg[1] == "USER" and len(msg) == 5:
                    data = {'nome': msg[2], 'username': msg[3], 'password': msg[4]}
                    url += '/utilizadores'

                elif msg[1] == "BANDA" and len(msg) == 5:
                    if msg[4] in genero:
                        try:
                            int(msg[3])
                        except ValueError:
                            print "The year provided was not an Integer"
                            continue

                        data = {'nome': msg[2], 'ano': msg[3], 'genero': msg[4]}
                        url += '/bandas'
                    else:
                        print "Genre given is not valid\nValid genres: pop | rock | indy | metal | trance"
                        continue
                elif msg[1] == "ALBUM" and len(msg) == 5:
                    flag = False
                    try:
                        int(msg[2])
                        flag = True
                        int(msg[4])
                    except ValueError:
                        if flag:
                            print "The year provided was not an Integer"
                        else:
                            print "The ID provided was not an Integer"
                        continue
                    data = {'id_banda': msg[2], 'nome': msg[3], 'ano': msg[4]}
                    url += '/albuns'

                elif len(msg) == 4:
                    if msg[3] in rates.keys():
                        flag = False
                        try:
                            int(msg[1])
                            flag = True
                            int(msg[2])
                        except ValueError:
                            if flag:
                                print "The album ID provided was not an Integer"
                            else:
                                print "The user ID provided was not an Integer"
                            continue
                        url += '/albuns/rate'
                        data = {'id_user': msg[1], 'id_album': msg[2], 'id_rate': rates[msg[3]]}
                    else:
                        print "Rate given is invalid\nValid Ratings: M | MM | S | B | MB"
                        continue
                else:
                    print "ADD parameters were not correct"
                    continue

                # Everything went fine, so make the request!
                print 'url', url
                request = s.put(url=url, json=data)
                response = json.loads(request.text.encode('utf8'))
                pprint.pprint(response)

            elif msg[0] == "SHOW" or msg[0] == "REMOVE":
                if msg[1] == "USER" and len(msg) == 3:
                    try:
                        int(msg[2])
                        url += '/utilizadores/' + msg[2]
                    except ValueError:
                        print "User id provided was not an Integer"
                        continue

                elif msg[1] == "BANDA" and len(msg) == 3:
                    try:
                        int(msg[2])
                        url += '/bandas/' + msg[2]
                    except ValueError:
                        print "Band id provided was not an Integer"
                        continue

                elif msg[1] == "ALBUM" and len(msg) == 3:
                    try:
                        int(msg[2])
                        url += '/albuns/' + msg[2]
                    except ValueError:
                        print "Album id provided was not an Integer"
                        continue

                elif msg[1] == "ALL":
                    if msg[2] == "USERS" and len(msg) == 3:
                        url += '/utilizadores'

                    elif msg[2] == "BANDAS" and len(msg) == 3:
                        url += '/bandas'

                    elif msg[2] == "ALBUNS" and len(msg) == 3:
                        url += '/albuns'

                    elif msg[2] == "ALBUNS" and len(msg) == 4 and msg[3] in rates.keys():
                        url += '/albuns/rate/' + str(rates[msg[3]])

                    elif msg[2] == "ALBUNS_B" and len(msg) == 4:
                        try:
                            int(msg[3])
                            url += '/albuns/banda/' + msg[3]
                        except ValueError:
                            print "Band id provided was not an Integer"
                            continue

                    elif msg[2] == "ALBUNS_U" and len(msg) == 4:
                        try:
                            int(msg[3])
                            url += '/albuns/user/' + msg[3]
                        except ValueError:
                            print "User id provided was not an Integer"
                            continue

                    else:
                        print "REMOVE/SHOW ALL parameters were not valid"
                        continue

                else:
                    print "REMOVE/SHOW parameters were not valid"
                    continue

                # Everything went fine, so make the request!
                if msg[0] == "SHOW":
                    print 'url', url
                    request = s.get(url=url)
                    response = json.loads(request.text.encode('utf8'))
                    pprint.pprint(response)
                else:
                    print 'url', url
                    request = s.delete(url=url)
                    response = json.loads(request.text.encode('utf8'))
                    pprint.pprint(response)

            elif msg[0] == "UPDATE":
                if msg[1] == "ALBUM" and len(msg) == 5:
                    if msg[4] in rates.keys():
                        flag = False
                        try:
                            int(msg[2])
                            flag = True
                            int(msg[3])
                        except ValueError:
                            if flag:
                                print "Album ID was not an Integer"
                            else:
                                print "User ID was not an Integer"
                            continue
                        data = {'id_user': msg[2], 'id_album': msg[3], 'id_rate': rates[msg[4]]}
                        url += '/albuns'
                    else:
                        print "Rate given is invalid\nValid Ratings: M | MM | S | B | MB"
                        continue

                elif msg[1] == "USER" and len(msg) == 4:
                    try:
                        int(msg[2])
                    except ValueError:
                        print "User ID was not an Integer"
                        continue
                    data = {'id_user': msg[2], 'password': msg[3]}
                    url += '/utilizadores'

                else:
                    print "UPDATE parameters were not valid"
                    continue

                # Everything went fine, so make the request!
                print 'url', url
                request = s.patch(url=url, json=data)
                response = json.loads(request.text.encode('utf8'))
                pprint.pprint(response)
        else:
            print msg[0] + " is not a valid command"
            continue

    except KeyboardInterrupt:
        exit()
    except Exception as e:
        print e.message
        print e.args
        print "ERROR"
        sys.exit()
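
# A minimal sketch (not part of the project) of how the server side could mint
# the combined string that login() above expects: a 40-character token followed
# by its SHA-256 hex digest. The 40-character length matches a SHA-1 hex
# digest, so that is assumed here; the real server's scheme may differ.
import hashlib, os

def make_token_checksum():
    token = hashlib.sha1(os.urandom(20)).hexdigest()  # 40 hex characters
    checksum = hashlib.sha256(token).hexdigest()      # what login() verifies
    return token + checksum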
"""NEWLINEUse this module directly:NEWLINE import xarray.plot as xpltNEWLINENEWLINEOr use the methods on a DataArray:NEWLINE DataArray.plot._____NEWLINE"""NEWLINEimport functoolsNEWLINENEWLINEimport numpy as npNEWLINEimport pandas as pdNEWLINENEWLINEfrom .facetgrid import _easy_facetgridNEWLINEfrom .utils import (NEWLINE _add_colorbar, _ensure_plottable, _infer_interval_breaks, _infer_xy_labels,NEWLINE _interval_to_double_bound_points, _interval_to_mid_points,NEWLINE _process_cmap_cbar_kwargs, _rescale_imshow_rgb, _resolve_intervals_2dplot,NEWLINE _update_axes, _valid_other_type, get_axis, import_matplotlib_pyplot,NEWLINE label_from_attrs)NEWLINENEWLINENEWLINEdef _infer_line_data(darray, x, y, hue):NEWLINE error_msg = ('must be either None or one of ({0:s})'NEWLINE .format(', '.join([repr(dd) for dd in darray.dims])))NEWLINE ndims = len(darray.dims)NEWLINENEWLINE if x is not None and x not in darray.dims and x not in darray.coords:NEWLINE raise ValueError('x ' + error_msg)NEWLINENEWLINE if y is not None and y not in darray.dims and y not in darray.coords:NEWLINE raise ValueError('y ' + error_msg)NEWLINENEWLINE if x is not None and y is not None:NEWLINE raise ValueError('You cannot specify both x and y kwargs'NEWLINE 'for line plots.')NEWLINENEWLINE if ndims == 1:NEWLINE huename = NoneNEWLINE hueplt = NoneNEWLINE huelabel = ''NEWLINENEWLINE if x is not None:NEWLINE xplt = darray[x]NEWLINE yplt = darrayNEWLINENEWLINE elif y is not None:NEWLINE xplt = darrayNEWLINE yplt = darray[y]NEWLINENEWLINE else: # Both x & y are NoneNEWLINE dim = darray.dims[0]NEWLINE xplt = darray[dim]NEWLINE yplt = darrayNEWLINENEWLINE else:NEWLINE if x is None and y is None and hue is None:NEWLINE raise ValueError('For 2D inputs, please'NEWLINE 'specify either hue, x or y.')NEWLINENEWLINE if y is None:NEWLINE xname, huename = _infer_xy_labels(darray=darray, x=x, y=hue)NEWLINE xplt = darray[xname]NEWLINE if xplt.ndim > 1:NEWLINE if huename in darray.dims:NEWLINE otherindex = 1 if darray.dims.index(huename) == 0 else 0NEWLINE otherdim = darray.dims[otherindex]NEWLINE yplt = darray.transpose(NEWLINE otherdim, huename, transpose_coords=False)NEWLINE xplt = xplt.transpose(NEWLINE otherdim, huename, transpose_coords=False)NEWLINE else:NEWLINE raise ValueError('For 2D inputs, hue must be a dimension'NEWLINE + ' i.e. one of ' + repr(darray.dims))NEWLINENEWLINE else:NEWLINE yplt = darray.transpose(xname, huename)NEWLINENEWLINE else:NEWLINE yname, huename = _infer_xy_labels(darray=darray, x=y, y=hue)NEWLINE yplt = darray[yname]NEWLINE if yplt.ndim > 1:NEWLINE if huename in darray.dims:NEWLINE otherindex = 1 if darray.dims.index(huename) == 0 else 0NEWLINE otherdim = darray.dims[otherindex]NEWLINE xplt = darray.transpose(NEWLINE otherdim, huename, transpose_coords=False)NEWLINE else:NEWLINE raise ValueError('For 2D inputs, hue must be a dimension'NEWLINE + ' i.e. 
one of ' + repr(darray.dims))NEWLINENEWLINE else:NEWLINE xplt = darray.transpose(yname, huename)NEWLINENEWLINE huelabel = label_from_attrs(darray[huename])NEWLINE hueplt = darray[huename]NEWLINENEWLINE xlabel = label_from_attrs(xplt)NEWLINE ylabel = label_from_attrs(yplt)NEWLINENEWLINE return xplt, yplt, hueplt, xlabel, ylabel, huelabelNEWLINENEWLINENEWLINEdef plot(darray, row=None, col=None, col_wrap=None, ax=None, hue=None,NEWLINE rtol=0.01, subplot_kws=None, **kwargs):NEWLINE """NEWLINE Default plot of DataArray using matplotlib.pyplot.NEWLINENEWLINE Calls xarray plotting function based on the dimensions ofNEWLINE darray.squeeze()NEWLINENEWLINE =============== ===========================NEWLINE Dimensions Plotting functionNEWLINE --------------- ---------------------------NEWLINE 1 :py:func:`xarray.plot.line`NEWLINE 2 :py:func:`xarray.plot.pcolormesh`NEWLINE Anything else :py:func:`xarray.plot.hist`NEWLINE =============== ===========================NEWLINENEWLINE ParametersNEWLINE ----------NEWLINE darray : DataArrayNEWLINE row : string, optionalNEWLINE If passed, make row faceted plots on this dimension nameNEWLINE col : string, optionalNEWLINE If passed, make column faceted plots on this dimension nameNEWLINE hue : string, optionalNEWLINE If passed, make faceted line plots with hue on this dimension nameNEWLINE col_wrap : integer, optionalNEWLINE Use together with ``col`` to wrap faceted plotsNEWLINE ax : matplotlib axes, optionalNEWLINE If None, uses the current axis. Not applicable when using facets.NEWLINE rtol : number, optionalNEWLINE Relative tolerance used to determine if the indexesNEWLINE are uniformly spaced. Usually a small positive number.NEWLINE subplot_kws : dict, optionalNEWLINE Dictionary of keyword arguments for matplotlib subplots. Only appliesNEWLINE to FacetGrid plotting.NEWLINE **kwargs : optionalNEWLINE Additional keyword arguments to matplotlibNEWLINENEWLINE """NEWLINE darray = darray.squeeze().compute()NEWLINENEWLINE plot_dims = set(darray.dims)NEWLINE plot_dims.discard(row)NEWLINE plot_dims.discard(col)NEWLINE plot_dims.discard(hue)NEWLINENEWLINE ndims = len(plot_dims)NEWLINENEWLINE error_msg = ('Only 1d and 2d plots are supported for facets in xarray. 
'NEWLINE 'See the package `Seaborn` for more options.')NEWLINENEWLINE if ndims in [1, 2]:NEWLINE if row or col:NEWLINE kwargs['row'] = rowNEWLINE kwargs['col'] = colNEWLINE kwargs['col_wrap'] = col_wrapNEWLINE kwargs['subplot_kws'] = subplot_kwsNEWLINE if ndims == 1:NEWLINE plotfunc = lineNEWLINE kwargs['hue'] = hueNEWLINE elif ndims == 2:NEWLINE if hue:NEWLINE plotfunc = lineNEWLINE kwargs['hue'] = hueNEWLINE else:NEWLINE plotfunc = pcolormeshNEWLINE else:NEWLINE if row or col or hue:NEWLINE raise ValueError(error_msg)NEWLINE plotfunc = histNEWLINENEWLINE kwargs['ax'] = axNEWLINENEWLINE return plotfunc(darray, **kwargs)NEWLINENEWLINENEWLINE# This function signature should not change so that it can useNEWLINE# matplotlib format stringsNEWLINEdef line(darray, *args, row=None, col=None, figsize=None, aspect=None,NEWLINE size=None, ax=None, hue=None, x=None, y=None, xincrease=None,NEWLINE yincrease=None, xscale=None, yscale=None, xticks=None, yticks=None,NEWLINE xlim=None, ylim=None, add_legend=True, _labels=True, **kwargs):NEWLINE """NEWLINE Line plot of DataArray index against valuesNEWLINENEWLINE Wraps :func:`matplotlib:matplotlib.pyplot.plot`NEWLINENEWLINE ParametersNEWLINE ----------NEWLINE darray : DataArrayNEWLINE Must be 1 dimensionalNEWLINE figsize : tuple, optionalNEWLINE A tuple (width, height) of the figure in inches.NEWLINE Mutually exclusive with ``size`` and ``ax``.NEWLINE aspect : scalar, optionalNEWLINE Aspect ratio of plot, so that ``aspect * size`` gives the width inNEWLINE inches. Only used if a ``size`` is provided.NEWLINE size : scalar, optionalNEWLINE If provided, create a new figure for the plot with the given size.NEWLINE Height (in inches) of each plot. See also: ``aspect``.NEWLINE ax : matplotlib axes object, optionalNEWLINE Axis on which to plot this figure. By default, use the current axis.NEWLINE Mutually exclusive with ``size`` and ``figsize``.NEWLINE hue : string, optionalNEWLINE Dimension or coordinate for which you want multiple lines plotted.NEWLINE If plotting against a 2D coordinate, ``hue`` must be a dimension.NEWLINE x, y : string, optionalNEWLINE Dimensions or coordinates for x, y axis.NEWLINE Only one of these may be specified.NEWLINE The other coordinate plots values from the DataArray on which thisNEWLINE plot method is called.NEWLINE xscale, yscale : 'linear', 'symlog', 'log', 'logit', optionalNEWLINE Specifies scaling for the x- and y-axes respectivelyNEWLINE xticks, yticks : Specify tick locations for x- and y-axesNEWLINE xlim, ylim : Specify x- and y-axes limitsNEWLINE xincrease : None, True, or False, optionalNEWLINE Should the values on the x axes be increasing from left to right?NEWLINE if None, use the default for the matplotlib function.NEWLINE yincrease : None, True, or False, optionalNEWLINE Should the values on the y axes be increasing from top to bottom?NEWLINE if None, use the default for the matplotlib function.NEWLINE add_legend : boolean, optionalNEWLINE Add legend with y axis coordinates (2D inputs only).NEWLINE *args, **kwargs : optionalNEWLINE Additional arguments to matplotlib.pyplot.plotNEWLINE """NEWLINE # Handle facetgrids firstNEWLINE if row or col:NEWLINE allargs = locals().copy()NEWLINE allargs.update(allargs.pop('kwargs'))NEWLINE allargs.pop('darray')NEWLINE return _easy_facetgrid(darray, line, kind='line', **allargs)NEWLINENEWLINE ndims = len(darray.dims)NEWLINE if ndims > 2:NEWLINE raise ValueError('Line plots are for 1- or 2-dimensional DataArrays. 
'NEWLINE 'Passed DataArray has {ndims} 'NEWLINE 'dimensions'.format(ndims=ndims))NEWLINENEWLINE # The allargs dict passed to _easy_facetgrid above contains argsNEWLINE if args is ():NEWLINE args = kwargs.pop('args', ())NEWLINE else:NEWLINE assert 'args' not in kwargsNEWLINENEWLINE ax = get_axis(figsize, size, aspect, ax)NEWLINE xplt, yplt, hueplt, xlabel, ylabel, huelabel = \NEWLINE _infer_line_data(darray, x, y, hue)NEWLINENEWLINE # Remove pd.Intervals if contained in xplt.values.NEWLINE if _valid_other_type(xplt.values, [pd.Interval]):NEWLINE # Is it a step plot? (see matplotlib.Axes.step)NEWLINE if kwargs.get('linestyle', '').startswith('steps-'):NEWLINE xplt_val, yplt_val = _interval_to_double_bound_points(xplt.values,NEWLINE yplt.values)NEWLINE # Remove steps-* to be sure that matplotlib is not confusedNEWLINE kwargs['linestyle'] = (kwargs['linestyle']NEWLINE .replace('steps-pre', '')NEWLINE .replace('steps-post', '')NEWLINE .replace('steps-mid', ''))NEWLINE if kwargs['linestyle'] == '':NEWLINE del kwargs['linestyle']NEWLINE else:NEWLINE xplt_val = _interval_to_mid_points(xplt.values)NEWLINE yplt_val = yplt.valuesNEWLINE xlabel += '_center'NEWLINE else:NEWLINE xplt_val = xplt.valuesNEWLINE yplt_val = yplt.valuesNEWLINENEWLINE _ensure_plottable(xplt_val, yplt_val)NEWLINENEWLINE primitive = ax.plot(xplt_val, yplt_val, *args, **kwargs)NEWLINENEWLINE if _labels:NEWLINE if xlabel is not None:NEWLINE ax.set_xlabel(xlabel)NEWLINENEWLINE if ylabel is not None:NEWLINE ax.set_ylabel(ylabel)NEWLINENEWLINE ax.set_title(darray._title_for_slice())NEWLINENEWLINE if darray.ndim == 2 and add_legend:NEWLINE ax.legend(handles=primitive,NEWLINE labels=list(hueplt.values),NEWLINE title=huelabel)NEWLINENEWLINE # Rotate dates on xlabelsNEWLINE # Do this without calling autofmt_xdate so that x-axes ticksNEWLINE # on other subplots (if any) are not deleted.NEWLINE # https://stackoverflow.com/questions/17430105/autofmt-xdate-deletes-x-axis-labels-of-all-subplotsNEWLINE if np.issubdtype(xplt.dtype, np.datetime64):NEWLINE for xlabels in ax.get_xticklabels():NEWLINE xlabels.set_rotation(30)NEWLINE xlabels.set_ha('right')NEWLINENEWLINE _update_axes(ax, xincrease, yincrease, xscale, yscale,NEWLINE xticks, yticks, xlim, ylim)NEWLINENEWLINE return primitiveNEWLINENEWLINENEWLINEdef step(darray, *args, where='pre', linestyle=None, ls=None, **kwargs):NEWLINE """NEWLINE Step plot of DataArray index against valuesNEWLINENEWLINE Similar to :func:`matplotlib:matplotlib.pyplot.step`NEWLINENEWLINE ParametersNEWLINE ----------NEWLINE where : {'pre', 'post', 'mid'}, optional, default 'pre'NEWLINE Define where the steps should be placed:NEWLINE - 'pre': The y value is continued constantly to the left fromNEWLINE every *x* position, i.e. the interval ``(x[i-1], x[i]]`` has theNEWLINE value ``y[i]``.NEWLINE - 'post': The y value is continued constantly to the right fromNEWLINE every *x* position, i.e. the interval ``[x[i], x[i+1])`` has theNEWLINE value ``y[i]``.NEWLINE - 'mid': Steps occur half-way between the *x* positions.NEWLINE Note that this parameter is ignored if the x coordinate consists ofNEWLINE :py:func:`pandas.Interval` values, e.g. as a result ofNEWLINE :py:func:`xarray.Dataset.groupby_bins`. 
In this case, the actualNEWLINE boundaries of the interval are used.NEWLINENEWLINE *args, **kwargs : optionalNEWLINE Additional arguments following :py:func:`xarray.plot.line`NEWLINE """NEWLINE if where not in {'pre', 'post', 'mid'}:NEWLINE raise ValueError("'where' argument to step must be "NEWLINE "'pre', 'post' or 'mid'")NEWLINENEWLINE if ls is not None:NEWLINE if linestyle is None:NEWLINE linestyle = lsNEWLINE else:NEWLINE raise TypeError('ls and linestyle are mutually exclusive')NEWLINE if linestyle is None:NEWLINE linestyle = ''NEWLINE linestyle = 'steps-' + where + linestyleNEWLINENEWLINE return line(darray, *args, linestyle=linestyle, **kwargs)NEWLINENEWLINENEWLINEdef hist(darray, figsize=None, size=None, aspect=None, ax=None,NEWLINE xincrease=None, yincrease=None, xscale=None, yscale=None,NEWLINE xticks=None, yticks=None, xlim=None, ylim=None, **kwargs):NEWLINE """NEWLINE Histogram of DataArrayNEWLINENEWLINE Wraps :func:`matplotlib:matplotlib.pyplot.hist`NEWLINENEWLINE Plots N dimensional arrays by first flattening the array.NEWLINENEWLINE ParametersNEWLINE ----------NEWLINE darray : DataArrayNEWLINE Can be any dimensionNEWLINE figsize : tuple, optionalNEWLINE A tuple (width, height) of the figure in inches.NEWLINE Mutually exclusive with ``size`` and ``ax``.NEWLINE aspect : scalar, optionalNEWLINE Aspect ratio of plot, so that ``aspect * size`` gives the width inNEWLINE inches. Only used if a ``size`` is provided.NEWLINE size : scalar, optionalNEWLINE If provided, create a new figure for the plot with the given size.NEWLINE Height (in inches) of each plot. See also: ``aspect``.NEWLINE ax : matplotlib axes object, optionalNEWLINE Axis on which to plot this figure. By default, use the current axis.NEWLINE Mutually exclusive with ``size`` and ``figsize``.NEWLINE **kwargs : optionalNEWLINE Additional keyword arguments to matplotlib.pyplot.histNEWLINENEWLINE """NEWLINE ax = get_axis(figsize, size, aspect, ax)NEWLINENEWLINE no_nan = np.ravel(darray.values)NEWLINE no_nan = no_nan[pd.notnull(no_nan)]NEWLINENEWLINE primitive = ax.hist(no_nan, **kwargs)NEWLINENEWLINE ax.set_title('Histogram')NEWLINE ax.set_xlabel(label_from_attrs(darray))NEWLINENEWLINE _update_axes(ax, xincrease, yincrease, xscale, yscale,NEWLINE xticks, yticks, xlim, ylim)NEWLINENEWLINE return primitiveNEWLINENEWLINENEWLINE# MUST run before any 2d plotting functions are defined sinceNEWLINE# _plot2d decorator adds them as methods here.NEWLINEclass _PlotMethods:NEWLINE """NEWLINE Enables use of xarray.plot functions as attributes on a DataArray.NEWLINE For example, DataArray.plot.imshowNEWLINE """NEWLINENEWLINE def __init__(self, darray):NEWLINE self._da = darrayNEWLINENEWLINE def __call__(self, **kwargs):NEWLINE return plot(self._da, **kwargs)NEWLINENEWLINE @functools.wraps(hist)NEWLINE def hist(self, ax=None, **kwargs):NEWLINE return hist(self._da, ax=ax, **kwargs)NEWLINENEWLINE @functools.wraps(line)NEWLINE def line(self, *args, **kwargs):NEWLINE return line(self._da, *args, **kwargs)NEWLINENEWLINE @functools.wraps(step)NEWLINE def step(self, *args, **kwargs):NEWLINE return step(self._da, *args, **kwargs)NEWLINENEWLINENEWLINEdef _plot2d(plotfunc):NEWLINE """NEWLINE Decorator for common 2d plotting logicNEWLINENEWLINE Also adds the 2d plot method to class _PlotMethodsNEWLINE """NEWLINE commondoc = """NEWLINE ParametersNEWLINE ----------NEWLINE darray : DataArrayNEWLINE Must be 2 dimensional, unless creating faceted plotsNEWLINE x : string, optionalNEWLINE Coordinate for x axis. 
If None use darray.dims[1]NEWLINE y : string, optionalNEWLINE Coordinate for y axis. If None use darray.dims[0]NEWLINE figsize : tuple, optionalNEWLINE A tuple (width, height) of the figure in inches.NEWLINE Mutually exclusive with ``size`` and ``ax``.NEWLINE aspect : scalar, optionalNEWLINE Aspect ratio of plot, so that ``aspect * size`` gives the width inNEWLINE inches. Only used if a ``size`` is provided.NEWLINE size : scalar, optionalNEWLINE If provided, create a new figure for the plot with the given size.NEWLINE Height (in inches) of each plot. See also: ``aspect``.NEWLINE ax : matplotlib axes object, optionalNEWLINE Axis on which to plot this figure. By default, use the current axis.NEWLINE Mutually exclusive with ``size`` and ``figsize``.NEWLINE row : string, optionalNEWLINE If passed, make row faceted plots on this dimension nameNEWLINE col : string, optionalNEWLINE If passed, make column faceted plots on this dimension nameNEWLINE col_wrap : integer, optionalNEWLINE Use together with ``col`` to wrap faceted plotsNEWLINE xscale, yscale : 'linear', 'symlog', 'log', 'logit', optionalNEWLINE Specifies scaling for the x- and y-axes respectivelyNEWLINE xticks, yticks : Specify tick locations for x- and y-axesNEWLINE xlim, ylim : Specify x- and y-axes limitsNEWLINE xincrease : None, True, or False, optionalNEWLINE Should the values on the x axes be increasing from left to right?NEWLINE if None, use the default for the matplotlib function.NEWLINE yincrease : None, True, or False, optionalNEWLINE Should the values on the y axes be increasing from top to bottom?NEWLINE if None, use the default for the matplotlib function.NEWLINE add_colorbar : Boolean, optionalNEWLINE Adds colorbar to axisNEWLINE add_labels : Boolean, optionalNEWLINE Use xarray metadata to label axesNEWLINE norm : ``matplotlib.colors.Normalize`` instance, optionalNEWLINE If the ``norm`` has vmin or vmax specified, the corresponding kwargNEWLINE must be None.NEWLINE vmin, vmax : floats, optionalNEWLINE Values to anchor the colormap, otherwise they are inferred from theNEWLINE data and other keyword arguments. When a diverging dataset is inferred,NEWLINE setting one of these values will fix the other by symmetry aroundNEWLINE ``center``. Setting both values prevents use of a diverging colormap.NEWLINE If discrete levels are provided as an explicit list, both of theseNEWLINE values are ignored.NEWLINE cmap : matplotlib colormap name or object, optionalNEWLINE The mapping from data values to color space. If not provided, thisNEWLINE will be either be ``viridis`` (if the function infers a sequentialNEWLINE dataset) or ``RdBu_r`` (if the function infers a diverging dataset).NEWLINE When `Seaborn` is installed, ``cmap`` may also be a `seaborn`NEWLINE color palette. If ``cmap`` is seaborn color palette and the plot typeNEWLINE is not ``contour`` or ``contourf``, ``levels`` must also be specified.NEWLINE colors : discrete colors to plot, optionalNEWLINE A single color or a list of colors. If the plot type is not ``contour``NEWLINE or ``contourf``, the ``levels`` argument is required.NEWLINE center : float, optionalNEWLINE The value at which to center the colormap. Passing this value impliesNEWLINE use of a diverging colormap. 
Setting it to ``False`` prevents use of aNEWLINE diverging colormap.NEWLINE robust : bool, optionalNEWLINE If True and ``vmin`` or ``vmax`` are absent, the colormap range isNEWLINE computed with 2nd and 98th percentiles instead of the extreme values.NEWLINE extend : {'neither', 'both', 'min', 'max'}, optionalNEWLINE How to draw arrows extending the colorbar beyond its limits. If notNEWLINE provided, extend is inferred from vmin, vmax and the data limits.NEWLINE levels : int or list-like object, optionalNEWLINE Split the colormap (cmap) into discrete color intervals. If an integerNEWLINE is provided, "nice" levels are chosen based on the data range: this canNEWLINE imply that the final number of levels is not exactly the expected one.NEWLINE Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent toNEWLINE setting ``levels=np.linspace(vmin, vmax, N)``.NEWLINE infer_intervals : bool, optionalNEWLINE Only applies to pcolormesh. If True, the coordinate intervals areNEWLINE passed to pcolormesh. If False, the original coordinates are usedNEWLINE (this can be useful for certain map projections). The default is toNEWLINE always infer intervals, unless the mesh is irregular and plotted onNEWLINE a map projection.NEWLINE subplot_kws : dict, optionalNEWLINE Dictionary of keyword arguments for matplotlib subplots. Only appliesNEWLINE to FacetGrid plotting.NEWLINE cbar_ax : matplotlib Axes, optionalNEWLINE Axes in which to draw the colorbar.NEWLINE cbar_kwargs : dict, optionalNEWLINE Dictionary of keyword arguments to pass to the colorbar.NEWLINE **kwargs : optionalNEWLINE Additional arguments to wrapped matplotlib functionNEWLINENEWLINE ReturnsNEWLINE -------NEWLINE artist :NEWLINE The same type of primitive artist that the wrapped matplotlibNEWLINE function returnsNEWLINE """NEWLINENEWLINE # Build on the original docstringNEWLINE plotfunc.__doc__ = '%s\n%s' % (plotfunc.__doc__, commondoc)NEWLINENEWLINE @functools.wraps(plotfunc)NEWLINE def newplotfunc(darray, x=None, y=None, figsize=None, size=None,NEWLINE aspect=None, ax=None, row=None, col=None,NEWLINE col_wrap=None, xincrease=True, yincrease=True,NEWLINE add_colorbar=None, add_labels=True, vmin=None, vmax=None,NEWLINE cmap=None, center=None, robust=False, extend=None,NEWLINE levels=None, infer_intervals=None, colors=None,NEWLINE subplot_kws=None, cbar_ax=None, cbar_kwargs=None,NEWLINE xscale=None, yscale=None, xticks=None, yticks=None,NEWLINE xlim=None, ylim=None, norm=None, **kwargs):NEWLINE # All 2d plots in xarray share this function signature.NEWLINE # Method signature below should be consistent.NEWLINENEWLINE # Decide on a default for the colorbar before facetgridsNEWLINE if add_colorbar is None:NEWLINE add_colorbar = plotfunc.__name__ != 'contour'NEWLINE imshow_rgb = (NEWLINE plotfunc.__name__ == 'imshow' andNEWLINE darray.ndim == (3 + (row is not None) + (col is not None)))NEWLINE if imshow_rgb:NEWLINE # Don't add a colorbar when showing an image with explicit colorsNEWLINE add_colorbar = FalseNEWLINE # Matplotlib does not support normalising RGB data, so do it here.NEWLINE # See eg. 
https://github.com/matplotlib/matplotlib/pull/10220NEWLINE if robust or vmax is not None or vmin is not None:NEWLINE darray = _rescale_imshow_rgb(darray, vmin, vmax, robust)NEWLINE vmin, vmax, robust = None, None, FalseNEWLINENEWLINE # Handle facetgrids firstNEWLINE if row or col:NEWLINE allargs = locals().copy()NEWLINE del allargs['darray']NEWLINE del allargs['imshow_rgb']NEWLINE allargs.update(allargs.pop('kwargs'))NEWLINE # Need the decorated plotting functionNEWLINE allargs['plotfunc'] = globals()[plotfunc.__name__]NEWLINE return _easy_facetgrid(darray, kind='dataarray', **allargs)NEWLINENEWLINE plt = import_matplotlib_pyplot()NEWLINENEWLINE rgb = kwargs.pop('rgb', None)NEWLINE if rgb is not None and plotfunc.__name__ != 'imshow':NEWLINE raise ValueError('The "rgb" keyword is only valid for imshow()')NEWLINE elif rgb is not None and not imshow_rgb:NEWLINE raise ValueError('The "rgb" keyword is only valid for imshow()'NEWLINE 'with a three-dimensional array (per facet)')NEWLINENEWLINE xlab, ylab = _infer_xy_labels(NEWLINE darray=darray, x=x, y=y, imshow=imshow_rgb, rgb=rgb)NEWLINENEWLINE # better to pass the ndarrays directly to plotting functionsNEWLINE xval = darray[xlab].valuesNEWLINE yval = darray[ylab].valuesNEWLINENEWLINE # check if we need to broadcast one dimensionNEWLINE if xval.ndim < yval.ndim:NEWLINE xval = np.broadcast_to(xval, yval.shape)NEWLINENEWLINE if yval.ndim < xval.ndim:NEWLINE yval = np.broadcast_to(yval, xval.shape)NEWLINENEWLINE # May need to transpose for correct x, y labelsNEWLINE # xlab may be the name of a coord, we have to check for dim namesNEWLINE if imshow_rgb:NEWLINE # For RGB[A] images, matplotlib requires the color dimensionNEWLINE # to be last. In Xarray the order should be unimportant, soNEWLINE # we transpose to (y, x, color) to make this work.NEWLINE yx_dims = (ylab, xlab)NEWLINE dims = yx_dims + tuple(d for d in darray.dims if d not in yx_dims)NEWLINE if dims != darray.dims:NEWLINE darray = darray.transpose(*dims, transpose_coords=True)NEWLINE elif darray[xlab].dims[-1] == darray.dims[0]:NEWLINE darray = darray.transpose(transpose_coords=True)NEWLINENEWLINE # Pass the data as a masked ndarray tooNEWLINE zval = darray.to_masked_array(copy=False)NEWLINENEWLINE # Replace pd.Intervals if contained in xval or yval.NEWLINE xplt, xlab_extra = _resolve_intervals_2dplot(xval, plotfunc.__name__)NEWLINE yplt, ylab_extra = _resolve_intervals_2dplot(yval, plotfunc.__name__)NEWLINENEWLINE _ensure_plottable(xplt, yplt)NEWLINENEWLINE cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(NEWLINE plotfunc, zval.data, **locals())NEWLINENEWLINE if 'contour' in plotfunc.__name__:NEWLINE # extend is a keyword argument only for contour and contourf, butNEWLINE # passing it to the colorbar is sufficient for imshow andNEWLINE # pcolormeshNEWLINE kwargs['extend'] = cmap_params['extend']NEWLINE kwargs['levels'] = cmap_params['levels']NEWLINE # if colors == a single color, matplotlib draws dashed negativeNEWLINE # contours. 
we lose this feature if we pass cmap and not colorsNEWLINE if isinstance(colors, str):NEWLINE cmap_params['cmap'] = NoneNEWLINE kwargs['colors'] = colorsNEWLINENEWLINE if 'pcolormesh' == plotfunc.__name__:NEWLINE kwargs['infer_intervals'] = infer_intervalsNEWLINENEWLINE if 'imshow' == plotfunc.__name__ and isinstance(aspect, str):NEWLINE # forbid usage of mpl stringsNEWLINE raise ValueError("plt.imshow's `aspect` kwarg is not available "NEWLINE "in xarray")NEWLINENEWLINE ax = get_axis(figsize, size, aspect, ax)NEWLINE primitive = plotfunc(xplt, yplt, zval, ax=ax, cmap=cmap_params['cmap'],NEWLINE vmin=cmap_params['vmin'],NEWLINE vmax=cmap_params['vmax'],NEWLINE norm=cmap_params['norm'],NEWLINE **kwargs)NEWLINENEWLINE # Label the plot with metadataNEWLINE if add_labels:NEWLINE ax.set_xlabel(label_from_attrs(darray[xlab], xlab_extra))NEWLINE ax.set_ylabel(label_from_attrs(darray[ylab], ylab_extra))NEWLINE ax.set_title(darray._title_for_slice())NEWLINENEWLINE if add_colorbar:NEWLINE if add_labels and 'label' not in cbar_kwargs:NEWLINE cbar_kwargs['label'] = label_from_attrs(darray)NEWLINE cbar = _add_colorbar(primitive, ax, cbar_ax, cbar_kwargs,NEWLINE cmap_params)NEWLINENEWLINE elif cbar_ax is not None or cbar_kwargs:NEWLINE # inform the user about keywords which aren't usedNEWLINE raise ValueError("cbar_ax and cbar_kwargs can't be used with "NEWLINE "add_colorbar=False.")NEWLINENEWLINE # origin kwarg overrides yincreaseNEWLINE if 'origin' in kwargs:NEWLINE yincrease = NoneNEWLINENEWLINE _update_axes(ax, xincrease, yincrease, xscale, yscale,NEWLINE xticks, yticks, xlim, ylim)NEWLINENEWLINE # Rotate dates on xlabelsNEWLINE # Do this without calling autofmt_xdate so that x-axes ticksNEWLINE # on other subplots (if any) are not deleted.NEWLINE # https://stackoverflow.com/questions/17430105/autofmt-xdate-deletes-x-axis-labels-of-all-subplotsNEWLINE if np.issubdtype(xplt.dtype, np.datetime64):NEWLINE for xlabels in ax.get_xticklabels():NEWLINE xlabels.set_rotation(30)NEWLINE xlabels.set_ha('right')NEWLINENEWLINE return primitiveNEWLINENEWLINE # For use as DataArray.plot.plotmethodNEWLINE @functools.wraps(newplotfunc)NEWLINE def plotmethod(_PlotMethods_obj, x=None, y=None, figsize=None, size=None,NEWLINE aspect=None, ax=None, row=None, col=None, col_wrap=None,NEWLINE xincrease=True, yincrease=True, add_colorbar=None,NEWLINE add_labels=True, vmin=None, vmax=None, cmap=None,NEWLINE colors=None, center=None, robust=False, extend=None,NEWLINE levels=None, infer_intervals=None, subplot_kws=None,NEWLINE cbar_ax=None, cbar_kwargs=None,NEWLINE xscale=None, yscale=None, xticks=None, yticks=None,NEWLINE xlim=None, ylim=None, norm=None, **kwargs):NEWLINE """NEWLINE The method should have the same signature as the function.NEWLINENEWLINE This just makes the method work on Plotmethods objects,NEWLINE and passes all the other arguments straight through.NEWLINE """NEWLINE allargs = locals()NEWLINE allargs['darray'] = _PlotMethods_obj._daNEWLINE allargs.update(kwargs)NEWLINE for arg in ['_PlotMethods_obj', 'newplotfunc', 'kwargs']:NEWLINE del allargs[arg]NEWLINE return newplotfunc(**allargs)NEWLINENEWLINE # Add to class _PlotMethodsNEWLINE setattr(_PlotMethods, plotmethod.__name__, plotmethod)NEWLINENEWLINE return newplotfuncNEWLINENEWLINENEWLINE@_plot2dNEWLINEdef imshow(x, y, z, ax, **kwargs):NEWLINE """NEWLINE Image plot of 2d DataArray using matplotlib.pyplotNEWLINENEWLINE Wraps :func:`matplotlib:matplotlib.pyplot.imshow`NEWLINENEWLINE While other plot methods require the DataArray to be strictlyNEWLINE 
two-dimensional, ``imshow`` also accepts a 3D array where someNEWLINE dimension can be interpreted as RGB or RGBA color channels andNEWLINE allows this dimension to be specified via the kwarg ``rgb=``.NEWLINENEWLINE Unlike matplotlib, Xarray can apply ``vmin`` and ``vmax`` to RGB or RGBANEWLINE data, by applying a single scaling factor and offset to all bands.NEWLINE Passing ``robust=True`` infers ``vmin`` and ``vmax``NEWLINE :ref:`in the usual way <robust-plotting>`.NEWLINENEWLINE .. note::NEWLINE This function needs uniformly spaced coordinates toNEWLINE properly label the axes. Call DataArray.plot() to check.NEWLINENEWLINE The pixels are centered on the coordinates values. Ie, if the coordinateNEWLINE value is 3.2 then the pixels for those coordinates will be centered on 3.2.NEWLINE """NEWLINENEWLINE if x.ndim != 1 or y.ndim != 1:NEWLINE raise ValueError('imshow requires 1D coordinates, try using 'NEWLINE 'pcolormesh or contour(f)')NEWLINENEWLINE # Centering the pixels- Assumes uniform spacingNEWLINE try:NEWLINE xstep = (x[1] - x[0]) / 2.0NEWLINE except IndexError:NEWLINE # Arbitrary default value, similar to matplotlib behaviourNEWLINE xstep = .1NEWLINE try:NEWLINE ystep = (y[1] - y[0]) / 2.0NEWLINE except IndexError:NEWLINE ystep = .1NEWLINE left, right = x[0] - xstep, x[-1] + xstepNEWLINE bottom, top = y[-1] + ystep, y[0] - ystepNEWLINENEWLINE defaults = {'origin': 'upper',NEWLINE 'interpolation': 'nearest'}NEWLINENEWLINE if not hasattr(ax, 'projection'):NEWLINE # not for cartopy geoaxesNEWLINE defaults['aspect'] = 'auto'NEWLINENEWLINE # Allow user to override these defaultsNEWLINE defaults.update(kwargs)NEWLINENEWLINE if defaults['origin'] == 'upper':NEWLINE defaults['extent'] = [left, right, bottom, top]NEWLINE else:NEWLINE defaults['extent'] = [left, right, top, bottom]NEWLINENEWLINE if z.ndim == 3:NEWLINE # matplotlib imshow uses black for missing data, but Xarray makesNEWLINE # missing data transparent. 
We therefore add an alpha channel ifNEWLINE # there isn't one, and set it to transparent where data is masked.NEWLINE if z.shape[-1] == 3:NEWLINE alpha = np.ma.ones(z.shape[:2] + (1,), dtype=z.dtype)NEWLINE if np.issubdtype(z.dtype, np.integer):NEWLINE alpha *= 255NEWLINE z = np.ma.concatenate((z, alpha), axis=2)NEWLINE else:NEWLINE z = z.copy()NEWLINE z[np.any(z.mask, axis=-1), -1] = 0NEWLINENEWLINE primitive = ax.imshow(z, **defaults)NEWLINENEWLINE return primitiveNEWLINENEWLINENEWLINE@_plot2dNEWLINEdef contour(x, y, z, ax, **kwargs):NEWLINE """NEWLINE Contour plot of 2d DataArrayNEWLINENEWLINE Wraps :func:`matplotlib:matplotlib.pyplot.contour`NEWLINE """NEWLINE primitive = ax.contour(x, y, z, **kwargs)NEWLINE return primitiveNEWLINENEWLINENEWLINE@_plot2dNEWLINEdef contourf(x, y, z, ax, **kwargs):NEWLINE """NEWLINE Filled contour plot of 2d DataArrayNEWLINENEWLINE Wraps :func:`matplotlib:matplotlib.pyplot.contourf`NEWLINE """NEWLINE primitive = ax.contourf(x, y, z, **kwargs)NEWLINE return primitiveNEWLINENEWLINENEWLINE@_plot2dNEWLINEdef pcolormesh(x, y, z, ax, infer_intervals=None, **kwargs):NEWLINE """NEWLINE Pseudocolor plot of 2d DataArrayNEWLINENEWLINE Wraps :func:`matplotlib:matplotlib.pyplot.pcolormesh`NEWLINE """NEWLINENEWLINE # decide on a default for infer_intervals (GH781)NEWLINE x = np.asarray(x)NEWLINE if infer_intervals is None:NEWLINE if hasattr(ax, 'projection'):NEWLINE if len(x.shape) == 1:NEWLINE infer_intervals = TrueNEWLINE else:NEWLINE infer_intervals = FalseNEWLINE else:NEWLINE infer_intervals = TrueNEWLINENEWLINE if (infer_intervals andNEWLINE ((np.shape(x)[0] == np.shape(z)[1]) orNEWLINE ((x.ndim > 1) and (np.shape(x)[1] == np.shape(z)[1])))):NEWLINE if len(x.shape) == 1:NEWLINE x = _infer_interval_breaks(x, check_monotonic=True)NEWLINE else:NEWLINE # we have to infer the intervals on both axesNEWLINE x = _infer_interval_breaks(x, axis=1)NEWLINE x = _infer_interval_breaks(x, axis=0)NEWLINENEWLINE if (infer_intervals andNEWLINE (np.shape(y)[0] == np.shape(z)[0])):NEWLINE if len(y.shape) == 1:NEWLINE y = _infer_interval_breaks(y, check_monotonic=True)NEWLINE else:NEWLINE # we have to infer the intervals on both axesNEWLINE y = _infer_interval_breaks(y, axis=1)NEWLINE y = _infer_interval_breaks(y, axis=0)NEWLINENEWLINE primitive = ax.pcolormesh(x, y, z, **kwargs)NEWLINENEWLINE # by default, pcolormesh picks "round" values for boundsNEWLINE # this results in ugly looking plots with lots of surrounding whitespaceNEWLINE if not hasattr(ax, 'projection') and x.ndim == 1 and y.ndim == 1:NEWLINE # not a cartopy geoaxisNEWLINE ax.set_xlim(x[0], x[-1])NEWLINE ax.set_ylim(y[0], y[-1])NEWLINENEWLINE return primitiveNEWLINE
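
# A minimal usage sketch for the plotting entry points above; the DataArray
# built here is illustrative, not part of the module.
import numpy as np
import xarray as xr

air = xr.DataArray(np.random.rand(4, 5), dims=('lat', 'lon'),
                   coords={'lat': np.arange(4), 'lon': np.arange(5)})
air.plot()              # 2 dims -> dispatches to pcolormesh
air.plot(hue='lat')     # 2 dims + hue -> multiple lines
air.isel(lat=0).plot()  # 1 dim -> line
air.plot.hist()         # any dims -> histogram of flattened values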
"""NEWLINESprite Collect CoinsNEWLINENEWLINESimple program to show basic sprite usage.NEWLINENEWLINEArtwork from http://kenney.nlNEWLINE"""NEWLINEimport randomNEWLINEimport arcadeNEWLINENEWLINESPRITE_SCALING = 0.5NEWLINENEWLINESCREEN_WIDTH = 800NEWLINESCREEN_HEIGHT = 600NEWLINENEWLINEwindow = NoneNEWLINENEWLINENEWLINEclass MyApplication(arcade.Window):NEWLINE """ Main application class. """NEWLINENEWLINE def setup(self):NEWLINE """ Set up the game and initialize the variables. """NEWLINENEWLINE # Sprite listsNEWLINE self.all_sprites_list = arcade.SpriteList()NEWLINE self.coin_list = arcade.SpriteList()NEWLINENEWLINE # Set up the playerNEWLINE self.score = 0NEWLINE self.player_sprite = arcade.Sprite("images/character.png",NEWLINE SPRITE_SCALING)NEWLINE self.player_sprite.center_x = 50NEWLINE self.player_sprite.center_y = 50NEWLINE self.all_sprites_list.append(self.player_sprite)NEWLINENEWLINE for i in range(50):NEWLINENEWLINE # Create the coin instanceNEWLINE coin = arcade.Sprite("images/coin_01.png", SPRITE_SCALING / 3)NEWLINENEWLINE # Position the coinNEWLINE coin.center_x = random.randrange(SCREEN_WIDTH)NEWLINE coin.center_y = random.randrange(SCREEN_HEIGHT)NEWLINENEWLINE # Add the coin to the listsNEWLINE self.all_sprites_list.append(coin)NEWLINE self.coin_list.append(coin)NEWLINENEWLINE # Don't show the mouse cursorNEWLINE self.set_mouse_visible(False)NEWLINENEWLINE # Set the background colorNEWLINE arcade.set_background_color(arcade.color.AMAZON)NEWLINENEWLINE def on_draw(self):NEWLINE """NEWLINE Render the screen.NEWLINE """NEWLINENEWLINE # This command has to happen before we start drawingNEWLINE arcade.start_render()NEWLINENEWLINE # Draw all the sprites.NEWLINE self.all_sprites_list.draw()NEWLINENEWLINE # Put the text on the screen.NEWLINE output = "Score: {}".format(self.score)NEWLINE arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)NEWLINENEWLINE def on_mouse_motion(self, x, y, dx, dy):NEWLINE """NEWLINE Called whenever the mouse moves.NEWLINE """NEWLINE self.player_sprite.center_x = xNEWLINE self.player_sprite.center_y = yNEWLINENEWLINE def animate(self, delta_time):NEWLINE """ Movement and game logic """NEWLINENEWLINE # Call update on all sprites (The sprites don't do much in thisNEWLINE # example though.)NEWLINE self.all_sprites_list.update()NEWLINENEWLINE # Generate a list of all sprites that collided with the player.NEWLINE hit_list = \NEWLINE arcade.check_for_collision_with_list(self.player_sprite,NEWLINE self.coin_list)NEWLINENEWLINE # Loop through each colliding sprite, remove it, and add to the score.NEWLINE for coin in hit_list:NEWLINE coin.kill()NEWLINE self.score += 1NEWLINENEWLINENEWLINEwindow = MyApplication(SCREEN_WIDTH, SCREEN_HEIGHT)NEWLINEwindow.setup()NEWLINENEWLINEarcade.run()NEWLINE
import os

from celery import Celery

# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")

app = Celery("finance_api")

# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
#   should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")

# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
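
# A minimal sketch of a task module that autodiscover_tasks() above would pick
# up: Celery looks for a tasks.py inside each installed Django app. The app
# path and the task body below are hypothetical.
#
# e.g. in some_app/tasks.py:
from celery import shared_task

@shared_task
def sample_task():
    # placeholder body; a real task would do work here
    return "done"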
# coding: utf-8

"""
    convertapi

    Convert API lets you effortlessly convert file formats and types.  # noqa: E501

    OpenAPI spec version: v1

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""


import pprint
import re  # noqa: F401

import six


class SetFormFieldValue(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'field_name': 'str',
        'text_value': 'str',
        'checkbox_value': 'bool',
        'combo_box_selected_index': 'int'
    }

    attribute_map = {
        'field_name': 'FieldName',
        'text_value': 'TextValue',
        'checkbox_value': 'CheckboxValue',
        'combo_box_selected_index': 'ComboBoxSelectedIndex'
    }

    def __init__(self, field_name=None, text_value=None, checkbox_value=None, combo_box_selected_index=None):  # noqa: E501
        """SetFormFieldValue - a model defined in Swagger"""  # noqa: E501

        self._field_name = None
        self._text_value = None
        self._checkbox_value = None
        self._combo_box_selected_index = None
        self.discriminator = None

        if field_name is not None:
            self.field_name = field_name
        if text_value is not None:
            self.text_value = text_value
        if checkbox_value is not None:
            self.checkbox_value = checkbox_value
        if combo_box_selected_index is not None:
            self.combo_box_selected_index = combo_box_selected_index

    @property
    def field_name(self):
        """Gets the field_name of this SetFormFieldValue.  # noqa: E501

        Name of the field to set; you can call /convert/edit/pdf/form/get-fields to enumerate field names in a form  # noqa: E501

        :return: The field_name of this SetFormFieldValue.  # noqa: E501
        :rtype: str
        """
        return self._field_name

    @field_name.setter
    def field_name(self, field_name):
        """Sets the field_name of this SetFormFieldValue.

        Name of the field to set; you can call /convert/edit/pdf/form/get-fields to enumerate field names in a form  # noqa: E501

        :param field_name: The field_name of this SetFormFieldValue.  # noqa: E501
        :type: str
        """

        self._field_name = field_name

    @property
    def text_value(self):
        """Gets the text_value of this SetFormFieldValue.  # noqa: E501

        For fields of type Text, the text value to put into the field  # noqa: E501

        :return: The text_value of this SetFormFieldValue.  # noqa: E501
        :rtype: str
        """
        return self._text_value

    @text_value.setter
    def text_value(self, text_value):
        """Sets the text_value of this SetFormFieldValue.

        For fields of type Text, the text value to put into the field  # noqa: E501

        :param text_value: The text_value of this SetFormFieldValue.  # noqa: E501
        :type: str
        """

        self._text_value = text_value

    @property
    def checkbox_value(self):
        """Gets the checkbox_value of this SetFormFieldValue.  # noqa: E501

        For fields of type Checkbox, the value to put into the field  # noqa: E501

        :return: The checkbox_value of this SetFormFieldValue.  # noqa: E501
        :rtype: bool
        """
        return self._checkbox_value

    @checkbox_value.setter
    def checkbox_value(self, checkbox_value):
        """Sets the checkbox_value of this SetFormFieldValue.

        For fields of type Checkbox, the value to put into the field  # noqa: E501

        :param checkbox_value: The checkbox_value of this SetFormFieldValue.  # noqa: E501
        :type: bool
        """

        self._checkbox_value = checkbox_value

    @property
    def combo_box_selected_index(self):
        """Gets the combo_box_selected_index of this SetFormFieldValue.  # noqa: E501

        For fields of type ComboBox; specifies the selected index of the combo box selection  # noqa: E501

        :return: The combo_box_selected_index of this SetFormFieldValue.  # noqa: E501
        :rtype: int
        """
        return self._combo_box_selected_index

    @combo_box_selected_index.setter
    def combo_box_selected_index(self, combo_box_selected_index):
        """Sets the combo_box_selected_index of this SetFormFieldValue.

        For fields of type ComboBox; specifies the selected index of the combo box selection  # noqa: E501

        :param combo_box_selected_index: The combo_box_selected_index of this SetFormFieldValue.  # noqa: E501
        :type: int
        """

        self._combo_box_selected_index = combo_box_selected_index

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(SetFormFieldValue, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SetFormFieldValue):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
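
# A short usage sketch for the generated model above; the field name and value
# are illustrative.
form_value = SetFormFieldValue(field_name='ApplicantName', text_value='Jane Doe')
print(form_value.to_dict())
# -> {'field_name': 'ApplicantName', 'text_value': 'Jane Doe',
#     'checkbox_value': None, 'combo_box_selected_index': None}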
"""NEWLINEUnit Test ConfigurationNEWLINE"""NEWLINEimport loggingNEWLINEimport osNEWLINEimport sqlite3NEWLINENEWLINEfrom test.common.utilities import get_mock_qboNEWLINENEWLINEimport pytestNEWLINEfrom qbo_db_connector import QuickbooksExtractConnector, QuickbooksLoadConnectorNEWLINENEWLINElogger = logging.getLogger(__name__)NEWLINENEWLINENEWLINE@pytest.fixtureNEWLINEdef qbo():NEWLINE """NEWLINE Quickbooks Online SDK Mock ObjectNEWLINE """NEWLINE return get_mock_qbo()NEWLINENEWLINENEWLINE@pytest.fixtureNEWLINEdef dbconn():NEWLINE """NEWLINE Make DB ConnectionNEWLINE :return: DB ConnectionNEWLINE """NEWLINE sqlite_db_file = '/tmp/test_qbo.db'NEWLINE if os.path.exists(sqlite_db_file):NEWLINE os.remove(sqlite_db_file)NEWLINE conn = sqlite3.connect(sqlite_db_file, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)NEWLINE return connNEWLINENEWLINENEWLINE@pytest.fixtureNEWLINEdef qec(qbo, dbconn):NEWLINE """NEWLINE Quickbooks Extract instance with mock connectionNEWLINE """NEWLINE res = QuickbooksExtractConnector(qbo_connection=qbo, dbconn=dbconn)NEWLINE res.create_tables()NEWLINE return resNEWLINENEWLINENEWLINE@pytest.fixtureNEWLINEdef qlc(qbo, dbconn):NEWLINE """NEWLINE Quickbooks Load instance with mock connectionNEWLINE """NEWLINE res = QuickbooksLoadConnector(qbo_connection=qbo, dbconn=dbconn)NEWLINE res.create_tables()NEWLINE return resNEWLINE
"""NEWLINEMIT LicenseNEWLINENEWLINECopyright (c) 2020-present phenom4n4nNEWLINENEWLINEPermission is hereby granted, free of charge, to any person obtaining a copyNEWLINEof this software and associated documentation files (the "Software"), to dealNEWLINEin the Software without restriction, including without limitation the rightsNEWLINEto use, copy, modify, merge, publish, distribute, sublicense, and/or sellNEWLINEcopies of the Software, and to permit persons to whom the Software isNEWLINEfurnished to do so, subject to the following conditions:NEWLINENEWLINEThe above copyright notice and this permission notice shall be included in allNEWLINEcopies or substantial portions of the Software.NEWLINENEWLINETHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ORNEWLINEIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,NEWLINEFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THENEWLINEAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHERNEWLINELIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,NEWLINEOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THENEWLINESOFTWARE.NEWLINE"""NEWLINENEWLINEimport loggingNEWLINEfrom collections import defaultdictNEWLINEfrom colorsys import rgb_to_hsvNEWLINEfrom typing import List, OptionalNEWLINENEWLINEimport discordNEWLINEfrom redbot.core import commandsNEWLINEfrom redbot.core.utils.chat_formatting import humanize_number as hnNEWLINEfrom redbot.core.utils.chat_formatting import pagify, text_to_fileNEWLINEfrom redbot.core.utils.mod import get_audit_reasonNEWLINEfrom TagScriptEngine import Interpreter, LooseVariableGetterBlock, MemberAdapterNEWLINENEWLINEfrom .abc import MixinMetaNEWLINEfrom .converters import FuzzyRole, StrictRole, TargeterArgs, TouchableMemberNEWLINEfrom .utils import (NEWLINE can_run_command,NEWLINE guild_roughly_chunked,NEWLINE humanize_roles,NEWLINE is_allowed_by_role_hierarchy,NEWLINE)NEWLINENEWLINElog = logging.getLogger("red.phenom4n4n.roleutils")NEWLINENEWLINENEWLINEdef targeter_cog(ctx: commands.Context):NEWLINE cog = ctx.bot.get_cog("Targeter")NEWLINE return cog is not None and hasattr(cog, "args_to_list")NEWLINENEWLINENEWLINEdef chunks(l, n):NEWLINE """NEWLINE Yield successive n-sized chunks from l.NEWLINE https://github.com/flaree/flare-cogs/blob/08b78e33ab814aa4da5422d81a5037ae3df51d4e/commandstats/commandstats.py#L16NEWLINE """NEWLINE for i in range(0, len(l), n):NEWLINE yield l[i : i + n]NEWLINENEWLINENEWLINEclass Roles(MixinMeta):NEWLINE """NEWLINE Useful role commands.NEWLINE """NEWLINENEWLINE def __init__(self):NEWLINE self.interpreter = Interpreter([LooseVariableGetterBlock()])NEWLINE super().__init__()NEWLINENEWLINE async def initialize(self):NEWLINE log.debug("Roles Initialize")NEWLINE await super().initialize()NEWLINENEWLINE @commands.guild_only()NEWLINE @commands.group(invoke_without_command=True)NEWLINE async def role(NEWLINE self, ctx: commands.Context, member: TouchableMember(False), *, role: StrictRole(False)NEWLINE ):NEWLINE """Base command for modifying roles.NEWLINENEWLINE Invoking this command will add or remove the given role from the member, depending on whether they already had it."""NEWLINE if role in member.roles and await can_run_command(ctx, "role remove"):NEWLINE com = self.bot.get_command("role remove")NEWLINE await ctx.invoke(NEWLINE com,NEWLINE member=member,NEWLINE role=role,NEWLINE )NEWLINE elif role not in member.roles and await can_run_command(ctx, "role add"):NEWLINE com = 
self.bot.get_command("role add")NEWLINE await ctx.invoke(NEWLINE com,NEWLINE member=member,NEWLINE role=role,NEWLINE )NEWLINE else:NEWLINE await ctx.send_help()NEWLINENEWLINE @commands.bot_has_permissions(embed_links=True)NEWLINE @role.command("info")NEWLINE async def role_info(self, ctx: commands.Context, *, role: FuzzyRole):NEWLINE """Get information about a role."""NEWLINE await ctx.send(embed=await self.get_info(role))NEWLINENEWLINE async def get_info(self, role: discord.Role) -> discord.Embed:NEWLINE if guild_roughly_chunked(role.guild) is False and self.bot.intents.members:NEWLINE await role.guild.chunk()NEWLINE description = [NEWLINE f"{role.mention}",NEWLINE f"Members: {len(role.members)} | Position: {role.position}",NEWLINE f"Color: {role.color}",NEWLINE f"Hoisted: {role.hoist}",NEWLINE f"Mentionable: {role.mentionable}",NEWLINE ]NEWLINE if role.managed:NEWLINE description.append(f"Managed: {role.managed}")NEWLINE if role in await self.bot.get_mod_roles(role.guild):NEWLINE description.append(f"Mod Role: True")NEWLINE if role in await self.bot.get_admin_roles(role.guild):NEWLINE description.append(f"Admin Role: True")NEWLINE e = discord.Embed(NEWLINE color=role.color,NEWLINE title=role.name,NEWLINE description="\n".join(description),NEWLINE timestamp=role.created_at,NEWLINE )NEWLINE e.set_footer(text=role.id)NEWLINE return eNEWLINENEWLINE def format_member(self, member: discord.Member, formatting: str) -> str:NEWLINE output = self.interpreter.process(formatting, {"member": MemberAdapter(member)})NEWLINE return output.bodyNEWLINENEWLINE @commands.bot_has_permissions(attach_files=True)NEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @role.command("members", aliases=["dump"])NEWLINE async def role_members(NEWLINE self,NEWLINE ctx: commands.Context,NEWLINE role: FuzzyRole,NEWLINE *,NEWLINE formatting: str = "{member} - {member(id)}",NEWLINE ):NEWLINE """NEWLINE Sends a list of members in a role.NEWLINENEWLINE You can supply a custom formatting tagscript for each member.NEWLINE The [member](https://phen-cogs.readthedocs.io/en/latest/tags/default_variables.html#author-block) block is available to use, found on the [TagScript documentation](https://phen-cogs.readthedocs.io/en/latest/index.html).NEWLINENEWLINE **Example:**NEWLINE `[p]role dump @admin <t:{member(timestamp)}> - {member(mention)}`NEWLINE """NEWLINE if guild_roughly_chunked(ctx.guild) is False and self.bot.intents.members:NEWLINE await ctx.guild.chunk()NEWLINE if not role.members:NEWLINE return await ctx.send(f"**{role}** has no members.")NEWLINE members = "\n".join(self.format_member(member, formatting) for member in role.members)NEWLINE if len(members) > 2000:NEWLINE await ctx.send(file=text_to_file(members, f"members.txt"))NEWLINE else:NEWLINE await ctx.send(members, allowed_mentions=discord.AllowedMentions.none())NEWLINENEWLINE @staticmethodNEWLINE def get_hsv(role: discord.Role):NEWLINE return rgb_to_hsv(*role.color.to_rgb())NEWLINENEWLINE @commands.bot_has_permissions(embed_links=True)NEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @role.command("colors")NEWLINE async def role_colors(self, ctx: commands.Context):NEWLINE """Sends the server's roles, ordered by color."""NEWLINE roles = defaultdict(list)NEWLINE for r in ctx.guild.roles:NEWLINE roles[str(r.color)].append(r)NEWLINE roles = dict(sorted(roles.items(), key=lambda v: self.get_hsv(v[1][0])))NEWLINENEWLINE lines = [f"**{color}**\n{' '.join(r.mention for r in rs)}" for color, rs in roles.items()]NEWLINE for page in 
pagify("\n".join(lines)):NEWLINE e = discord.Embed(description=page)NEWLINE await ctx.send(embed=e)NEWLINENEWLINE @commands.bot_has_permissions(manage_roles=True)NEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @role.command("create")NEWLINE async def role_create(NEWLINE self,NEWLINE ctx: commands.Context,NEWLINE color: Optional[discord.Color] = discord.Color.default(),NEWLINE hoist: Optional[bool] = False,NEWLINE *,NEWLINE name: str = None,NEWLINE ):NEWLINE """NEWLINE Creates a role.NEWLINENEWLINE Color and whether it is hoisted can be specified.NEWLINE """NEWLINE if len(ctx.guild.roles) >= 250:NEWLINE return await ctx.send("This server has reached the maximum role limit (250).")NEWLINENEWLINE role = await ctx.guild.create_role(name=name, colour=color, hoist=hoist)NEWLINE await ctx.send(f"**{role}** created!", embed=await self.get_info(role))NEWLINENEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @commands.bot_has_permissions(manage_roles=True)NEWLINE @role.command("color", aliases=["colour"])NEWLINE async def role_color(NEWLINE self, ctx: commands.Context, role: StrictRole(check_integrated=False), color: discord.ColorNEWLINE ):NEWLINE """Change a role's color."""NEWLINE await role.edit(color=color)NEWLINE await ctx.send(NEWLINE f"**{role}** color changed to **{color}**.", embed=await self.get_info(role)NEWLINE )NEWLINENEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @commands.bot_has_permissions(manage_roles=True)NEWLINE @role.command("hoist")NEWLINE async def role_hoist(NEWLINE self,NEWLINE ctx: commands.Context,NEWLINE role: StrictRole(check_integrated=False),NEWLINE hoisted: bool = None,NEWLINE ):NEWLINE """Toggle whether a role should appear seperate from other roles."""NEWLINE hoisted = hoisted if hoisted is not None else not role.hoistNEWLINE await role.edit(hoist=hoisted)NEWLINE now = "now" if hoisted else "no longer"NEWLINE await ctx.send(f"**{role}** is {now} hoisted.", embed=await self.get_info(role))NEWLINENEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @commands.bot_has_permissions(manage_roles=True)NEWLINE @role.command("name")NEWLINE async def role_name(NEWLINE self, ctx: commands.Context, role: StrictRole(check_integrated=False), *, name: strNEWLINE ):NEWLINE """Change a role's name."""NEWLINE old_name = role.nameNEWLINE await role.edit(name=name)NEWLINE await ctx.send(f"Changed **{old_name}** to **{name}**.", embed=await self.get_info(role))NEWLINENEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @commands.bot_has_permissions(manage_roles=True)NEWLINE @role.command("add")NEWLINE async def role_add(self, ctx: commands.Context, member: TouchableMember, *, role: StrictRole):NEWLINE """Add a role to a member."""NEWLINE if role in member.roles:NEWLINE await ctx.send(NEWLINE f"**{member}** already has the role **{role}**. Maybe try removing it instead."NEWLINE )NEWLINE returnNEWLINE reason = get_audit_reason(ctx.author)NEWLINE await member.add_roles(role, reason=reason)NEWLINE await ctx.send(f"Added **{role.name}** to **{member}**.")NEWLINENEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @commands.bot_has_permissions(manage_roles=True)NEWLINE @role.command("remove")NEWLINE async def role_remove(NEWLINE self, ctx: commands.Context, member: TouchableMember, *, role: StrictRoleNEWLINE ):NEWLINE """Remove a role from a member."""NEWLINE if role not in member.roles:NEWLINE await ctx.send(NEWLINE f"**{member}** doesn't have the role **{role}**. 
Maybe try adding it instead."NEWLINE )NEWLINE returnNEWLINE reason = get_audit_reason(ctx.author)NEWLINE await member.remove_roles(role, reason=reason)NEWLINE await ctx.send(f"Removed **{role.name}** from **{member}**.")NEWLINENEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @commands.bot_has_permissions(manage_roles=True)NEWLINE @role.command(require_var_positional=True)NEWLINE async def addmulti(self, ctx: commands.Context, role: StrictRole, *members: TouchableMember):NEWLINE """Add a role to multiple members."""NEWLINE reason = get_audit_reason(ctx.author)NEWLINE already_members = []NEWLINE success_members = []NEWLINE for member in members:NEWLINE if role not in member.roles:NEWLINE await member.add_roles(role, reason=reason)NEWLINE success_members.append(member)NEWLINE else:NEWLINE already_members.append(member)NEWLINE msg = []NEWLINE if success_members:NEWLINE msg.append(f"Added **{role}** to {humanize_roles(success_members)}.")NEWLINE if already_members:NEWLINE msg.append(f"{humanize_roles(already_members)} already had **{role}**.")NEWLINE await ctx.send("\n".join(msg))NEWLINENEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @commands.bot_has_permissions(manage_roles=True)NEWLINE @role.command(require_var_positional=True)NEWLINE async def removemulti(NEWLINE self, ctx: commands.Context, role: StrictRole, *members: TouchableMemberNEWLINE ):NEWLINE """Remove a role from multiple members."""NEWLINE reason = get_audit_reason(ctx.author)NEWLINE already_members = []NEWLINE success_members = []NEWLINE for member in members:NEWLINE if role in member.roles:NEWLINE await member.remove_roles(role, reason=reason)NEWLINE success_members.append(member)NEWLINE else:NEWLINE already_members.append(member)NEWLINE msg = []NEWLINE if success_members:NEWLINE msg.append(f"Removed **{role}** from {humanize_roles(success_members)}.")NEWLINE if already_members:NEWLINE msg.append(f"{humanize_roles(already_members)} didn't have **{role}**.")NEWLINE await ctx.send("\n".join(msg))NEWLINENEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @commands.bot_has_permissions(manage_roles=True)NEWLINE @commands.group(invoke_without_command=True, require_var_positional=True)NEWLINE async def multirole(self, ctx: commands.Context, member: TouchableMember, *roles: StrictRole):NEWLINE """Add multiple roles to a member."""NEWLINE not_allowed = []NEWLINE already_added = []NEWLINE to_add = []NEWLINE for role in roles:NEWLINE allowed = await is_allowed_by_role_hierarchy(self.bot, ctx.me, ctx.author, role)NEWLINE if not allowed[0]:NEWLINE not_allowed.append(role)NEWLINE elif role in member.roles:NEWLINE already_added.append(role)NEWLINE else:NEWLINE to_add.append(role)NEWLINE reason = get_audit_reason(ctx.author)NEWLINE msg = []NEWLINE if to_add:NEWLINE await member.add_roles(*to_add, reason=reason)NEWLINE msg.append(f"Added {humanize_roles(to_add)} to **{member}**.")NEWLINE if already_added:NEWLINE msg.append(f"**{member}** already had {humanize_roles(already_added)}.")NEWLINE if not_allowed:NEWLINE msg.append(NEWLINE f"You do not have permission to assign the roles {humanize_roles(not_allowed)}."NEWLINE )NEWLINE await ctx.send("\n".join(msg))NEWLINENEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @commands.bot_has_permissions(manage_roles=True)NEWLINE @multirole.command("remove", require_var_positional=True)NEWLINE async def multirole_remove(NEWLINE self, ctx: commands.Context, member: TouchableMember, *roles: StrictRoleNEWLINE ):NEWLINE """Remove multiple roles 
from a member."""NEWLINE not_allowed = []NEWLINE not_added = []NEWLINE to_rm = []NEWLINE for role in roles:NEWLINE allowed = await is_allowed_by_role_hierarchy(self.bot, ctx.me, ctx.author, role)NEWLINE if not allowed[0]:NEWLINE not_allowed.append(role)NEWLINE elif role not in member.roles:NEWLINE not_added.append(role)NEWLINE else:NEWLINE to_rm.append(role)NEWLINE reason = get_audit_reason(ctx.author)NEWLINE msg = []NEWLINE if to_rm:NEWLINE await member.remove_roles(*to_rm, reason=reason)NEWLINE msg.append(f"Removed {humanize_roles(to_rm)} from **{member}**.")NEWLINE if not_added:NEWLINE msg.append(f"**{member}** didn't have {humanize_roles(not_added)}.")NEWLINE if not_allowed:NEWLINE msg.append(NEWLINE f"You do not have permission to assign the roles {humanize_roles(not_allowed)}."NEWLINE )NEWLINE await ctx.send("\n".join(msg))NEWLINENEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @commands.bot_has_permissions(manage_roles=True)NEWLINE @role.command()NEWLINE async def all(self, ctx: commands.Context, *, role: StrictRole):NEWLINE """Add a role to all members of the server."""NEWLINE await self.super_massrole(ctx, ctx.guild.members, role)NEWLINENEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @commands.bot_has_permissions(manage_roles=True)NEWLINE @role.command(aliases=["removeall"])NEWLINE async def rall(self, ctx: commands.Context, *, role: StrictRole):NEWLINE """Remove a role from all members of the server."""NEWLINE member_list = self.get_member_list(ctx.guild.members, role, False)NEWLINE await self.super_massrole(NEWLINE ctx, member_list, role, "No one on the server has this role.", FalseNEWLINE )NEWLINENEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @commands.bot_has_permissions(manage_roles=True)NEWLINE @role.command()NEWLINE async def humans(self, ctx: commands.Context, *, role: StrictRole):NEWLINE """Add a role to all humans (non-bots) in the server."""NEWLINE await self.super_massrole(NEWLINE ctx,NEWLINE [member for member in ctx.guild.members if not member.bot],NEWLINE role,NEWLINE "Every human in the server has this role.",NEWLINE )NEWLINENEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @commands.bot_has_permissions(manage_roles=True)NEWLINE @role.command()NEWLINE async def rhumans(self, ctx: commands.Context, *, role: StrictRole):NEWLINE """Remove a role from all humans (non-bots) in the server."""NEWLINE await self.super_massrole(NEWLINE ctx,NEWLINE [member for member in ctx.guild.members if not member.bot],NEWLINE role,NEWLINE "None of the humans in the server have this role.",NEWLINE False,NEWLINE )NEWLINENEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @commands.bot_has_permissions(manage_roles=True)NEWLINE @role.command()NEWLINE async def bots(self, ctx: commands.Context, *, role: StrictRole):NEWLINE """Add a role to all bots in the server."""NEWLINE await self.super_massrole(NEWLINE ctx,NEWLINE [member for member in ctx.guild.members if member.bot],NEWLINE role,NEWLINE "Every bot in the server has this role.",NEWLINE )NEWLINENEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @commands.bot_has_permissions(manage_roles=True)NEWLINE @role.command()NEWLINE async def rbots(self, ctx: commands.Context, *, role: StrictRole):NEWLINE """Remove a role from all bots in the server."""NEWLINE await self.super_massrole(NEWLINE ctx,NEWLINE [member for member in ctx.guild.members if member.bot],NEWLINE role,NEWLINE "None of the bots in the server have this role.",NEWLINE False,NEWLINE 
)NEWLINENEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @commands.bot_has_permissions(manage_roles=True)NEWLINE @role.command("in")NEWLINE async def role_in(NEWLINE self, ctx: commands.Context, target_role: FuzzyRole, *, add_role: StrictRoleNEWLINE ):NEWLINE """Add a role to all members of another role."""NEWLINE await self.super_massrole(NEWLINE ctx,NEWLINE [member for member in target_role.members],NEWLINE add_role,NEWLINE f"Every member of **{target_role}** has this role.",NEWLINE )NEWLINENEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @commands.bot_has_permissions(manage_roles=True)NEWLINE @role.command("rin")NEWLINE async def role_rin(NEWLINE self, ctx: commands.Context, target_role: FuzzyRole, *, remove_role: StrictRoleNEWLINE ):NEWLINE """Remove a role from all members of another role."""NEWLINE await self.super_massrole(NEWLINE ctx,NEWLINE [member for member in target_role.members],NEWLINE remove_role,NEWLINE f"No one in **{target_role}** has this role.",NEWLINE False,NEWLINE )NEWLINENEWLINE @commands.check(targeter_cog)NEWLINE @commands.has_guild_permissions(manage_roles=True)NEWLINE @commands.bot_has_permissions(manage_roles=True)NEWLINE @role.group()NEWLINE async def target(self, ctx: commands.Context):NEWLINE """NEWLINE Modify roles using 'targeting' args.NEWLINENEWLINE An explanation of Targeter and test commands to preview the members affected can be found with `[p]target`.NEWLINE """NEWLINENEWLINE @target.command("add")NEWLINE async def target_add(self, ctx: commands.Context, role: StrictRole, *, args: TargeterArgs):NEWLINE """NEWLINE Add a role to members using targeting args.NEWLINENEWLINE An explanation of Targeter and test commands to preview the members affected can be found with `[p]target`.NEWLINE """NEWLINE await self.super_massrole(NEWLINE ctx,NEWLINE args,NEWLINE role,NEWLINE f"No one was found with the given args that was eligible to receive **{role}**.",NEWLINE )NEWLINENEWLINE @target.command("remove")NEWLINE async def target_remove(self, ctx: commands.Context, role: StrictRole, *, args: TargeterArgs):NEWLINE """NEWLINE Remove a role from members using targeting args.NEWLINENEWLINE An explanation of Targeter and test commands to preview the members affected can be found with `[p]target`.NEWLINE """NEWLINE await self.super_massrole(NEWLINE ctx,NEWLINE args,NEWLINE role,NEWLINE f"No one was found with the given args that was eligible to have **{role}** removed from them.",NEWLINE False,NEWLINE )NEWLINENEWLINE async def super_massrole(NEWLINE self,NEWLINE ctx: commands.Context,NEWLINE members: list,NEWLINE role: discord.Role,NEWLINE fail_message: str = "Everyone in the server has this role.",NEWLINE adding: bool = True,NEWLINE ):NEWLINE if guild_roughly_chunked(ctx.guild) is False and self.bot.intents.members:NEWLINE await ctx.guild.chunk()NEWLINE member_list = self.get_member_list(members, role, adding)NEWLINE if not member_list:NEWLINE await ctx.send(fail_message)NEWLINE returnNEWLINE verb = "add" if adding else "remove"NEWLINE word = "to" if adding else "from"NEWLINE await ctx.send(NEWLINE f"Beginning to {verb} **{role.name}** {word} **{len(member_list)}** members."NEWLINE )NEWLINE async with ctx.typing():NEWLINE result = await self.massrole(member_list, [role], get_audit_reason(ctx.author), adding)NEWLINE result_text = f"{verb.title()[:5]}ed **{role.name}** {word} **{len(result['completed'])}** members."NEWLINE if result["skipped"]:NEWLINE result_text += (NEWLINE f"\nSkipped {verb[:5]}ing roles for 
**{len(result['skipped'])}** members."NEWLINE )NEWLINE if result["failed"]:NEWLINE result_text += (NEWLINE f"\nFailed {verb[:5]}ing roles for **{len(result['failed'])}** members."NEWLINE )NEWLINE await ctx.send(result_text)NEWLINENEWLINE def get_member_list(self, members: list, role: discord.Role, adding: bool = True):NEWLINE if adding:NEWLINE members = [member for member in members if role not in member.roles]NEWLINE else:NEWLINE members = [member for member in members if role in member.roles]NEWLINE return membersNEWLINENEWLINE async def massrole(self, members: list, roles: list, reason: str, adding: bool = True):NEWLINE completed = []NEWLINE skipped = []NEWLINE failed = []NEWLINE for member in members:NEWLINE if adding:NEWLINE to_add = [role for role in roles if role not in member.roles]NEWLINE if to_add:NEWLINE try:NEWLINE await member.add_roles(*to_add, reason=reason)NEWLINE except Exception as e:NEWLINE failed.append(member)NEWLINE log.exception(f"Failed to add roles to {member}", exc_info=e)NEWLINE else:NEWLINE completed.append(member)NEWLINE else:NEWLINE skipped.append(member)NEWLINE else:NEWLINE to_remove = [role for role in roles if role in member.roles]NEWLINE if to_remove:NEWLINE try:NEWLINE await member.remove_roles(*to_remove, reason=reason)NEWLINE except Exception as e:NEWLINE failed.append(member)NEWLINE log.exception(f"Failed to remove roles from {member}", exc_info=e)NEWLINE else:NEWLINE completed.append(member)NEWLINE else:NEWLINE skipped.append(member)NEWLINE return {"completed": completed, "skipped": skipped, "failed": failed}NEWLINENEWLINE @staticmethodNEWLINE def format_members(members: List[discord.Member]):NEWLINE length = len(members)NEWLINE s = "" if length == 1 else "s"NEWLINE return f"**{hn(length)}** member{s}"NEWLINENEWLINE @role.command("uniquemembers", aliases=["um"], require_var_positional=True)NEWLINE async def role_uniquemembers(self, ctx: commands.Context, *roles: FuzzyRole):NEWLINE """NEWLINE View the total unique members between multiple roles.NEWLINE """NEWLINE roles_length = len(roles)NEWLINE if roles_length == 1:NEWLINE raise commands.UserFeedbackCheckFailure("You must provide at least 2 roles.")NEWLINE if not ctx.guild.chunked:NEWLINE await ctx.guild.chunk()NEWLINE color = roles[0].colorNEWLINE unique_members = set()NEWLINE description = []NEWLINE for role in roles:NEWLINE unique_members.update(role.members)NEWLINE description.append(f"{role.mention}: {self.format_members(role.members)}")NEWLINE description.insert(0, f"**Unique members**: {self.format_members(unique_members)}")NEWLINE e = discord.Embed(NEWLINE color=color,NEWLINE title=f"Unique members between {roles_length} roles",NEWLINE description="\n".join(description),NEWLINE )NEWLINE ref = ctx.message.to_reference(fail_if_not_exists=False)NEWLINE await ctx.send(embed=e, reference=ref)NEWLINE
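# --- Editor's addition: a minimal, hedged sketch; not part of the original cog. ---NEWLINE# The module-level chunks() helper defined above yields successive n-sizedNEWLINE# slices; this standalone demonstration uses plain integers instead of DiscordNEWLINE# objects and assumes only the function defined earlier in this file.NEWLINEif __name__ == "__main__":NEWLINE    print(list(chunks(list(range(7)), 3)))  # -> [[0, 1, 2], [3, 4, 5], [6]]NEWLINE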
#NEWLINE# This file is part of pysnmp software.NEWLINE#NEWLINE# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>NEWLINE# License: http://snmplabs.com/pysnmp/license.htmlNEWLINE#NEWLINEimport randomNEWLINEfrom hashlib import md5NEWLINEfrom hashlib import sha1NEWLINENEWLINEtry:NEWLINE from pysnmpcrypto import aes, PysnmpCryptoErrorNEWLINENEWLINEexcept ImportError:NEWLINE PysnmpCryptoError = AttributeErrorNEWLINE aes = NoneNEWLINENEWLINEfrom pyasn1.type import univNEWLINEfrom pysnmp.proto.secmod.rfc3414.priv import baseNEWLINEfrom pysnmp.proto.secmod.rfc3414.auth import hmacmd5NEWLINEfrom pysnmp.proto.secmod.rfc3414.auth import hmacshaNEWLINEfrom pysnmp.proto.secmod.rfc7860.auth import hmacsha2NEWLINEfrom pysnmp.proto.secmod.rfc3414 import localkeyNEWLINEfrom pysnmp.proto import errindNEWLINEfrom pysnmp.proto import errorNEWLINENEWLINErandom.seed()NEWLINENEWLINENEWLINE# RFC3826NEWLINENEWLINE#NEWLINENEWLINEclass Aes(base.AbstractEncryptionService):NEWLINE SERVICE_ID = (1, 3, 6, 1, 6, 3, 10, 1, 2, 4) # usmAesCfb128ProtocolNEWLINE KEY_SIZE = 16NEWLINENEWLINE local_int = random.randrange(0, 0xffffffffffffffff)NEWLINENEWLINE # 3.1.2.1NEWLINE def _getEncryptionKey(self, privKey, snmpEngineBoots, snmpEngineTime):NEWLINE salt = [NEWLINE self.local_int >> 56 & 0xff,NEWLINE self.local_int >> 48 & 0xff,NEWLINE self.local_int >> 40 & 0xff,NEWLINE self.local_int >> 32 & 0xff,NEWLINE self.local_int >> 24 & 0xff,NEWLINE self.local_int >> 16 & 0xff,NEWLINE self.local_int >> 8 & 0xff,NEWLINE self.local_int & 0xffNEWLINE ]NEWLINENEWLINE if self.local_int == 0xffffffffffffffff:NEWLINE self.local_int = 0NEWLINENEWLINE else:NEWLINE self.local_int += 1NEWLINENEWLINE key, iv = self._getDecryptionKey(NEWLINE privKey, snmpEngineBoots, snmpEngineTime, salt)NEWLINENEWLINE return key, iv, univ.OctetString(salt).asOctets()NEWLINENEWLINE def _getDecryptionKey(self, privKey, snmpEngineBoots,NEWLINE snmpEngineTime, salt):NEWLINENEWLINE snmpEngineBoots, snmpEngineTime, salt = (NEWLINE int(snmpEngineBoots), int(snmpEngineTime), salt)NEWLINENEWLINE iv = [NEWLINE snmpEngineBoots >> 24 & 0xff,NEWLINE snmpEngineBoots >> 16 & 0xff,NEWLINE snmpEngineBoots >> 8 & 0xff,NEWLINE snmpEngineBoots & 0xff,NEWLINE snmpEngineTime >> 24 & 0xff,NEWLINE snmpEngineTime >> 16 & 0xff,NEWLINE snmpEngineTime >> 8 & 0xff,NEWLINE snmpEngineTime & 0xffNEWLINE ]NEWLINENEWLINE iv += saltNEWLINENEWLINE key = privKey[:self.KEY_SIZE].asOctets()NEWLINE iv = univ.OctetString(iv).asOctets()NEWLINENEWLINE return key, ivNEWLINENEWLINE def hashPassphrase(self, authProtocol, privKey):NEWLINE if authProtocol == hmacmd5.HmacMd5.SERVICE_ID:NEWLINE hashAlgo = md5NEWLINENEWLINE elif authProtocol == hmacsha.HmacSha.SERVICE_ID:NEWLINE hashAlgo = sha1NEWLINENEWLINE elif authProtocol in hmacsha2.HmacSha2.HASH_ALGORITHM:NEWLINE hashAlgo = hmacsha2.HmacSha2.HASH_ALGORITHM[authProtocol]NEWLINENEWLINE else:NEWLINE raise error.ProtocolError(NEWLINE 'Unknown auth protocol %s' % (authProtocol,))NEWLINENEWLINE return localkey.hashPassphrase(privKey, hashAlgo)NEWLINENEWLINE def localizeKey(self, authProtocol, privKey, snmpEngineID):NEWLINE if authProtocol == hmacmd5.HmacMd5.SERVICE_ID:NEWLINE hashAlgo = md5NEWLINENEWLINE elif authProtocol == hmacsha.HmacSha.SERVICE_ID:NEWLINE hashAlgo = sha1NEWLINENEWLINE elif authProtocol in hmacsha2.HmacSha2.HASH_ALGORITHM:NEWLINE hashAlgo = hmacsha2.HmacSha2.HASH_ALGORITHM[authProtocol]NEWLINENEWLINE else:NEWLINE raise error.ProtocolError(NEWLINE 'Unknown auth protocol %s' % (authProtocol,))NEWLINENEWLINE localPrivKey = 
localkey.localizeKey(privKey, snmpEngineID, hashAlgo)NEWLINENEWLINE return localPrivKey[:self.KEY_SIZE]NEWLINENEWLINE # 3.2.4.1NEWLINE def encryptData(self, encryptKey, privParameters, dataToEncrypt):NEWLINE snmpEngineBoots, snmpEngineTime, salt = privParametersNEWLINENEWLINE # 3.3.1.1NEWLINE aesKey, iv, salt = self._getEncryptionKey(NEWLINE encryptKey, snmpEngineBoots, snmpEngineTime)NEWLINENEWLINE # 3.3.1.3NEWLINE # PyCrypto seems to require paddingNEWLINE padding = univ.OctetString((0,) * (16 - len(dataToEncrypt) % 16))NEWLINE dataToEncrypt += paddingNEWLINENEWLINE try:NEWLINE ciphertext = aes.encrypt(dataToEncrypt.asOctets(), aesKey, iv)NEWLINENEWLINE except PysnmpCryptoError:NEWLINE raise error.StatusInformation(NEWLINE errorIndication=errind.unsupportedPrivProtocol)NEWLINENEWLINE # 3.3.1.4NEWLINE return univ.OctetString(ciphertext), univ.OctetString(salt)NEWLINENEWLINE # 3.2.4.2NEWLINE def decryptData(self, decryptKey, privParameters, encryptedData):NEWLINE snmpEngineBoots, snmpEngineTime, salt = privParametersNEWLINENEWLINE # 3.3.2.1NEWLINE if len(salt) != 8:NEWLINE raise error.StatusInformation(NEWLINE errorIndication=errind.decryptionError)NEWLINENEWLINE # 3.3.2.3NEWLINE aesKey, iv = self._getDecryptionKey(NEWLINE decryptKey, snmpEngineBoots, snmpEngineTime, salt)NEWLINENEWLINE # PyCrypto seems to require paddingNEWLINE padding = univ.OctetString((0,) * (16 - len(encryptedData) % 16))NEWLINE encryptedData += paddingNEWLINENEWLINE try:NEWLINE # 3.3.2.4-6NEWLINE return aes.decrypt(encryptedData.asOctets(), aesKey, iv)NEWLINENEWLINE except PysnmpCryptoError:NEWLINE raise error.StatusInformation(NEWLINE errorIndication=errind.unsupportedPrivProtocol)NEWLINE
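# --- Editor's addition: a minimal, hedged sketch; not part of the original module. ---NEWLINE# The bit-shifting in _getEncryptionKey()/_getDecryptionKey() above builds theNEWLINE# 16-octet AES-CFB IV required by RFC 3826: engineBoots (4 octets), followed byNEWLINE# engineTime (4 octets), followed by the 8-octet locally generated salt. TheNEWLINE# standalone equivalent below uses int.to_bytes with assumed example values.NEWLINEif __name__ == '__main__':NEWLINE    boots, engine_time, local_int = 1, 12345, 0x0123456789abcdefNEWLINE    salt = local_int.to_bytes(8, 'big')  # same octets as the >> 56 ... >> 0 shifts aboveNEWLINE    iv = boots.to_bytes(4, 'big') + engine_time.to_bytes(4, 'big') + saltNEWLINE    assert len(iv) == 16NEWLINE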
from rest_framework import routersNEWLINENEWLINEfrom api.transaction.viewsets import TransactionViewSetNEWLINENEWLINErouter = routers.SimpleRouter(trailing_slash=False)NEWLINENEWLINErouter.register('', TransactionViewSet, basename='transactions')NEWLINENEWLINEurlpatterns = [NEWLINE *router.urls,NEWLINE]NEWLINE
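# --- Editor's addition: a hedged usage note; not part of the original module. ---NEWLINE# With trailing_slash=False and basename='transactions', SimpleRouter registersNEWLINE# the named routes 'transactions-list' and 'transactions-detail'. Inside aNEWLINE# configured Django project they could be resolved like this (illustrativeNEWLINE# only; the surrounding URLconf prefix is an assumption):NEWLINE#NEWLINE#   from django.urls import reverseNEWLINE#   reverse('transactions-list')               # list/create endpointNEWLINE#   reverse('transactions-detail', args=[42])  # retrieve/update/delete endpointNEWLINE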
# Copyright (c) Facebook, Inc. and its affiliates.NEWLINENEWLINEimport numpy as npNEWLINEfrom typing import Dict, List, OptionalNEWLINEimport fvcore.nn.weight_init as weight_initNEWLINEimport torchNEWLINEimport torch.nn as nnNEWLINEfrom torch.nn import functional as FNEWLINENEWLINEfrom detectron2.layers import Conv2d, ShapeSpec, get_normNEWLINEfrom detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeadsNEWLINEfrom detectron2.modeling.poolers import ROIPoolerNEWLINEfrom detectron2.modeling.roi_heads import select_foreground_proposalsNEWLINEfrom detectron2.structures import ImageList, InstancesNEWLINENEWLINEfrom .. import (NEWLINE build_densepose_data_filter,NEWLINE build_densepose_embedder,NEWLINE build_densepose_head,NEWLINE build_densepose_losses,NEWLINE build_densepose_predictor,NEWLINE densepose_inference,NEWLINE)NEWLINENEWLINENEWLINEclass Decoder(nn.Module):NEWLINE """NEWLINE A semantic segmentation head described in detail in the Panoptic Feature Pyramid Networks paperNEWLINE (https://arxiv.org/abs/1901.02446). It takes FPN features as input and merges information fromNEWLINE all levels of the FPN into a single output.NEWLINE """NEWLINENEWLINE def __init__(self, cfg, input_shape: Dict[str, ShapeSpec], in_features):NEWLINE super(Decoder, self).__init__()NEWLINENEWLINE # fmt: offNEWLINE self.in_features = in_featuresNEWLINE feature_strides = {k: v.stride for k, v in input_shape.items()}NEWLINE feature_channels = {k: v.channels for k, v in input_shape.items()}NEWLINE num_classes = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NUM_CLASSESNEWLINE conv_dims = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_CONV_DIMSNEWLINE self.common_stride = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_COMMON_STRIDENEWLINE norm = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NORMNEWLINE # fmt: onNEWLINENEWLINE self.scale_heads = []NEWLINE for in_feature in self.in_features:NEWLINE head_ops = []NEWLINE head_length = max(NEWLINE 1, int(np.log2(feature_strides[in_feature]) - np.log2(self.common_stride))NEWLINE )NEWLINE for k in range(head_length):NEWLINE conv = Conv2d(NEWLINE feature_channels[in_feature] if k == 0 else conv_dims,NEWLINE conv_dims,NEWLINE kernel_size=3,NEWLINE stride=1,NEWLINE padding=1,NEWLINE bias=not norm,NEWLINE norm=get_norm(norm, conv_dims),NEWLINE activation=F.relu,NEWLINE )NEWLINE weight_init.c2_msra_fill(conv)NEWLINE head_ops.append(conv)NEWLINE if feature_strides[in_feature] != self.common_stride:NEWLINE head_ops.append(NEWLINE nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)NEWLINE )NEWLINE self.scale_heads.append(nn.Sequential(*head_ops))NEWLINE # pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function.NEWLINE self.add_module(in_feature, self.scale_heads[-1])NEWLINE self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0)NEWLINE weight_init.c2_msra_fill(self.predictor)NEWLINENEWLINE def forward(self, features: List[torch.Tensor]):NEWLINE for i, _ in enumerate(self.in_features):NEWLINE if i == 0:NEWLINE x = self.scale_heads[i](features[i])NEWLINE else:NEWLINE x = x + self.scale_heads[i](features[i])NEWLINE x = self.predictor(x)NEWLINE return xNEWLINENEWLINENEWLINE@ROI_HEADS_REGISTRY.register()NEWLINEclass DensePoseROIHeads(StandardROIHeads):NEWLINE """NEWLINE A Standard ROIHeads which additionally contains a DensePose head.NEWLINE """NEWLINENEWLINE def __init__(self, cfg, input_shape):NEWLINE super().__init__(cfg, input_shape)NEWLINE self._init_densepose_head(cfg, input_shape)NEWLINENEWLINE def _init_densepose_head(self, cfg, input_shape):NEWLINE # fmt: 
offNEWLINE self.densepose_on = cfg.MODEL.DENSEPOSE_ONNEWLINE if not self.densepose_on:NEWLINE returnNEWLINE self.densepose_data_filter = build_densepose_data_filter(cfg)NEWLINE dp_pooler_resolution = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_RESOLUTIONNEWLINE dp_pooler_sampling_ratio = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_SAMPLING_RATIONEWLINE dp_pooler_type = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_TYPENEWLINE self.use_decoder = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_ONNEWLINE # fmt: onNEWLINE if self.use_decoder:NEWLINE dp_pooler_scales = (1.0 / input_shape[self.in_features[0]].stride,)NEWLINE else:NEWLINE dp_pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)NEWLINE in_channels = [input_shape[f].channels for f in self.in_features][0]NEWLINENEWLINE if self.use_decoder:NEWLINE self.decoder = Decoder(cfg, input_shape, self.in_features)NEWLINENEWLINE self.densepose_pooler = ROIPooler(NEWLINE output_size=dp_pooler_resolution,NEWLINE scales=dp_pooler_scales,NEWLINE sampling_ratio=dp_pooler_sampling_ratio,NEWLINE pooler_type=dp_pooler_type,NEWLINE )NEWLINE self.densepose_head = build_densepose_head(cfg, in_channels)NEWLINE self.densepose_predictor = build_densepose_predictor(NEWLINE cfg, self.densepose_head.n_out_channelsNEWLINE )NEWLINE self.densepose_losses = build_densepose_losses(cfg)NEWLINE self.embedder = build_densepose_embedder(cfg)NEWLINENEWLINE def _forward_densepose(self, features: Dict[str, torch.Tensor], instances: List[Instances]):NEWLINE """NEWLINE Forward logic of the densepose prediction branch.NEWLINENEWLINE Args:NEWLINE features (dict[str, Tensor]): input data as a mapping from featureNEWLINE map name to tensor. Axis 0 represents the number of images `N` inNEWLINE the input data; axes 1-3 are channels, height, and width, which mayNEWLINE vary between feature maps (e.g., if a feature pyramid is used).NEWLINE instances (list[Instances]): length `N` list of `Instances`. 
The i-thNEWLINE `Instances` contains instances for the i-th input image.NEWLINE In training, they can be the proposals.NEWLINE In inference, they can be the predicted boxes.NEWLINENEWLINE Returns:NEWLINE In training, a dict of losses.NEWLINE In inference, update `instances` with new fields "densepose" and return it.NEWLINE """NEWLINE if not self.densepose_on:NEWLINE return {} if self.training else instancesNEWLINENEWLINE features_list = [features[f] for f in self.in_features]NEWLINE if self.training:NEWLINE proposals, _ = select_foreground_proposals(instances, self.num_classes)NEWLINE features_list, proposals = self.densepose_data_filter(features_list, proposals)NEWLINE if len(proposals) > 0:NEWLINE proposal_boxes = [x.proposal_boxes for x in proposals]NEWLINENEWLINE if self.use_decoder:NEWLINE # pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not aNEWLINE # function.NEWLINE features_list = [self.decoder(features_list)]NEWLINENEWLINE features_dp = self.densepose_pooler(features_list, proposal_boxes)NEWLINE densepose_head_outputs = self.densepose_head(features_dp)NEWLINE densepose_predictor_outputs = self.densepose_predictor(densepose_head_outputs)NEWLINE densepose_loss_dict = self.densepose_losses(NEWLINE proposals, densepose_predictor_outputs, embedder=self.embedderNEWLINE )NEWLINE return densepose_loss_dictNEWLINE else:NEWLINE pred_boxes = [x.pred_boxes for x in instances]NEWLINENEWLINE if self.use_decoder:NEWLINE # pyre-fixme[29]: `Union[nn.Module, torch.Tensor]` is not a function.NEWLINE features_list = [self.decoder(features_list)]NEWLINENEWLINE features_dp = self.densepose_pooler(features_list, pred_boxes)NEWLINE if len(features_dp) > 0:NEWLINE densepose_head_outputs = self.densepose_head(features_dp)NEWLINE densepose_predictor_outputs = self.densepose_predictor(densepose_head_outputs)NEWLINE else:NEWLINE densepose_predictor_outputs = NoneNEWLINENEWLINE densepose_inference(densepose_predictor_outputs, instances)NEWLINE return instancesNEWLINENEWLINE def forward(NEWLINE self,NEWLINE images: ImageList,NEWLINE features: Dict[str, torch.Tensor],NEWLINE proposals: List[Instances],NEWLINE targets: Optional[List[Instances]] = None,NEWLINE ):NEWLINE instances, losses = super().forward(images, features, proposals, targets)NEWLINE del targets, imagesNEWLINENEWLINE if self.training:NEWLINE losses.update(self._forward_densepose(features, instances))NEWLINE return instances, lossesNEWLINENEWLINE def forward_with_given_boxes(NEWLINE self, features: Dict[str, torch.Tensor], instances: List[Instances]NEWLINE ):NEWLINE """NEWLINE Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.NEWLINENEWLINE This is useful for downstream tasks where a box is known, but other attributesNEWLINE (outputs of other heads) still need to be obtained.NEWLINE Test-time augmentation also uses this.NEWLINENEWLINE Args:NEWLINE features: same as in `forward()`NEWLINE instances (list[Instances]): instances to predict other outputs. Expect the keysNEWLINE "pred_boxes" and "pred_classes" to exist.NEWLINENEWLINE Returns:NEWLINE instances (list[Instances]):NEWLINE the same `Instances` objects, with extraNEWLINE fields such as `pred_masks` or `pred_keypoints`.NEWLINE """NEWLINENEWLINE instances = super().forward_with_given_boxes(features, instances)NEWLINE instances = self._forward_densepose(features, instances)NEWLINE return instancesNEWLINE
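# --- Editor's addition: a minimal, hedged sketch; not part of the original module. ---NEWLINE# Decoder.__init__() above stacks max(1, log2(stride) - log2(common_stride))NEWLINE# conv blocks per FPN level (each followed by a 2x upsample when the strideNEWLINE# differs), so every level ends up at the common stride. Assuming the usualNEWLINE# detectron2 p2-p5 strides and a common stride of 4:NEWLINEif __name__ == "__main__":NEWLINE    _common_stride = 4NEWLINE    for _name, _stride in {"p2": 4, "p3": 8, "p4": 16, "p5": 32}.items():NEWLINE        _head_length = max(1, int(np.log2(_stride) - np.log2(_common_stride)))NEWLINE        print(_name, _head_length)  # p2 -> 1, p3 -> 1, p4 -> 2, p5 -> 3NEWLINE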
# This file is part of the Astrometry.net suite.NEWLINE# Licensed under a 3-clause BSD style license - see LICENSENEWLINENEWLINE# Generates FITS tables from CSV lists of OpenNGC entries and names.NEWLINENEWLINEfrom __future__ import print_functionNEWLINENEWLINEimport csvNEWLINENEWLINEfrom astrometry.util.fits import fits_tableNEWLINEimport numpy as npNEWLINENEWLINENEWLINEdef convert_openngc_entries():NEWLINE entries = []NEWLINENEWLINE with open('openngc-entries.csv') as f:NEWLINE for is_ngc, num, ra, dec, size in csv.reader(f, delimiter=';'):NEWLINE is_ngc = (is_ngc == '1')NEWLINE num = int(num)NEWLINE ra = float(ra) if ra else 0.0NEWLINE dec = float(dec) if dec else 0.0NEWLINENEWLINE # Convert from diameter in arcmins to radius in degrees.NEWLINE radius = float(size) / (2.0 * 60.0) if size else 0.0NEWLINENEWLINE entries.append({NEWLINE 'is_ngc': is_ngc,NEWLINE 'ra': ra,NEWLINE 'dec': dec,NEWLINE 'radius': radius,NEWLINE 'num': num,NEWLINE })NEWLINENEWLINE T = fits_table()NEWLINE for key in ['is_ngc', 'ra', 'dec', 'radius', 'num']:NEWLINE T.set(key, [x[key] for x in entries])NEWLINENEWLINE T.to_np_arrays()NEWLINENEWLINE T.name = np.array(['NGC %i' % n if isngc else 'IC %i' % nNEWLINE for n, isngc in zip(T.num, T.is_ngc)])NEWLINENEWLINE for key in ['ra', 'dec', 'radius']:NEWLINE T.set(key, T.get(key).astype(np.float32))NEWLINE T.num = T.num.astype(np.int16)NEWLINENEWLINE units_dict = {NEWLINE 'ra': 'deg',NEWLINE 'dec': 'deg',NEWLINE 'radius': 'deg',NEWLINE }NEWLINENEWLINE NGC = T[T.is_ngc]NEWLINE NGC.rename('num', 'ngcnum')NEWLINE NGC.delete_column('is_ngc')NEWLINE units = [units_dict.get(c, '') for c in NGC.get_columns()]NEWLINE NGC.writeto('openngc-ngc.fits', units=units)NEWLINENEWLINE IC = T[np.logical_not(T.is_ngc)]NEWLINE IC.rename('num', 'icnum')NEWLINE IC.delete_column('is_ngc')NEWLINE units = [units_dict.get(c, '') for c in IC.get_columns()]NEWLINE IC.writeto('openngc-ic.fits', units=units)NEWLINENEWLINENEWLINEdef convert_openngc_names():NEWLINE names = []NEWLINENEWLINE with open('openngc-names.csv') as f:NEWLINE for is_ngc, num, name in csv.reader(f, delimiter=';'):NEWLINENEWLINE # Parse the flag the same way as convert_openngc_entries():NEWLINE # bool(is_ngc) would be True for the string '0' as well.NEWLINE is_ngc = (is_ngc == '1')NEWLINENEWLINE num = int(num)NEWLINENEWLINE identifier = '%s%d' % ('' if is_ngc else 'I', num)NEWLINENEWLINE names.append({NEWLINE 'Object': name,NEWLINE 'Name': identifier,NEWLINE })NEWLINENEWLINE T = fits_table()NEWLINE for key in ['Object', 'Name']:NEWLINE T.set(key, [x[key] for x in names])NEWLINE T.writeto('openngc-names.fits')NEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE convert_openngc_entries()NEWLINE convert_openngc_names()NEWLINE
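# --- Editor's addition: a hedged worked example; not part of the original script. ---NEWLINE# The size column in openngc-entries.csv is a diameter in arcminutes; dividingNEWLINE# by 2 gives a radius and dividing by 60 converts arcminutes to degrees, henceNEWLINE# radius = size / (2 * 60). For example, a 30-arcminute diameter becomesNEWLINE# 30 / 120 = 0.25 degrees of radius.NEWLINE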
from __future__ import unicode_literalsNEWLINENEWLINEfrom decimal import DecimalNEWLINEimport jsonNEWLINEimport loggingNEWLINEfrom urllib.request import urlopenNEWLINENEWLINEfrom django.core.exceptions import ImproperlyConfiguredNEWLINENEWLINEfrom dj_currencies.sources import CurrencyDataExchangeSourceNEWLINEfrom .exceptions import RateBackendErrorNEWLINEfrom .models import ExchangeRateNEWLINEfrom .settings import currency_settingsNEWLINENEWLINElogger = logging.getLogger(__name__)NEWLINENEWLINENEWLINEclass BaseRateBackend(object):NEWLINENEWLINE def get_latest_rates(self, base_currency, symbols=None):NEWLINE """NEWLINE Fetch latest rates for one base currencyNEWLINE :param base_currency: a three letter currency symbolNEWLINE :param symbols: List of symbols to fetchNEWLINE :return:NEWLINE """NEWLINE raise NotImplementedError()NEWLINENEWLINE def update_rates(self):NEWLINE raise NotImplementedError()NEWLINENEWLINENEWLINEclass OpenExchangeBackend(BaseRateBackend):NEWLINENEWLINE def __init__(self):NEWLINE if not currency_settings.OPENEXCHANGE_APP_ID:NEWLINE raise ImproperlyConfigured(NEWLINE "OPENEXCHANGE_APP_ID setting should not be empty when using OpenExchangeBackend")NEWLINENEWLINE if not currency_settings.BASE_CURRENCIES:NEWLINE raise ImproperlyConfigured(NEWLINE "BASE_CURRENCIES setting should not be empty. It should be set as a three letter currency code")NEWLINENEWLINE # Build the base api urlNEWLINE self.base_url = 'https://openexchangerates.org/api/latest.json?app_id={0}'.format(NEWLINE currency_settings.OPENEXCHANGE_APP_IDNEWLINE )NEWLINENEWLINE def get_end_point_url(self, base_currency, symbols):NEWLINE url = self.base_url + '&base={0}'.format(base_currency)NEWLINE if symbols:NEWLINE symbol_args = ','.join(symbols)NEWLINE url = url + '&symbols={0}'.format(symbol_args)NEWLINE return urlNEWLINENEWLINE def get_cached_rates(self, symbols=None):NEWLINE if not symbols:NEWLINE return {}NEWLINENEWLINE ex_rates = ExchangeRate.objects.order_by('base_currency', '-last_updated_at').filter(NEWLINE base_currency__in=symbolsNEWLINE ).distinct('base_currency')[:len(symbols)]NEWLINENEWLINE return {ex_rate.base_currency: ex_rate.rates for ex_rate in ex_rates}NEWLINENEWLINE def get_latest_rates(self, base_currency, symbols=None):NEWLINE url = self.get_end_point_url(base_currency, symbols)NEWLINENEWLINE try:NEWLINE data = urlopen(url).read().decode("utf-8")NEWLINE return json.loads(data)['rates']NEWLINE except Exception as e:NEWLINE logger.exception("Error retrieving data from %s", url)NEWLINE raise RateBackendError("Error retrieving rates: %s" % e)NEWLINENEWLINE def update_rates(self):NEWLINE for currency in currency_settings.BASE_CURRENCIES:NEWLINE print('Updating exchange rates with base currency {0}'.format(currency))NEWLINE rates = self.get_latest_rates(currency)NEWLINE ExchangeRate.objects.create(NEWLINE base_currency=currency,NEWLINE rates=rates,NEWLINE source=CurrencyDataExchangeSource.OPENEXCHANGERATES,NEWLINE )NEWLINENEWLINE def convert_money(self, amount, currency_from, currency_to):NEWLINE ex_rate = ExchangeRate.objects.base_currency(currency_from).within_days(NEWLINE currency_settings.MAX_CACHE_DAYS)NEWLINENEWLINE if isinstance(amount, float):NEWLINE amount = Decimal(amount).quantize(Decimal('.000001'))NEWLINENEWLINE rate_to = ex_rate.rates.get(currency_to)NEWLINENEWLINE if not rate_to:NEWLINE raise RateBackendError(NEWLINE 'No exchange rate found from {0} to {1}'.format(ex_rate.base_currency, currency_to))NEWLINE rate_to = Decimal(str(rate_to)).quantize(Decimal('.000001'))NEWLINE 
converted_amount = amount * rate_toNEWLINENEWLINE return converted_amount.quantize(Decimal('1.00'))NEWLINE
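# --- Editor's addition: a minimal, hedged sketch; not part of the original module. ---NEWLINE# convert_money() above quantizes float amounts and the looked-up rate to sixNEWLINE# decimal places, multiplies, then rounds to cents. The standalone arithmeticNEWLINE# below mirrors that with assumed example values:NEWLINEif __name__ == '__main__':NEWLINE    _amount = Decimal(19.99).quantize(Decimal('.000001'))NEWLINE    _rate_to = Decimal('1.087345').quantize(Decimal('.000001'))NEWLINE    print((_amount * _rate_to).quantize(Decimal('1.00')))  # -> 21.74NEWLINE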
from ..util import ormNEWLINENEWLINEasync def insert_spoiler_race(srl_id, spoiler_url, studytime=900):NEWLINE await orm.execute(NEWLINE 'INSERT INTO spoiler_races(srl_id, spoiler_url, studytime) VALUES (%s,%s,%s);',NEWLINE [srl_id, spoiler_url, studytime]NEWLINE )NEWLINENEWLINEasync def delete_spoiler_race(srl_id):NEWLINE await orm.execute(NEWLINE 'DELETE FROM spoiler_races WHERE srl_id=%s;',NEWLINE [srl_id]NEWLINE )NEWLINENEWLINEasync def get_spoiler_races():NEWLINE results = await orm.select(NEWLINE 'SELECT * from spoiler_races;'NEWLINE )NEWLINE return resultsNEWLINENEWLINEasync def get_spoiler_race_by_id(srl_id):NEWLINE results = await orm.select(NEWLINE 'SELECT * from spoiler_races where srl_id=%s;',NEWLINE [srl_id]NEWLINE )NEWLINE return results[0] if len(results) > 0 else False
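# --- Editor's addition: a hedged usage note; not part of the original module. ---NEWLINE# The helpers above pass values separately from the SQL through %s placeholders,NEWLINE# so the database driver handles quoting and escaping. Assumed usage from anNEWLINE# async caller (the orm backend configuration is not shown here):NEWLINE#NEWLINE#   await insert_spoiler_race('abc12', 'https://example.com/spoiler', 900)NEWLINE#   race = await get_spoiler_race_by_id('abc12')NEWLINE#   if race:NEWLINE#       await delete_spoiler_race('abc12')NEWLINE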
#! /usr/bin/env pythonNEWLINE# -*- coding: utf-8 -*-NEWLINEfrom collections import OrderedDictNEWLINENEWLINEimport numpy as npNEWLINEfrom landlab import RasterModelGridNEWLINEfrom landlab.bmi.bmi_bridge import TimeStepperNEWLINENEWLINEfrom compaction.landlab import CompactNEWLINENEWLINEfrom .bathymetry import BathymetryReaderNEWLINEfrom .fluvial import FluvialNEWLINENEWLINE# from .raster_model import RasterModelNEWLINEfrom .input_reader import load_configNEWLINEfrom .output_writer import OutputWriterNEWLINEfrom .sea_level import SeaLevelTimeSeries, SinusoidalSeaLevelNEWLINEfrom .sediment_flexure import SedimentFlexureNEWLINEfrom .shoreline import ShorelineFinderNEWLINEfrom .submarine import SubmarineDiffuserNEWLINEfrom .subsidence import SubsidenceTimeSeriesNEWLINENEWLINENEWLINEclass SequenceModel:NEWLINENEWLINE DEFAULT_PARAMS = {NEWLINE "grid": {NEWLINE "shape": [3, 100],NEWLINE "xy_spacing": 100.0,NEWLINE "xy_of_lower_left": [0.0, 0.0],NEWLINE "bc": {"top": "closed", "bottom": "closed"},NEWLINE },NEWLINE "clock": {"start": 0.0, "stop": 20000.0, "step": 100.0},NEWLINE "output": {NEWLINE "interval": 10,NEWLINE "filepath": "sequence.nc",NEWLINE "clobber": True,NEWLINE "rows": [1],NEWLINE "fields": ["sediment_deposit__thickness"],NEWLINE },NEWLINE "submarine_diffusion": {NEWLINE "plain_slope": 0.0008,NEWLINE "wave_base": 60.0,NEWLINE "shoreface_height": 15.0,NEWLINE "alpha": 0.0005,NEWLINE "shelf_slope": 0.001,NEWLINE "sediment_load": 3.0,NEWLINE "load_sealevel": 0.0,NEWLINE "basin_width": 500000.0,NEWLINE },NEWLINE "sea_level": {NEWLINE "amplitude": 10.0,NEWLINE "wave_length": 1000.0,NEWLINE "phase": 0.0,NEWLINE "linear": 0.0,NEWLINE },NEWLINE "subsidence": {"filepath": "subsidence.csv"},NEWLINE "flexure": {"method": "flexure", "rho_mantle": 3300.0, "isostasytime": 0},NEWLINE "sediments": {NEWLINE "layers": 2,NEWLINE "sand": 1.0,NEWLINE "mud": 0.006,NEWLINE "sand_density": 2650.0,NEWLINE "mud_density": 2720.0,NEWLINE "sand_frac": 0.5,NEWLINE "hemipelagic": 0.0,NEWLINE },NEWLINE "bathymetry": {"filepath": "bathymetry.csv", "kind": "linear"},NEWLINE "compaction": {NEWLINE "c": 5.0e-08,NEWLINE "porosity_max": 0.5,NEWLINE "porosity_min": 0.01,NEWLINE "rho_grain": 2650.0,NEWLINE "rho_void": 1000.0,NEWLINE },NEWLINE }NEWLINENEWLINE LONG_NAME = {"z": "topographic__elevation", "z0": "bedrock_surface__elevation"}NEWLINENEWLINE def __init__(NEWLINE self,NEWLINE grid=None,NEWLINE clock=None,NEWLINE output=None,NEWLINE submarine_diffusion=None,NEWLINE sea_level=None,NEWLINE subsidence=None,NEWLINE flexure=None,NEWLINE sediments=None,NEWLINE bathymetry=None,NEWLINE compaction=None,NEWLINE ):NEWLINE config = {NEWLINE "grid": grid,NEWLINE "clock": clock,NEWLINE "output": output,NEWLINE "submarine_diffusion": submarine_diffusion,NEWLINE "sea_level": sea_level,NEWLINE "subsidence": subsidence,NEWLINE "flexure": flexure,NEWLINE "sediments": sediments,NEWLINE "bathymetry": bathymetry,NEWLINE "compaction": compaction,NEWLINE }NEWLINE missing_kwds = [kwd for kwd, value in config.items() if value is None]NEWLINE if missing_kwds:NEWLINE raise ValueError(NEWLINE "missing required config parameters for SequenceModel ({0})".format(NEWLINE ", ".join(missing_kwds)NEWLINE )NEWLINE )NEWLINENEWLINE self._clock = TimeStepper(**clock)NEWLINE self._grid = RasterModelGrid.from_dict(grid)NEWLINENEWLINE self._components = OrderedDict()NEWLINE if output:NEWLINE self._output = OutputWriter(self._grid, **output)NEWLINE self._components["output"] = self._outputNEWLINENEWLINE BathymetryReader(self.grid, 
**bathymetry).run_one_step()NEWLINENEWLINE z = self.grid.at_node["topographic__elevation"]NEWLINE z0 = self.grid.add_empty("bedrock_surface__elevation", at="node")NEWLINE z0[:] = z - 100.0NEWLINENEWLINE self.grid.at_grid["x_of_shore"] = np.nanNEWLINE self.grid.at_grid["x_of_shelf_edge"] = np.nanNEWLINENEWLINE self.grid.event_layers.add(NEWLINE 100.0,NEWLINE age=self.clock.start,NEWLINE water_depth=-z0[self.grid.core_nodes],NEWLINE t0=10.0,NEWLINE percent_sand=0.5,NEWLINE porosity=0.5,NEWLINE )NEWLINENEWLINE if "filepath" in sea_level:NEWLINE self._sea_level = SeaLevelTimeSeries(NEWLINE self.grid, sea_level.pop("filepath"), start=clock["start"], **sea_levelNEWLINE )NEWLINE else:NEWLINE self._sea_level = SinusoidalSeaLevel(NEWLINE self.grid, start=clock["start"], **sea_levelNEWLINE )NEWLINENEWLINE self._subsidence = SubsidenceTimeSeries(self.grid, **subsidence)NEWLINENEWLINE self._submarine_diffusion = SubmarineDiffuser(self.grid, **submarine_diffusion)NEWLINE self._fluvial = Fluvial(NEWLINE self.grid,NEWLINE 0.5,NEWLINE start=0,NEWLINE sediment_load=submarine_diffusion["sediment_load"],NEWLINE plain_slope=submarine_diffusion["plain_slope"],NEWLINE hemipelagic=sediments["hemipelagic"],NEWLINE )NEWLINE self._flexure = SedimentFlexure(self.grid, **flexure)NEWLINE self._shoreline = ShorelineFinder(self.grid, alpha=submarine_diffusion["alpha"])NEWLINE self._compaction = Compact(self.grid, **compaction)NEWLINENEWLINE self._components.update(NEWLINE sea_level=self._sea_level,NEWLINE subsidence=self._subsidence,NEWLINE compaction=self._compaction,NEWLINE submarine_diffusion=self._submarine_diffusion,NEWLINE fluvial=self._fluvial,NEWLINE flexure=self._flexure,NEWLINE shoreline=self._shoreline,NEWLINE )NEWLINENEWLINE @propertyNEWLINE def grid(self):NEWLINE return self._gridNEWLINENEWLINE @propertyNEWLINE def clock(self):NEWLINE return self._clockNEWLINENEWLINE @classmethodNEWLINE def from_path(cls, filepath, fmt=None):NEWLINE return cls(**load_config(filepath, fmt=fmt))NEWLINENEWLINE def set_params(self, params):NEWLINE for component, values in params.items():NEWLINE c = self._components[component]NEWLINE for param, value in values.items():NEWLINE setattr(c, param, value)NEWLINENEWLINE def run_one_step(self, dt=None, output=None):NEWLINE """Run each component for one time step."""NEWLINE dt = dt or self.clock.stepNEWLINE self.clock.dt = dtNEWLINE self.clock.advance()NEWLINENEWLINE self.advance_components(dt)NEWLINENEWLINE def run(self, output=None):NEWLINE """Run the model until complete."""NEWLINE try:NEWLINE while 1:NEWLINE self.run_one_step()NEWLINE except StopIteration:NEWLINE passNEWLINENEWLINE def advance_components(self, dt):NEWLINE for component in self._components.values():NEWLINE component.run_one_step(dt)NEWLINENEWLINE dz = self.grid.at_node["sediment_deposit__thickness"]NEWLINE percent_sand = self.grid.at_node["delta_sediment_sand__volume_fraction"]NEWLINE water_depth = (NEWLINE self.grid.at_grid["sea_level__elevation"]NEWLINE - self.grid.at_node["topographic__elevation"]NEWLINE )NEWLINENEWLINE self.grid.event_layers.add(NEWLINE dz[self.grid.node_at_cell],NEWLINE age=self.clock.time,NEWLINE water_depth=water_depth[self.grid.node_at_cell],NEWLINE t0=dz[self.grid.node_at_cell].clip(0.0),NEWLINE percent_sand=percent_sand[self.grid.node_at_cell],NEWLINE porosity=self._compaction.porosity_max,NEWLINE )NEWLINENEWLINE try:NEWLINE self._n_archived_layersNEWLINE except AttributeError:NEWLINE self._n_archived_layers = 0NEWLINENEWLINE if (NEWLINE self.grid.event_layers.number_of_layers - 
self._n_archived_layersNEWLINE ) % 20 == 0:NEWLINE self.grid.event_layers.reduce(NEWLINE self._n_archived_layers,NEWLINE self._n_archived_layers + 10,NEWLINE age=np.max,NEWLINE percent_sand=np.mean,NEWLINE porosity=np.mean,NEWLINE t0=np.sum,NEWLINE water_depth=np.mean,NEWLINE )NEWLINE self._n_archived_layers += 1NEWLINE
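# --- Editor's addition: a minimal, hedged sketch; not part of the original module. ---NEWLINE# advance_components() above archives event layers whenever the count past theNEWLINE# archived marker reaches a multiple of 20, merging ten layers into one (soNEWLINE# each reduce() removes nine layers). A standalone trace of that bookkeeping,NEWLINE# assuming one new layer per step:NEWLINEif __name__ == "__main__":NEWLINE    _n_layers, _n_archived = 0, 0NEWLINE    for _step in range(100):NEWLINE        _n_layers += 1NEWLINE        if (_n_layers - _n_archived) % 20 == 0:NEWLINE            _n_layers -= 9  # layers [_n_archived, _n_archived + 10) merged into oneNEWLINE            _n_archived += 1NEWLINE    print(_n_layers, _n_archived)NEWLINE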
#!python3NEWLINE#encoding: utf-8NEWLINEimport requestsNEWLINEfrom bs4 import BeautifulSoupNEWLINEimport os.pathNEWLINE"""NEWLINEIn BeautifulSoup, the only CSS pseudo-class that appears to be implemented is nth-of-type:NEWLINENotImplementedError: Only the following pseudo-classes are implemented: nth-of-type.NEWLINEThis module implements substitute functions for the missing pseudo-classes.NEWLINE"""NEWLINEclass CssPseudoClass(object):NEWLINE def __init__(self):NEWLINE passNEWLINENEWLINE def Has(self, parent, child):NEWLINE """Return whether parent has a direct child with the given tag name.NEWLINENEWLINE @param {HtmlElement} parent: the HTML element to inspectNEWLINE @param {str} child: the tag name to look for among parent's childrenNEWLINE @return {boolean} True if such a child exists, otherwise FalseNEWLINE """NEWLINE for c in parent.children:NEWLINE if child == c.name:NEWLINE return TrueNEWLINE return FalseNEWLINE
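# --- Editor's addition: a minimal, hedged sketch; not part of the original module. ---NEWLINE# Assumed usage of Has() as a stand-in for the unsupported :has() pseudo-class:NEWLINEif __name__ == '__main__':NEWLINE    soup = BeautifulSoup('<div><p>hi</p></div><div>text only</div>', 'html.parser')NEWLINE    checker = CssPseudoClass()NEWLINE    print([checker.Has(div, 'p') for div in soup.select('div')])  # -> [True, False]NEWLINE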
from __future__ import print_functionNEWLINEimport unittestNEWLINEimport SimPEG.daskNEWLINEfrom SimPEG import (NEWLINE directives,NEWLINE maps,NEWLINE inverse_problem,NEWLINE optimization,NEWLINE data_misfit,NEWLINE inversion,NEWLINE utils,NEWLINE regularization,NEWLINE)NEWLINENEWLINEfrom discretize.utils import meshutilsNEWLINENEWLINEimport shutilNEWLINENEWLINE# import SimPEG.PF as PFNEWLINEfrom SimPEG.potential_fields import magnetics as magNEWLINEimport numpy as npNEWLINENEWLINENEWLINEclass MagInvLinProblemTest(unittest.TestCase):NEWLINE def setUp(self):NEWLINENEWLINE np.random.seed(0)NEWLINENEWLINE # First we need to define the direction of the inducing fieldNEWLINE # As a simple case, we pick a vertical inducing field of magnitudeNEWLINE # 50,000nT.NEWLINE # From old convention, field orientation is given as anNEWLINE # azimuth from North (positive clockwise)NEWLINE # and dip from the horizontal (positive downward).NEWLINE H0 = (50000.0, 90.0, 0.0)NEWLINENEWLINE # Create a meshNEWLINE h = [5, 5, 5]NEWLINE padDist = np.ones((3, 2)) * 100NEWLINE nCpad = [2, 4, 2]NEWLINENEWLINE # Create grid of points for topographyNEWLINE # Let's create a simple Gaussian topo and set the active cellsNEWLINE [xx, yy] = np.meshgrid(NEWLINE np.linspace(-200.0, 200.0, 50), np.linspace(-200.0, 200.0, 50)NEWLINE )NEWLINENEWLINE b = 100NEWLINE A = 50NEWLINE zz = A * np.exp(-0.5 * ((xx / b) ** 2.0 + (yy / b) ** 2.0))NEWLINENEWLINE # We would usually load a topofileNEWLINE topo = np.c_[utils.mkvc(xx), utils.mkvc(yy), utils.mkvc(zz)]NEWLINENEWLINE # Create an array of observation pointsNEWLINE xr = np.linspace(-100.0, 100.0, 20)NEWLINE yr = np.linspace(-100.0, 100.0, 20)NEWLINE X, Y = np.meshgrid(xr, yr)NEWLINE Z = A * np.exp(-0.5 * ((X / b) ** 2.0 + (Y / b) ** 2.0)) + 5NEWLINENEWLINE # Create a MAGsurveyNEWLINE xyzLoc = np.c_[utils.mkvc(X.T), utils.mkvc(Y.T), utils.mkvc(Z.T)]NEWLINE rxLoc = mag.Point(xyzLoc)NEWLINE srcField = mag.SourceField([rxLoc], parameters=H0)NEWLINE survey = mag.Survey(srcField)NEWLINENEWLINE # self.mesh.finalize()NEWLINE self.mesh = meshutils.mesh_builder_xyz(NEWLINE xyzLoc,NEWLINE h,NEWLINE padding_distance=padDist,NEWLINE mesh_type="TREE",NEWLINE )NEWLINENEWLINE self.mesh = meshutils.refine_tree_xyz(NEWLINE self.mesh,NEWLINE topo,NEWLINE method="surface",NEWLINE octree_levels=nCpad,NEWLINE octree_levels_padding=nCpad,NEWLINE finalize=True,NEWLINE )NEWLINENEWLINE # Define active cells from topoNEWLINE actv = utils.surface2ind_topo(self.mesh, topo)NEWLINE nC = int(actv.sum())NEWLINENEWLINE # We can now create a susceptibility model and generate dataNEWLINE # Let's start with a simple block in half-spaceNEWLINE self.model = utils.model_builder.addBlock(NEWLINE self.mesh.gridCC,NEWLINE np.zeros(self.mesh.nC),NEWLINE np.r_[-20, -20, -15],NEWLINE np.r_[20, 20, 20],NEWLINE 0.05,NEWLINE )[actv]NEWLINENEWLINE # Create active map to go from reduced set to fullNEWLINE self.actvMap = maps.InjectActiveCells(self.mesh, actv, np.nan)NEWLINENEWLINE # Create a reduced identity mapNEWLINE idenMap = maps.IdentityMap(nP=nC)NEWLINENEWLINE # Create the forward model operatorNEWLINE sim = mag.Simulation3DIntegral(NEWLINE self.mesh,NEWLINE survey=survey,NEWLINE chiMap=idenMap,NEWLINE actInd=actv,NEWLINE store_sensitivities="ram",NEWLINE )NEWLINE self.sim = simNEWLINE data = sim.make_synthetic_data(NEWLINE self.model, relative_error=0.0, noise_floor=1.0, add_noise=TrueNEWLINE )NEWLINENEWLINE # Create a regularizationNEWLINE reg = regularization.Sparse(self.mesh, indActive=actv, mapping=idenMap)NEWLINE reg.norms = 
np.c_[0, 0, 0, 0]NEWLINENEWLINE reg.mref = np.zeros(nC)NEWLINENEWLINE # Data misfit functionNEWLINE dmis = data_misfit.L2DataMisfit(simulation=sim, data=data)NEWLINENEWLINE # Add directives to the inversionNEWLINE opt = optimization.ProjectedGNCG(NEWLINE maxIter=10,NEWLINE lower=0.0,NEWLINE upper=10.0,NEWLINE maxIterLS=5,NEWLINE maxIterCG=5,NEWLINE tolCG=1e-4,NEWLINE stepOffBoundsFact=1e-4,NEWLINE )NEWLINENEWLINE invProb = inverse_problem.BaseInvProblem(dmis, reg, opt, beta=1e6)NEWLINENEWLINE # Here is where the norms are appliedNEWLINE # Pick a threshold parameter empirically based on the distribution ofNEWLINE # model parametersNEWLINE IRLS = directives.Update_IRLS(NEWLINE f_min_change=1e-3, max_irls_iterations=20, beta_tol=1e-1, beta_search=FalseNEWLINE )NEWLINE update_Jacobi = directives.UpdatePreconditioner()NEWLINE sensitivity_weights = directives.UpdateSensitivityWeights()NEWLINE self.inv = inversion.BaseInversion(NEWLINE invProb, directiveList=[IRLS, sensitivity_weights, update_Jacobi]NEWLINE )NEWLINENEWLINE def test_mag_inverse(self):NEWLINENEWLINE # Run the inversionNEWLINE mrec = self.inv.run(self.model * 1e-4)NEWLINENEWLINE residual = np.linalg.norm(mrec - self.model) / np.linalg.norm(self.model)NEWLINE # print(residual)NEWLINE # import matplotlib.pyplot as pltNEWLINE # plt.figure()NEWLINE # ax = plt.subplot(1, 2, 1)NEWLINE # midx = 65NEWLINE # self.mesh.plotSlice(self.actvMap*mrec, ax=ax, normal='Y', ind=midx,NEWLINE # grid=True, clim=(0, 0.02))NEWLINE # ax.set_xlim(self.mesh.gridCC[:, 0].min(), self.mesh.gridCC[:, 0].max())NEWLINE # ax.set_ylim(self.mesh.gridCC[:, 2].min(), self.mesh.gridCC[:, 2].max())NEWLINENEWLINE # ax = plt.subplot(1, 2, 2)NEWLINE # self.mesh.plotSlice(self.actvMap*self.model, ax=ax, normal='Y', ind=midx,NEWLINE # grid=True, clim=(0, 0.02))NEWLINE # ax.set_xlim(self.mesh.gridCC[:, 0].min(), self.mesh.gridCC[:, 0].max())NEWLINE # ax.set_ylim(self.mesh.gridCC[:, 2].min(), self.mesh.gridCC[:, 2].max())NEWLINE # plt.show()NEWLINENEWLINE self.assertLess(residual, 1)NEWLINE # self.assertTrue(residual < 0.05)NEWLINENEWLINE def tearDown(self):NEWLINE # Clean up the working directoryNEWLINE if self.sim.store_sensitivities == "disk":NEWLINE shutil.rmtree(self.sim.sensitivity_path)NEWLINENEWLINENEWLINEif __name__ == "__main__":NEWLINE unittest.main()NEWLINE
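# --- Editor's addition: a hedged note; not part of the original test. ---NEWLINE# reg.norms = np.c_[0, 0, 0, 0] above requests approximate L0 penalties on theNEWLINE# smallness and x/y/z smoothness terms; the Update_IRLS directive enforces themNEWLINE# by iteratively reweighted least squares, roughly replacing sum(|m| ** p) withNEWLINE# sum(r * m ** 2) where r = (m ** 2 + eps ** 2) ** (p / 2 - 1). With p = 0,NEWLINE# entries much smaller than eps get weight ~ 1 / eps ** 2 and are driven towardNEWLINE# zero, which is what makes the recovered model sparse and blocky.NEWLINE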
from hs.entities.cluster_config import ClusterConfigNEWLINEimport pytestNEWLINENEWLINE@pytest.mark.xfailNEWLINEdef test_config_read_failed():NEWLINE d = {NEWLINE 'aaaaa': 'failed'NEWLINE }NEWLINE ClusterConfig.deserialize(d)NEWLINENEWLINEdef test_config_read_successful():NEWLINE d = {NEWLINE "clusters": [NEWLINE {NEWLINE "cluster": {NEWLINE "server": "http://localhost:9090"NEWLINE },NEWLINE "name": "local"NEWLINE }NEWLINE ],NEWLINE "current_cluster": "local",NEWLINE }NEWLINE res = ClusterConfig.parse_obj(d)NEWLINE print(res)NEWLINENEWLINEdef test_config_write_successful():NEWLINE d = {NEWLINE "clusters": [NEWLINE {NEWLINE "cluster": {NEWLINE "server": "http://localhost:9090"NEWLINE },NEWLINE "name": "local"NEWLINE }NEWLINE ],NEWLINE "current-cluster": "local",NEWLINE }NEWLINE res = ClusterConfig.parse_obj(d)NEWLINE result = res.dict(by_alias=True)NEWLINE print(d, result, sep="\n")NEWLINE assert d == result
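# --- Editor's addition: a minimal, hedged sketch; not part of the original tests. ---NEWLINE# The round-trip in test_config_write_successful relies on field aliasesNEWLINE# ('current-cluster' vs. current_cluster), which suggests ClusterConfig is aNEWLINE# pydantic (v1-style) model. A standalone analogue of that mechanism:NEWLINE#NEWLINE#   from pydantic import BaseModel, FieldNEWLINE#NEWLINE#   class Cfg(BaseModel):NEWLINE#       current_cluster: str = Field(alias='current-cluster')NEWLINE#NEWLINE#       class Config:NEWLINE#           allow_population_by_field_name = TrueNEWLINE#NEWLINE#   cfg = Cfg.parse_obj({'current-cluster': 'local'})NEWLINE#   assert cfg.dict(by_alias=True) == {'current-cluster': 'local'}NEWLINE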
# Lint as: python3NEWLINENEWLINE# Copyright 2020 Google LLC.NEWLINE#NEWLINE# Licensed under the Apache License, Version 2.0 (the "License");NEWLINE# you may not use this file except in compliance with the License.NEWLINE# You may obtain a copy of the License atNEWLINE#NEWLINE# http://www.apache.org/licenses/LICENSE-2.0NEWLINE#NEWLINE# Unless required by applicable law or agreed to in writing, softwareNEWLINE# distributed under the License is distributed on an "AS IS" BASIS,NEWLINE# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.NEWLINE# See the License for the specific language governing permissions andNEWLINE# limitations under the License.NEWLINENEWLINE"""Tests for client."""NEWLINENEWLINEimport osNEWLINEimport reNEWLINEfrom typing import ListNEWLINENEWLINEimport csvNEWLINEimport tempfileNEWLINEimport unittestNEWLINENEWLINEimport mockNEWLINEimport pandas as pdNEWLINEimport tensorflow as tfNEWLINENEWLINEfrom tfrecorder import utilsNEWLINEfrom tfrecorder import beam_pipelineNEWLINEfrom tfrecorder import converterNEWLINEfrom tfrecorder import dataset_loaderNEWLINEfrom tfrecorder import test_utilsNEWLINEfrom tfrecorder import input_schemaNEWLINENEWLINENEWLINE# pylint: disable=protected-accessNEWLINENEWLINENEWLINEclass IsDirectoryTest(unittest.TestCase):NEWLINE """Tests `_is_directory`."""NEWLINENEWLINE def test_local_ok(self):NEWLINE """Test function returns True on local directory."""NEWLINENEWLINE with tempfile.TemporaryDirectory() as dirname:NEWLINE self.assertTrue(converter._is_directory(dirname))NEWLINENEWLINE def test_local_exists_but_not_dir(self):NEWLINE """Test function returns False on local (non-directory) file."""NEWLINENEWLINE with tempfile.NamedTemporaryFile(prefix='test_', dir='/tmp') as f:NEWLINE self.assertFalse(converter._is_directory(f.name))NEWLINENEWLINENEWLINE# TODO(cezequiel): Refactor to per-function test case classesNEWLINEclass MiscTest(unittest.TestCase):NEWLINE """Misc tests for `client` module."""NEWLINENEWLINE def setUp(self):NEWLINE self.test_df = test_utils.get_test_df()NEWLINE self.test_region = 'us-central1'NEWLINE self.test_project = 'foo'NEWLINE self.test_wheel = '/my/path/wheel.whl'NEWLINENEWLINE @mock.patch.object(beam_pipeline, 'build_pipeline', autospec=True)NEWLINE def test_create_tfrecords_direct_runner(self, _):NEWLINE """Tests `create_tfrecords` Direct case."""NEWLINE r = converter.convert(NEWLINE self.test_df,NEWLINE runner='DirectRunner',NEWLINE output_dir='/tmp/direct_runner')NEWLINE self.assertCountEqual(r.keys(), ['job_id', 'metrics', 'tfrecord_dir'])NEWLINE self.assertCountEqual(NEWLINE r['metrics'].keys(), ['rows', 'good_images', 'bad_images'])NEWLINENEWLINE @mock.patch.object(converter, '_get_dataflow_url')NEWLINE @mock.patch.object(beam_pipeline, 'build_pipeline')NEWLINE def test_create_tfrecords_dataflow_runner(self, mock_pipeline, mock_url):NEWLINE """Tests `create_tfrecords` Dataflow case."""NEWLINE job_id = 'foo_id'NEWLINE dataflow_url = 'http://some/job/url'NEWLINE mock_pipeline().run().job_id.return_value = job_idNEWLINE mock_url.return_value = dataflow_urlNEWLINE df2 = self.test_df.copy()NEWLINE df2['image_uri'] = 'gs://' + df2['image_uri']NEWLINENEWLINE outdir = '/tmp/dataflow_runner'NEWLINE os.makedirs(outdir, exist_ok=True)NEWLINE r = converter.convert(NEWLINE df2,NEWLINE runner='DataflowRunner',NEWLINE output_dir=outdir,NEWLINE region=self.test_region,NEWLINE project=self.test_project,NEWLINE tfrecorder_wheel=self.test_wheel)NEWLINENEWLINE self.assertCountEqual(r.keys(), ['job_id', 'dataflow_url', 
'tfrecord_dir'])NEWLINE self.assertEqual(r['job_id'], job_id)NEWLINE self.assertEqual(r['dataflow_url'], dataflow_url)NEWLINE self.assertRegex(r['tfrecord_dir'], fr'{outdir}/tfrecorder-.+-?.*')NEWLINENEWLINENEWLINEclass InputValidationTest(unittest.TestCase):NEWLINE """Tests for validating input data."""NEWLINENEWLINE def setUp(self):NEWLINE self.test_df = test_utils.get_test_df()NEWLINE self.test_region = 'us-central1'NEWLINE self.test_project = 'foo'NEWLINE self.test_wheel = '/my/path/wheel.whl'NEWLINE self.test_schema = input_schema.IMAGE_CSV_SCHEMANEWLINENEWLINE def test_valid_dataframe(self):NEWLINE """Tests valid DataFrame input."""NEWLINE self.assertIsNone(converter._validate_data(self.test_df, self.test_schema))NEWLINENEWLINE def test_missing_image(self):NEWLINE """Tests missing image column."""NEWLINE with self.assertRaises(AttributeError):NEWLINE df2 = self.test_df.copy()NEWLINE df2.drop('image_uri', inplace=True, axis=1)NEWLINE converter._validate_data(df2, self.test_schema)NEWLINENEWLINE def test_missing_label(self):NEWLINE """Tests missing label column."""NEWLINE with self.assertRaises(AttributeError):NEWLINE df2 = self.test_df.copy()NEWLINE df2.drop('label', inplace=True, axis=1)NEWLINE converter._validate_data(df2, self.test_schema)NEWLINENEWLINE def test_missing_split(self):NEWLINE """Tests missing split column."""NEWLINE split_key = 'split'NEWLINE schema_keys = re.escape(NEWLINE str(list(self.test_schema.input_schema_map.keys())))NEWLINE regex = fr'^.+column: {split_key}.+keys: {schema_keys}.$'NEWLINE with self.assertRaisesRegex(AttributeError, regex):NEWLINE df2 = self.test_df.copy()NEWLINE df2.drop(split_key, inplace=True, axis=1)NEWLINE converter._validate_data(df2, self.test_schema)NEWLINENEWLINE def test_valid_runner(self):NEWLINE """Tests valid runner."""NEWLINE self.assertIsNone(converter._validate_runner(NEWLINE runner='DirectRunner',NEWLINE project=self.test_project,NEWLINE region=self.test_region,NEWLINE tfrecorder_wheel=None))NEWLINENEWLINE def test_invalid_runner(self):NEWLINE """Tests invalid runner."""NEWLINE with self.assertRaises(AttributeError):NEWLINE converter._validate_runner(NEWLINE runner='FooRunner',NEWLINE project=self.test_project,NEWLINE region=self.test_region,NEWLINE tfrecorder_wheel=None)NEWLINENEWLINENEWLINE def test_gcs_path_with_dataflow_runner_missing_param(self):NEWLINE """Tests DataflowRunner with missing required parameter."""NEWLINE for p, r in [NEWLINE (None, self.test_region), (self.test_project, None), (None, None)]:NEWLINE with self.assertRaises(AttributeError) as context:NEWLINE converter._validate_runner(NEWLINE runner='DataflowRunner',NEWLINE project=p,NEWLINE region=r,NEWLINE tfrecorder_wheel=self.test_wheel)NEWLINE self.assertTrue('DataflowRunner requires valid `project` and `region`'NEWLINE in repr(context.exception))NEWLINENEWLINENEWLINE def test_gcs_path_with_dataflow_runner_missing_wheel(self):NEWLINE """Tests DataflowRunner with missing required whl path."""NEWLINE with self.assertRaises(AttributeError) as context:NEWLINE converter._validate_runner(NEWLINE runner='DataflowRunner',NEWLINE project=self.test_project,NEWLINE region=self.test_region,NEWLINE tfrecorder_wheel=None)NEWLINE self.assertTrue('requires a tfrecorder whl file for remote execution.'NEWLINE in repr(context.exception))NEWLINENEWLINENEWLINEdef _make_csv_tempfile(data: List[List[str]]) -> tempfile.NamedTemporaryFile:NEWLINE """Returns `NamedTemporaryFile` representing an image CSV."""NEWLINENEWLINE f = tempfile.NamedTemporaryFile(mode='w+t', 
suffix='.csv')NEWLINE writer = csv.writer(f, delimiter=',')NEWLINE for row in data:NEWLINE writer.writerow(row)NEWLINE f.seek(0)NEWLINE return fNEWLINENEWLINENEWLINEdef get_sample_image_csv_data() -> List[List[str]]:NEWLINE """Returns sample CSV data in Image CSV format."""NEWLINENEWLINE data = test_utils.get_test_data()NEWLINE header = list(data.keys())NEWLINE content = [list(row) for row in zip(*data.values())]NEWLINE return [header] + contentNEWLINENEWLINENEWLINEclass ReadCSVTest(unittest.TestCase):NEWLINE """Tests `read_csv`."""NEWLINENEWLINE def setUp(self):NEWLINE data = get_sample_image_csv_data()NEWLINE self.header = data.pop(0)NEWLINE self.sample_data = dataNEWLINENEWLINE def test_valid_csv_no_header_no_names_specified(self):NEWLINE """Tests a valid CSV without a header and no header names given."""NEWLINE f = _make_csv_tempfile(self.sample_data)NEWLINE actual = converter.read_csv(f.name, header=None)NEWLINE self.assertEqual(NEWLINE list(actual.columns),NEWLINE list(input_schema.IMAGE_CSV_SCHEMA.get_input_keys()))NEWLINE self.assertEqual(actual.values.tolist(), self.sample_data)NEWLINENEWLINE def test_valid_csv_no_header_names_specified(self):NEWLINE """Tests valid CSV without a header, but header names are given."""NEWLINE f = _make_csv_tempfile(self.sample_data)NEWLINE actual = converter.read_csv(f.name, header=None, names=self.header)NEWLINE self.assertEqual(list(actual.columns), self.header)NEWLINE self.assertEqual(actual.values.tolist(), self.sample_data)NEWLINENEWLINE def test_valid_csv_with_header_no_names_specified(self):NEWLINE """Tests valid CSV with header, and no header names given (inferred)."""NEWLINENEWLINE f = _make_csv_tempfile([self.header] + self.sample_data)NEWLINE actual = converter.read_csv(f.name)NEWLINE self.assertEqual(list(actual.columns), self.header)NEWLINE self.assertEqual(actual.values.tolist(), self.sample_data)NEWLINENEWLINE def test_valid_csv_with_header_names_specified(self):NEWLINE """Tests valid CSV with header, and header names given (override)."""NEWLINENEWLINE f = _make_csv_tempfile([self.header] + self.sample_data)NEWLINE actual = converter.read_csv(f.name, names=self.header, header=0)NEWLINE self.assertEqual(list(actual.columns), self.header)NEWLINE self.assertEqual(actual.values.tolist(), self.sample_data)NEWLINENEWLINENEWLINEclass ToDataFrameTest(unittest.TestCase):NEWLINE """Tests `to_dataframe`."""NEWLINENEWLINE def setUp(self) -> None:NEWLINE sample_data = get_sample_image_csv_data()NEWLINE columns = sample_data.pop(0)NEWLINE self.input_df = pd.DataFrame(sample_data, columns=columns)NEWLINENEWLINE @mock.patch.object(converter, 'read_csv', autospec=True)NEWLINE def test_input_csv(self, read_csv):NEWLINE """Tests valid input CSV file."""NEWLINE expected = self.input_dfNEWLINE read_csv.return_value = expectedNEWLINE f = _make_csv_tempfile(get_sample_image_csv_data())NEWLINE actual = converter.to_dataframe(f.name)NEWLINE pd.testing.assert_frame_equal(actual, expected)NEWLINENEWLINE def test_input_dataframe_no_names_specified(self):NEWLINE """Tests valid input dataframe with no header names specified."""NEWLINE actual = converter.to_dataframe(self.input_df)NEWLINE pd.testing.assert_frame_equal(actual, self.input_df)NEWLINENEWLINE def test_input_dataframe_with_header(self):NEWLINE """Tests valid input dataframe with header specified."""NEWLINE names = list(self.input_df.columns[0:-1])NEWLINE actual = converter.to_dataframe(self.input_df, names=names)NEWLINE pd.testing.assert_frame_equal(actual, self.input_df[names])NEWLINENEWLINE 
@mock.patch.object(utils, 'read_image_directory', autospec=True)NEWLINE def test_input_image_dir(self, mock_fn):NEWLINE """Tests valid input image directory."""NEWLINENEWLINE mock_fn.return_value = self.input_dfNEWLINENEWLINE with tempfile.TemporaryDirectory() as input_data:NEWLINE actual = converter.to_dataframe(input_data)NEWLINE pd.testing.assert_frame_equal(actual, self.input_df)NEWLINENEWLINE def test_error_invalid_inputs(self):NEWLINE """Tests error handling with different invalid inputs."""NEWLINE inputs = [0, 'not_a_csv_file', list(), dict()]NEWLINE for input_data in inputs:NEWLINE with self.assertRaises(ValueError):NEWLINE converter.to_dataframe(input_data)NEWLINENEWLINENEWLINEclass ConvertAndLoadTest(unittest.TestCase):NEWLINE """Tests `convert_and_load`."""NEWLINENEWLINE def setUp(self):NEWLINE self.tfrecord_dir = '/path/to/tfrecords'NEWLINE self.dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])NEWLINE self.datasets = {NEWLINE 'train': self.dataset,NEWLINE 'validation': self.dataset,NEWLINE 'test': self.dataset,NEWLINE }NEWLINENEWLINE @mock.patch.object(dataset_loader, 'load', autospec=True)NEWLINE @mock.patch.object(converter, 'convert', autospec=True)NEWLINE def test_convert_and_load_normal(self, convert_fn, load_fn):NEWLINE """Tests normal case."""NEWLINE convert_fn.return_value = dict(tfrecord_dir=self.tfrecord_dir)NEWLINE load_fn.return_value = self.datasetsNEWLINE source = '/path/to/data.csv'NEWLINE datasets = converter.convert_and_load(source)NEWLINE self.assertEqual(datasets, self.datasets)NEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE unittest.main()NEWLINE
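These converter tests all follow the same recipe: patch the expensive Beam entry point with mock.patch.object(..., autospec=True), then assert only on the shape of the returned metadata dict. A minimal, self-contained sketch of that pattern (the Converter class below is illustrative, not tfrecorder's actual implementation):

import unittest
from unittest import mock


class Converter:
    """Stand-in converter whose expensive pipeline call we stub out."""

    def run_pipeline(self, df):
        raise RuntimeError('would launch a real Beam job')  # never hit in tests

    def convert(self, df):
        job_id = self.run_pipeline(df)
        return {'job_id': job_id, 'metrics': {'rows': len(df)}}


class ConvertTest(unittest.TestCase):
    @mock.patch.object(Converter, 'run_pipeline', autospec=True)
    def test_convert_returns_expected_keys(self, run_pipeline):
        # autospec=True makes the stub enforce run_pipeline's real signature.
        run_pipeline.return_value = 'fake_job'
        result = Converter().convert([1, 2, 3])
        self.assertCountEqual(result.keys(), ['job_id', 'metrics'])
        self.assertEqual(result['metrics']['rows'], 3)


if __name__ == '__main__':
    unittest.main()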
import asyncio
import io

import discord
from asyncpg.pool import Pool
from discord.ext import commands

from utils.db.cache import BotCache
from utils.ext import standards as std


class Context(commands.Context):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.pool = self.bot.db
        self._db = None

    async def safe_send(self, content, *, escape_mentions=True, **kwargs):
        if escape_mentions:
            content = discord.utils.escape_mentions(content)

        content = content.replace("`", "")  # str.replace returns a new string; reassign it

        if len(content) > 2000:
            fp = io.BytesIO(content.encode())
            kwargs.pop('file', None)
            return await self.reply(file=discord.File(fp, filename='message_too_long.txt'), **kwargs)
        else:
            return await self.reply(content, **kwargs)

    @property
    def db(self) -> Pool:
        return self._db if self._db else self.pool

    @property
    def cache(self) -> BotCache:
        return self.bot.cache

    async def lang(self, utils=False, module=None):
        if module is None:
            module = self.cog.qualified_name

        if isinstance(module, list):
            data = {}
            for _module in module:
                data |= await self.bot.lang(self.guild.id, _module.lower(), utils)
        else:
            data = await self.bot.lang(self.guild.id, module.lower(), utils)

        return data

    async def release(self):
        if self._db is not None:
            await self.bot.pool.release(self._db)
            self._db = None

    async def error(self, message: str, **kwargs):
        return await self.reply(embed=std.getErrorEmbed(message), **kwargs)

    async def embed(self, message: str, signed=False, **kwargs):
        embed = std.getEmbed(message)
        if signed:
            embed.set_footer(icon_url=self.author.avatar_url, text=f'Requested by {self.author}')

        return await self.reply(embed=embed, **kwargs)

    async def prompt(self, message, *, timeout=60.0, delete_after=True, reacquire=True,
                     author_id=None):
        if not self.channel.permissions_for(self.me).add_reactions:
            raise RuntimeError('The bot is missing permission to add reactions.')

        fmt = f'{message}\n\nReact with {std.yes_emoji} to confirm or {std.no_emoji} ' \
              f'to cancel.'

        author_id = author_id or self.author.id
        msg = await self.reply('Ping!', embed=discord.Embed(color=std.normal_color, description=fmt))

        confirm = None

        def check(payload):
            nonlocal confirm

            if payload.message_id != msg.id or payload.user_id != author_id:
                return False

            codepoint = str(payload.emoji)

            if codepoint == std.yes_emoji:
                confirm = True
                return True
            elif codepoint == std.no_emoji:
                confirm = False
                return True

            return False

        for emoji in (std.yes_emoji, std.no_emoji):
            await msg.add_reaction(emoji)

        if reacquire:
            await self.release()

        try:
            await self.bot.wait_for('raw_reaction_add', check=check, timeout=timeout)
        except asyncio.TimeoutError:
            confirm = None

        try:
            if delete_after:
                await msg.delete()
        finally:
            return confirm


class FakeContext:
    def __init__(self, bot, guild):
        self.bot = bot
        self.guild = guild

    @property
    def cache(self):
        return self.bot.cache

    @property
    def me(self):
        return self.guild.me
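safe_send above has one branch worth remembering: once the stripped content exceeds Discord's 2000-character message limit, it is shipped as a file attachment instead of a message. That branch logic can be exercised without a Discord connection; a minimal sketch using only the stdlib (pack_content is a hypothetical helper mirroring the branch, not part of the bot):

import io

MESSAGE_LIMIT = 2000  # Discord's per-message character limit


def pack_content(content: str) -> dict:
    """Mirror safe_send's fallback: plain text, or a file once it's too long."""
    content = content.replace('`', '')  # str.replace returns a new string
    if len(content) > MESSAGE_LIMIT:
        return {'file': io.BytesIO(content.encode()),
                'filename': 'message_too_long.txt'}
    return {'content': content}


short = pack_content('hello')
overflow = pack_content('x' * 3000)
assert short == {'content': 'hello'}
assert overflow['file'].read().decode() == 'x' * 3000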
"""PyTest cases related to the integration between FERC1 & EIA 860/923."""NEWLINEimport loggingNEWLINENEWLINEimport pytestNEWLINENEWLINEimport pudlNEWLINENEWLINElogger = logging.getLogger(__name__)NEWLINENEWLINENEWLINE@pytest.fixture(scope="module")NEWLINEdef fast_out(pudl_engine, pudl_datastore_fixture):NEWLINE """A PUDL output object for use in CI."""NEWLINE return pudl.output.pudltabl.PudlTabl(NEWLINE pudl_engine,NEWLINE ds=pudl_datastore_fixture,NEWLINE freq="MS",NEWLINE fill_fuel_cost=True,NEWLINE roll_fuel_cost=True,NEWLINE fill_net_gen=TrueNEWLINE )NEWLINENEWLINENEWLINEdef test_fuel_ferc1(fast_out):NEWLINE """Pull FERC 1 Fuel Data."""NEWLINE logger.info("Pulling a year's worth of FERC1 Fuel data.")NEWLINE fuel_df = fast_out.fuel_ferc1()NEWLINE logger.info(f"Pulled {len(fuel_df)} Fuel FERC1 records.")NEWLINENEWLINENEWLINEdef test_plants_steam_ferc1(fast_out):NEWLINE """Pull FERC 1 Steam Plants."""NEWLINE logger.info("Pulling FERC1 Steam Plants")NEWLINE steam_df = fast_out.plants_steam_ferc1()NEWLINE logger.info(f"Pulled{len(steam_df)} FERC1 steam plants records.")NEWLINENEWLINENEWLINEdef test_fbp_ferc1(fast_out):NEWLINE """Calculate fuel consumption by plant for FERC 1 for one year of data."""NEWLINE logger.info("Calculating FERC1 Fuel by Plant.")NEWLINE fbp_df = fast_out.fbp_ferc1()NEWLINE logger.info(f"Generated {len(fbp_df)} FERC1 fuel by plant records.")NEWLINENEWLINENEWLINEdef test_bga_eia860(fast_out):NEWLINE """Pull original EIA 860 Boiler Generator Associations."""NEWLINE logger.info("Pulling the EIA 860 Boiler Generator Associations.")NEWLINE bga_df = fast_out.bga_eia860()NEWLINE logger.info(f"Generated {len(bga_df)} BGA EIA 860 records.")NEWLINENEWLINENEWLINEdef test_own_eia860(fast_out):NEWLINE """Read EIA 860 generator ownership data."""NEWLINE logger.info("Pulling the EIA 860 ownership data.")NEWLINE own_df = fast_out.own_eia860()NEWLINE logger.info(f"Generated {len(own_df)} EIA 860 ownership records.")NEWLINENEWLINENEWLINEdef test_gf_eia923(fast_out):NEWLINE """Read EIA 923 generator fuel data. (not used in MCOE)."""NEWLINE logger.info("Pulling the EIA 923 generator fuel data.")NEWLINE gf_df = fast_out.gf_eia923()NEWLINE logger.info(f"Generated {len(gf_df)} EIA 923 generator fuel records.")NEWLINENEWLINENEWLINEdef test_mcoe(fast_out):NEWLINE """Calculate MCOE."""NEWLINE logger.info("Calculating MCOE.")NEWLINE mcoe_df = fast_out.mcoe()NEWLINE logger.info(f"Generated {len(mcoe_df)} MCOE records.")NEWLINENEWLINENEWLINEdef test_eia861_etl(fast_out):NEWLINE """Make sure that the EIA 861 Extract-Transform steps work."""NEWLINE fast_out.etl_eia861()NEWLINENEWLINENEWLINEdef test_ferc714_etl(fast_out):NEWLINE """Make sure that the FERC 714 Extract-Transform steps work."""NEWLINE fast_out.etl_ferc714()NEWLINENEWLINENEWLINEdef test_ferc714_respondents(fast_out, pudl_settings_fixture):NEWLINE """Test the FERC 714 Respondent & Service Territory outputs."""NEWLINE ferc714_out = pudl.output.ferc714.Respondents(NEWLINE fast_out,NEWLINE pudl_settings=pudl_settings_fixture,NEWLINE )NEWLINE _ = ferc714_out.annualize()NEWLINE _ = ferc714_out.categorize()NEWLINE _ = ferc714_out.summarize_demand()NEWLINE _ = ferc714_out.fipsify()NEWLINE _ = ferc714_out.georef_counties()NEWLINE
from __future__ import absolute_import, division, print_functionNEWLINENEWLINEimport base64NEWLINEfrom collections import defaultdictNEWLINEimport contextlibNEWLINEimport fnmatchNEWLINEfrom glob2 import globNEWLINEimport jsonNEWLINEfrom locale import getpreferredencodingNEWLINEimport loggingNEWLINEimport logging.configNEWLINEimport mmapNEWLINEimport operatorNEWLINEimport osNEWLINEfrom os.path import dirname, getmtime, getsize, isdir, join, isfile, abspath, islinkNEWLINEimport reNEWLINEimport statNEWLINEimport subprocessNEWLINEimport sysNEWLINEimport shutilNEWLINEimport tarfileNEWLINEimport tempfileNEWLINEimport timeNEWLINEimport yamlNEWLINEimport zipfileNEWLINENEWLINEfrom distutils.version import LooseVersionNEWLINEimport filelockNEWLINENEWLINEfrom conda import __version__ as conda_versionNEWLINENEWLINEfrom .conda_interface import hashsum_file, md5_file, unix_path_to_win, win_path_to_unixNEWLINEfrom .conda_interface import PY3, iteritemsNEWLINEfrom .conda_interface import root_dir, pkgs_dirsNEWLINEfrom .conda_interface import string_types, url_path, get_rc_urlsNEWLINEfrom .conda_interface import memoizedNEWLINEfrom .conda_interface import StringIONEWLINEfrom .conda_interface import VersionOrder, MatchSpecNEWLINEfrom .conda_interface import cc_conda_buildNEWLINE# NOQA because it is not used in this file.NEWLINEfrom conda_build.conda_interface import rm_rf as _rm_rf # NOQANEWLINEfrom conda_build.os_utils import externalNEWLINENEWLINEif PY3:NEWLINE import urllib.parse as urlparseNEWLINE import urllib.request as urllibNEWLINE # NOQA because it is not used in this file.NEWLINE from contextlib import ExitStack # NOQANEWLINE PermissionError = PermissionError # NOQANEWLINEelse:NEWLINE import urlparseNEWLINE import urllibNEWLINE # NOQA because it is not used in this file.NEWLINE from contextlib2 import ExitStack # NOQANEWLINE PermissionError = OSErrorNEWLINENEWLINENEWLINEon_win = (sys.platform == 'win32')NEWLINENEWLINEcodec = getpreferredencoding() or 'utf-8'NEWLINEon_win = sys.platform == "win32"NEWLINEroot_script_dir = os.path.join(root_dir, 'Scripts' if on_win else 'bin')NEWLINEmmap_MAP_PRIVATE = 0 if on_win else mmap.MAP_PRIVATENEWLINEmmap_PROT_READ = 0 if on_win else mmap.PROT_READNEWLINEmmap_PROT_WRITE = 0 if on_win else mmap.PROT_WRITENEWLINENEWLINENEWLINEPY_TMPL = """NEWLINE# -*- coding: utf-8 -*-NEWLINEimport reNEWLINEimport sysNEWLINENEWLINEfrom %(module)s import %(import_name)sNEWLINENEWLINEif __name__ == '__main__':NEWLINE sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])NEWLINE sys.exit(%(func)s())NEWLINE"""NEWLINENEWLINENEWLINEdef get_recipe_abspath(recipe):NEWLINE """resolve recipe dir as absolute path. 
If recipe is a tarball rather than a folder,NEWLINE extract it and return the extracted directory.NEWLINENEWLINE Returns the absolute path, and a boolean flag that is true if a tarball has been extractedNEWLINE and needs cleanup.NEWLINE """NEWLINE # Don't use byte literals for paths in Python 2NEWLINE if not PY3:NEWLINE recipe = recipe.decode(getpreferredencoding() or 'utf-8')NEWLINE if isfile(recipe):NEWLINE if recipe.endswith(('.tar', '.tar.gz', '.tgz', '.tar.bz2')):NEWLINE recipe_dir = tempfile.mkdtemp()NEWLINE t = tarfile.open(recipe, 'r:*')NEWLINE t.extractall(path=recipe_dir)NEWLINE # At some stage the old build system started to tar up recipes.NEWLINE recipe_tarfile = os.path.join(recipe_dir, 'info', 'recipe.tar')NEWLINE if isfile(recipe_tarfile):NEWLINE t2 = tarfile.open(recipe_tarfile, 'r:*')NEWLINE t2.extractall(path=os.path.join(recipe_dir, 'info'))NEWLINE t2.close()NEWLINE t.close()NEWLINE need_cleanup = TrueNEWLINE else:NEWLINE print("Ignoring non-recipe: %s" % recipe)NEWLINE return (None, None)NEWLINE else:NEWLINE recipe_dir = abspath(os.path.join(os.getcwd(), recipe))NEWLINE need_cleanup = FalseNEWLINE if not os.path.exists(recipe_dir):NEWLINE raise ValueError("Package or recipe at path {0} does not exist".format(recipe_dir))NEWLINE return recipe_dir, need_cleanupNEWLINENEWLINENEWLINE@contextlib.contextmanagerNEWLINEdef try_acquire_locks(locks, timeout):NEWLINE """Try to acquire all locks. If any lock can't be immediately acquired, free all locksNEWLINENEWLINE http://stackoverflow.com/questions/9814008/multiple-mutex-locking-strategies-and-why-libraries-dont-use-address-comparisonNEWLINE """NEWLINE t = time.time()NEWLINE while (time.time() - t < timeout):NEWLINE for lock in locks:NEWLINE try:NEWLINE lock.acquire(timeout=0.1)NEWLINE except filelock.Timeout:NEWLINE for lock in locks:NEWLINE lock.release()NEWLINE breakNEWLINE breakNEWLINE yieldNEWLINE for lock in locks:NEWLINE if lock:NEWLINE lock.release()NEWLINENEWLINENEWLINE# with each of these, we are copying less metadata. This seems to be necessaryNEWLINE# to cope with some shared filesystems with some virtual machine setups.NEWLINE# See https://github.com/conda/conda-build/issues/1426NEWLINEdef _copy_with_shell_fallback(src, dst):NEWLINE is_copied = FalseNEWLINE for func in (shutil.copy2, shutil.copy, shutil.copyfile):NEWLINE try:NEWLINE func(src, dst)NEWLINE is_copied = TrueNEWLINE breakNEWLINE except (IOError, OSError, PermissionError):NEWLINE continueNEWLINE if not is_copied:NEWLINE try:NEWLINE subprocess.check_call('cp -a {} {}'.format(src, dst), shell=True,NEWLINE stderr=subprocess.PIPE, stdout=subprocess.PIPE)NEWLINE except subprocess.CalledProcessError as e:NEWLINE if not os.path.isfile(dst):NEWLINE raise OSError("Failed to copy {} to {}. 
Error was: {}".format(src, dst, e))NEWLINENEWLINENEWLINEdef get_prefix_replacement_paths(src, dst):NEWLINE ssplit = src.split(os.path.sep)NEWLINE dsplit = dst.split(os.path.sep)NEWLINE while ssplit and ssplit[-1] == dsplit[-1]:NEWLINE del ssplit[-1]NEWLINE del dsplit[-1]NEWLINE return os.path.join(*ssplit), os.path.join(*dsplit)NEWLINENEWLINENEWLINEdef copy_into(src, dst, timeout=90, symlinks=False, lock=None, locking=True, clobber=False):NEWLINE """Copy all the files and directories in src to the directory dst"""NEWLINE log = get_logger(__name__)NEWLINE if symlinks and islink(src):NEWLINE try:NEWLINE os.makedirs(os.path.dirname(dst))NEWLINE except OSError:NEWLINE passNEWLINE if os.path.lexists(dst):NEWLINE os.remove(dst)NEWLINE src_base, dst_base = get_prefix_replacement_paths(src, dst)NEWLINE src_target = os.readlink(src)NEWLINE src_replaced = src_target.replace(src_base, dst_base)NEWLINE os.symlink(src_replaced, dst)NEWLINE try:NEWLINE st = os.lstat(src)NEWLINE mode = stat.S_IMODE(st.st_mode)NEWLINE os.lchmod(dst, mode)NEWLINE except:NEWLINE pass # lchmod not availableNEWLINE elif isdir(src):NEWLINE merge_tree(src, dst, symlinks, timeout=timeout, lock=lock, locking=locking, clobber=clobber)NEWLINENEWLINE else:NEWLINE if isdir(dst):NEWLINE dst_fn = os.path.join(dst, os.path.basename(src))NEWLINE else:NEWLINE dst_fn = dstNEWLINENEWLINE if os.path.isabs(src):NEWLINE src_folder = os.path.dirname(src)NEWLINE else:NEWLINE if os.path.sep in dst_fn:NEWLINE src_folder = os.path.dirname(dst_fn)NEWLINE if not os.path.isdir(src_folder):NEWLINE os.makedirs(src_folder)NEWLINE else:NEWLINE src_folder = os.getcwd()NEWLINENEWLINE if os.path.islink(src) and not os.path.exists(os.path.realpath(src)):NEWLINE log.warn('path %s is a broken symlink - ignoring copy', src)NEWLINE returnNEWLINENEWLINE if not lock and locking:NEWLINE lock = get_lock(src_folder, timeout=timeout)NEWLINE locks = [lock] if locking else []NEWLINE with try_acquire_locks(locks, timeout):NEWLINE # if intermediate folders not not exist create themNEWLINE dst_folder = os.path.dirname(dst)NEWLINE if dst_folder and not os.path.exists(dst_folder):NEWLINE try:NEWLINE os.makedirs(dst_folder)NEWLINE except OSError:NEWLINE passNEWLINE try:NEWLINE _copy_with_shell_fallback(src, dst_fn)NEWLINE except shutil.Error:NEWLINE log.debug("skipping %s - already exists in %s",NEWLINE os.path.basename(src), dst)NEWLINENEWLINENEWLINE# http://stackoverflow.com/a/22331852/1170370NEWLINEdef copytree(src, dst, symlinks=False, ignore=None, dry_run=False):NEWLINE if not os.path.exists(dst):NEWLINE os.makedirs(dst)NEWLINE shutil.copystat(src, dst)NEWLINE lst = os.listdir(src)NEWLINE if ignore:NEWLINE excl = ignore(src, lst)NEWLINE lst = [x for x in lst if x not in excl]NEWLINENEWLINE # do not copy lock filesNEWLINE if '.conda_lock' in lst:NEWLINE lst.remove('.conda_lock')NEWLINENEWLINE dst_lst = [os.path.join(dst, item) for item in lst]NEWLINENEWLINE if not dry_run:NEWLINE for idx, item in enumerate(lst):NEWLINE s = os.path.join(src, item)NEWLINE d = dst_lst[idx]NEWLINE if symlinks and os.path.islink(s):NEWLINE if os.path.lexists(d):NEWLINE os.remove(d)NEWLINE os.symlink(os.readlink(s), d)NEWLINE try:NEWLINE st = os.lstat(s)NEWLINE mode = stat.S_IMODE(st.st_mode)NEWLINE os.lchmod(d, mode)NEWLINE except:NEWLINE pass # lchmod not availableNEWLINE elif os.path.isdir(s):NEWLINE copytree(s, d, symlinks, ignore)NEWLINE else:NEWLINE _copy_with_shell_fallback(s, d)NEWLINENEWLINE return dst_lstNEWLINENEWLINENEWLINEdef merge_tree(src, dst, symlinks=False, timeout=90, 
lock=None, locking=True, clobber=False):NEWLINE """NEWLINE Merge src into dst recursively by copying all files from src into dst.NEWLINE Return a list of all files copied.NEWLINENEWLINE Like copytree(src, dst), but raises an error if merging the two treesNEWLINE would overwrite any files.NEWLINE """NEWLINE dst = os.path.normpath(os.path.normcase(dst))NEWLINE src = os.path.normpath(os.path.normcase(src))NEWLINE assert not dst.startswith(src), ("Can't merge/copy source into subdirectory of itself. "NEWLINE "Please create separate spaces for these things.")NEWLINENEWLINE new_files = copytree(src, dst, symlinks=symlinks, dry_run=True)NEWLINE existing = [f for f in new_files if isfile(f)]NEWLINENEWLINE if existing and not clobber:NEWLINE raise IOError("Can't merge {0} into {1}: file exists: "NEWLINE "{2}".format(src, dst, existing[0]))NEWLINENEWLINE locks = []NEWLINE if locking:NEWLINE if not lock:NEWLINE lock = get_lock(src, timeout=timeout)NEWLINE locks = [lock]NEWLINE with try_acquire_locks(locks, timeout):NEWLINE copytree(src, dst, symlinks=symlinks)NEWLINENEWLINENEWLINE# purpose here is that we want *one* lock per location on disk. It can be locked or unlockedNEWLINE# at any time, but the lock within this process should all be tied to the same trackingNEWLINE# mechanism.NEWLINE_lock_folders = (os.path.join(root_dir, 'locks'),NEWLINE os.path.expanduser(os.path.join('~', '.conda_build_locks')))NEWLINENEWLINENEWLINEdef get_lock(folder, timeout=90):NEWLINE fl = NoneNEWLINE try:NEWLINE location = os.path.abspath(os.path.normpath(folder))NEWLINE except OSError:NEWLINE location = folderNEWLINE b_location = locationNEWLINE if hasattr(b_location, 'encode'):NEWLINE b_location = b_location.encode()NEWLINE lock_filename = base64.urlsafe_b64encode(b_location)[:20]NEWLINE if hasattr(lock_filename, 'decode'):NEWLINE lock_filename = lock_filename.decode()NEWLINE for locks_dir in _lock_folders:NEWLINE try:NEWLINE if not os.path.isdir(locks_dir):NEWLINE os.makedirs(locks_dir)NEWLINE lock_file = os.path.join(locks_dir, lock_filename)NEWLINE with open(lock_file, 'w') as f:NEWLINE f.write("")NEWLINE fl = filelock.FileLock(lock_file, timeout)NEWLINE breakNEWLINE except (OSError, IOError):NEWLINE continueNEWLINE else:NEWLINE raise RuntimeError("Could not write locks folder to either system location ({0})"NEWLINE "or user location ({1}). 
Aborting.".format(*_lock_folders))NEWLINE return flNEWLINENEWLINENEWLINEdef get_conda_operation_locks(locking=True, bldpkgs_dirs=None, timeout=90):NEWLINE locks = []NEWLINE bldpkgs_dirs = ensure_list(bldpkgs_dirs)NEWLINE # locks enabled by defaultNEWLINE if locking:NEWLINE _pkgs_dirs = pkgs_dirs[:1]NEWLINE locked_folders = _pkgs_dirs + list(bldpkgs_dirs)NEWLINE for folder in locked_folders:NEWLINE if not os.path.isdir(folder):NEWLINE os.makedirs(folder)NEWLINE lock = get_lock(folder, timeout=timeout)NEWLINE locks.append(lock)NEWLINE # lock used to generally indicate a conda operation occurringNEWLINE locks.append(get_lock('conda-operation', timeout=timeout))NEWLINE return locksNEWLINENEWLINENEWLINEdef relative(f, d='lib'):NEWLINE assert not f.startswith('/'), fNEWLINE assert not d.startswith('/'), dNEWLINE d = d.strip('/').split('/')NEWLINE if d == ['.']:NEWLINE d = []NEWLINE f = dirname(f).split('/')NEWLINE if f == ['']:NEWLINE f = []NEWLINE while d and f and d[0] == f[0]:NEWLINE d.pop(0)NEWLINE f.pop(0)NEWLINE return '/'.join(((['..'] * len(f)) if f else ['.']) + d)NEWLINENEWLINENEWLINEdef tar_xf(tarball, dir_path, mode='r:*'):NEWLINE if tarball.lower().endswith('.tar.z'):NEWLINE uncompress = external.find_executable('uncompress')NEWLINE if not uncompress:NEWLINE uncompress = external.find_executable('gunzip')NEWLINE if not uncompress:NEWLINE sys.exit("""\NEWLINEuncompress (or gunzip) is required to unarchive .z source files.NEWLINE""")NEWLINE check_call_env([uncompress, '-f', tarball])NEWLINE tarball = tarball[:-2]NEWLINE if not PY3 and tarball.endswith('.tar.xz'):NEWLINE unxz = external.find_executable('unxz')NEWLINE if not unxz:NEWLINE sys.exit("""\NEWLINEunxz is required to unarchive .xz source files.NEWLINE""")NEWLINENEWLINE check_call_env([unxz, '-f', '-k', tarball])NEWLINE tarball = tarball[:-3]NEWLINE t = tarfile.open(tarball, mode)NEWLINE if not PY3:NEWLINE t.extractall(path=dir_path.encode(codec))NEWLINE else:NEWLINE t.extractall(path=dir_path)NEWLINE t.close()NEWLINENEWLINENEWLINEdef unzip(zip_path, dir_path):NEWLINE z = zipfile.ZipFile(zip_path)NEWLINE for info in z.infolist():NEWLINE name = info.filenameNEWLINE if name.endswith('/'):NEWLINE continueNEWLINE path = join(dir_path, *name.split('/'))NEWLINE dp = dirname(path)NEWLINE if not isdir(dp):NEWLINE os.makedirs(dp)NEWLINE with open(path, 'wb') as fo:NEWLINE fo.write(z.read(name))NEWLINE unix_attributes = info.external_attr >> 16NEWLINE if unix_attributes:NEWLINE os.chmod(path, unix_attributes)NEWLINE z.close()NEWLINENEWLINENEWLINEdef file_info(path):NEWLINE return {'size': getsize(path),NEWLINE 'md5': md5_file(path),NEWLINE 'sha256': hashsum_file(path, 'sha256'),NEWLINE 'mtime': getmtime(path)}NEWLINENEWLINE# Taken from toolzNEWLINENEWLINENEWLINEdef groupby(key, seq):NEWLINE """ Group a collection by a key functionNEWLINE >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']NEWLINE >>> groupby(len, names) # doctest: +SKIPNEWLINE {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}NEWLINE >>> iseven = lambda x: x % 2 == 0NEWLINE >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIPNEWLINE {False: [1, 3, 5, 7], True: [2, 4, 6, 8]}NEWLINE Non-callable keys imply grouping on a member.NEWLINE >>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},NEWLINE ... {'name': 'Bob', 'gender': 'M'},NEWLINE ... 
{'name': 'Charlie', 'gender': 'M'}]) # doctest:+SKIPNEWLINE {'F': [{'gender': 'F', 'name': 'Alice'}],NEWLINE 'M': [{'gender': 'M', 'name': 'Bob'},NEWLINE {'gender': 'M', 'name': 'Charlie'}]}NEWLINE See Also:NEWLINE countbyNEWLINE """NEWLINE if not callable(key):NEWLINE key = getter(key)NEWLINE d = defaultdict(lambda: [].append)NEWLINE for item in seq:NEWLINE d[key(item)](item)NEWLINE rv = {}NEWLINE for k, v in iteritems(d):NEWLINE rv[k] = v.__self__NEWLINE return rvNEWLINENEWLINENEWLINEdef getter(index):NEWLINE if isinstance(index, list):NEWLINE if len(index) == 1:NEWLINE index = index[0]NEWLINE return lambda x: (x[index],)NEWLINE elif index:NEWLINE return operator.itemgetter(*index)NEWLINE else:NEWLINE return lambda x: ()NEWLINE else:NEWLINE return operator.itemgetter(index)NEWLINENEWLINENEWLINEdef comma_join(items):NEWLINE """NEWLINE Like ', '.join(items) but with andNEWLINENEWLINE Examples:NEWLINENEWLINE >>> comma_join(['a'])NEWLINE 'a'NEWLINE >>> comma_join(['a', 'b'])NEWLINE 'a and b'NEWLINE >>> comma_join(['a', 'b', 'c])NEWLINE 'a, b, and c'NEWLINE """NEWLINE return ' and '.join(items) if len(items) <= 2 else ', '.join(items[:-1]) + ', and ' + items[-1]NEWLINENEWLINENEWLINEdef safe_print_unicode(*args, **kwargs):NEWLINE """NEWLINE prints unicode strings to stdout using configurable `errors` handler forNEWLINE encoding errorsNEWLINENEWLINE :param args: unicode strings to print to stdoutNEWLINE :param sep: separator (defaults to ' ')NEWLINE :param end: ending character (defaults to '\n')NEWLINE :param errors: error handler for encoding errors (defaults to 'replace')NEWLINE """NEWLINE sep = kwargs.pop('sep', u' ')NEWLINE end = kwargs.pop('end', u'\n')NEWLINE errors = kwargs.pop('errors', 'replace')NEWLINE if PY3:NEWLINE func = sys.stdout.buffer.writeNEWLINE else:NEWLINE func = sys.stdout.writeNEWLINE line = sep.join(args) + endNEWLINE encoding = sys.stdout.encoding or 'utf8'NEWLINE func(line.encode(encoding, errors))NEWLINENEWLINENEWLINEdef rec_glob(path, patterns):NEWLINE result = []NEWLINE for d_f in os.walk(path):NEWLINE # ignore the .git folderNEWLINE # if '.git' in d_f[0]:NEWLINE # continueNEWLINE m = []NEWLINE for pattern in patterns:NEWLINE m.extend(fnmatch.filter(d_f[2], pattern))NEWLINE if m:NEWLINE result.extend([os.path.join(d_f[0], f) for f in m])NEWLINE return resultNEWLINENEWLINENEWLINEdef convert_unix_path_to_win(path):NEWLINE if external.find_executable('cygpath'):NEWLINE cmd = "cygpath -w {0}".format(path)NEWLINE if PY3:NEWLINE path = subprocess.getoutput(cmd)NEWLINE else:NEWLINE path = subprocess.check_output(cmd.split()).rstrip().rstrip("\\")NEWLINENEWLINE else:NEWLINE path = unix_path_to_win(path)NEWLINE return pathNEWLINENEWLINENEWLINEdef convert_win_path_to_unix(path):NEWLINE if external.find_executable('cygpath'):NEWLINE cmd = "cygpath -u {0}".format(path)NEWLINE if PY3:NEWLINE path = subprocess.getoutput(cmd)NEWLINE else:NEWLINE path = subprocess.check_output(cmd.split()).rstrip().rstrip("\\")NEWLINENEWLINE else:NEWLINE path = win_path_to_unix(path)NEWLINE return pathNEWLINENEWLINENEWLINE# Used for translating local paths into url (file://) pathsNEWLINE# http://stackoverflow.com/a/14298190/1170370NEWLINEdef path2url(path):NEWLINE return urlparse.urljoin('file:', urllib.pathname2url(path))NEWLINENEWLINENEWLINEdef get_stdlib_dir(prefix, py_ver):NEWLINE if sys.platform == 'win32':NEWLINE lib_dir = os.path.join(prefix, 'Lib')NEWLINE else:NEWLINE lib_dir = os.path.join(prefix, 'lib', 'python{}'.format(py_ver))NEWLINE return lib_dirNEWLINENEWLINENEWLINEdef 
get_site_packages(prefix, py_ver):NEWLINE return os.path.join(get_stdlib_dir(prefix, py_ver), 'site-packages')NEWLINENEWLINENEWLINEdef get_build_folders(croot):NEWLINE # remember, glob is not a regex.NEWLINE return glob(os.path.join(croot, "*" + "[0-9]" * 10 + "*"))NEWLINENEWLINENEWLINEdef prepend_bin_path(env, prefix, prepend_prefix=False):NEWLINE # bin_dirname takes care of bin on *nix, Scripts on winNEWLINE env['PATH'] = join(prefix, bin_dirname) + os.pathsep + env['PATH']NEWLINE if sys.platform == "win32":NEWLINE env['PATH'] = join(prefix, "Library", "mingw-w64", "bin") + os.pathsep + \NEWLINE join(prefix, "Library", "usr", "bin") + os.pathsep + os.pathsep + \NEWLINE join(prefix, "Library", "bin") + os.pathsep + \NEWLINE join(prefix, "Scripts") + os.pathsep + \NEWLINE env['PATH']NEWLINE prepend_prefix = True # windows has Python in the prefix. Use it.NEWLINE if prepend_prefix:NEWLINE env['PATH'] = prefix + os.pathsep + env['PATH']NEWLINE return envNEWLINENEWLINENEWLINE# not currently used. Leaving in because it may be useful for when we do thingsNEWLINE# like load setup.py data, and we need the modules from some prefix other thanNEWLINE# the root prefix, which is what conda-build runs from.NEWLINE@contextlib.contextmanagerNEWLINEdef sys_path_prepended(prefix):NEWLINE path_backup = sys.path[:]NEWLINE if on_win:NEWLINE sys.path.insert(1, os.path.join(prefix, 'lib', 'site-packages'))NEWLINE else:NEWLINE lib_dir = os.path.join(prefix, 'lib')NEWLINE python_dir = glob(os.path.join(lib_dir, 'python[0-9\.]*'))NEWLINE if python_dir:NEWLINE python_dir = python_dir[0]NEWLINE sys.path.insert(1, os.path.join(python_dir, 'site-packages'))NEWLINE try:NEWLINE yieldNEWLINE finally:NEWLINE sys.path = path_backupNEWLINENEWLINENEWLINE@contextlib.contextmanagerNEWLINEdef path_prepended(prefix):NEWLINE old_path = os.environ['PATH']NEWLINE os.environ['PATH'] = prepend_bin_path(os.environ.copy(), prefix, True)['PATH']NEWLINE try:NEWLINE yieldNEWLINE finally:NEWLINE os.environ['PATH'] = old_pathNEWLINENEWLINENEWLINEbin_dirname = 'Scripts' if sys.platform == 'win32' else 'bin'NEWLINENEWLINEentry_pat = re.compile('\s*([\w\-\.]+)\s*=\s*([\w.]+):([\w.]+)\s*$')NEWLINENEWLINENEWLINEdef iter_entry_points(items):NEWLINE for item in items:NEWLINE m = entry_pat.match(item)NEWLINE if m is None:NEWLINE sys.exit("Error cound not match entry point: %r" % item)NEWLINE yield m.groups()NEWLINENEWLINENEWLINEdef create_entry_point(path, module, func, config):NEWLINE import_name = func.split('.')[0]NEWLINE pyscript = PY_TMPL % {NEWLINE 'module': module, 'func': func, 'import_name': import_name}NEWLINE if on_win:NEWLINE with open(path + '-script.py', 'w') as fo:NEWLINE if os.path.isfile(os.path.join(config.host_prefix, 'python_d.exe')):NEWLINE fo.write('#!python_d\n')NEWLINE fo.write(pyscript)NEWLINE copy_into(join(dirname(__file__), 'cli-{}.exe'.format(config.arch)),NEWLINE path + '.exe', config.timeout)NEWLINE else:NEWLINE if os.path.islink(path):NEWLINE os.remove(path)NEWLINE with open(path, 'w') as fo:NEWLINE if not config.noarch:NEWLINE fo.write('#!%s\n' % config.build_python)NEWLINE fo.write(pyscript)NEWLINE os.chmod(path, 0o775)NEWLINENEWLINENEWLINEdef create_entry_points(items, config):NEWLINE if not items:NEWLINE returnNEWLINE bin_dir = join(config.host_prefix, bin_dirname)NEWLINE if not isdir(bin_dir):NEWLINE os.mkdir(bin_dir)NEWLINE for cmd, module, func in iter_entry_points(items):NEWLINE create_entry_point(join(bin_dir, cmd), module, func, config)NEWLINENEWLINENEWLINE# Return all files in dir, and all its 
subdirectories, ending in patternNEWLINEdef get_ext_files(start_path, pattern):NEWLINE for root, _, files in os.walk(start_path):NEWLINE for f in files:NEWLINE if f.endswith(pattern):NEWLINE yield os.path.join(root, f)NEWLINENEWLINENEWLINEdef _func_defaulting_env_to_os_environ(func, *popenargs, **kwargs):NEWLINE if 'env' not in kwargs:NEWLINE kwargs = kwargs.copy()NEWLINE env_copy = os.environ.copy()NEWLINE kwargs.update({'env': env_copy})NEWLINE kwargs['env'] = {str(key): str(value) for key, value in kwargs['env'].items()}NEWLINE _args = []NEWLINE if 'stdin' not in kwargs:NEWLINE kwargs['stdin'] = subprocess.PIPENEWLINE for arg in popenargs:NEWLINE # arguments to subprocess need to be bytestringsNEWLINE if sys.version_info.major < 3 and hasattr(arg, 'encode'):NEWLINE arg = arg.encode(codec)NEWLINE elif sys.version_info.major >= 3 and hasattr(arg, 'decode'):NEWLINE arg = arg.decode(codec)NEWLINE _args.append(str(arg))NEWLINE return func(_args, **kwargs)NEWLINENEWLINENEWLINEdef check_call_env(popenargs, **kwargs):NEWLINE return _func_defaulting_env_to_os_environ(subprocess.check_call, *popenargs, **kwargs)NEWLINENEWLINENEWLINEdef check_output_env(popenargs, **kwargs):NEWLINE return _func_defaulting_env_to_os_environ(subprocess.check_output, *popenargs, **kwargs)\NEWLINE .rstrip()NEWLINENEWLINENEWLINE_posix_exes_cache = {}NEWLINENEWLINENEWLINEdef convert_path_for_cygwin_or_msys2(exe, path):NEWLINE "If exe is a Cygwin or MSYS2 executable then filters it through `cygpath -u`"NEWLINE if sys.platform != 'win32':NEWLINE return pathNEWLINE if exe not in _posix_exes_cache:NEWLINE with open(exe, "rb") as exe_file:NEWLINE exe_binary = exe_file.read()NEWLINE msys2_cygwin = re.findall(b'(cygwin1.dll|msys-2.0.dll)', exe_binary)NEWLINE _posix_exes_cache[exe] = True if msys2_cygwin else FalseNEWLINE if _posix_exes_cache[exe]:NEWLINE try:NEWLINE path = check_output_env(['cygpath', '-u',NEWLINE path]).splitlines()[0].decode(getpreferredencoding())NEWLINE except WindowsError:NEWLINE log = get_logger(__name__)NEWLINE log.debug('cygpath executable not found. Passing native path. This is OK for msys2.')NEWLINE return pathNEWLINENEWLINENEWLINEdef print_skip_message(metadata):NEWLINE print("Skipped: {} defines build/skip for this "NEWLINE "configuration.".format(metadata.path))NEWLINENEWLINENEWLINE@memoizedNEWLINEdef package_has_file(package_path, file_path):NEWLINE try:NEWLINE locks = get_conda_operation_locks()NEWLINE with try_acquire_locks(locks, timeout=90):NEWLINE with tarfile.open(package_path) as t:NEWLINE try:NEWLINE # internal paths are always forward slashed on all platformsNEWLINE file_path = file_path.replace('\\', '/')NEWLINE text = t.extractfile(file_path).read()NEWLINE return textNEWLINE except KeyError:NEWLINE return FalseNEWLINE except OSError as e:NEWLINE raise RuntimeError("Could not extract %s (%s)" % (package_path, e))NEWLINE except tarfile.ReadError:NEWLINE raise RuntimeError("Could not extract metadata from %s. "NEWLINE "File probably corrupt." 
% package_path)NEWLINENEWLINENEWLINEdef ensure_list(arg):NEWLINE if (isinstance(arg, string_types) or not hasattr(arg, '__iter__')):NEWLINE if arg:NEWLINE arg = [arg]NEWLINE else:NEWLINE arg = []NEWLINE return argNEWLINENEWLINENEWLINE@contextlib.contextmanagerNEWLINEdef tmp_chdir(dest):NEWLINE curdir = os.getcwd()NEWLINE try:NEWLINE os.chdir(dest)NEWLINE yieldNEWLINE finally:NEWLINE os.chdir(curdir)NEWLINENEWLINENEWLINEdef expand_globs(path_list, root_dir):NEWLINE log = get_logger(__name__)NEWLINE files = []NEWLINE for path in path_list:NEWLINE if not os.path.isabs(path):NEWLINE path = os.path.join(root_dir, path)NEWLINE if os.path.islink(path):NEWLINE files.append(path.replace(root_dir + os.path.sep, ''))NEWLINE elif os.path.isdir(path):NEWLINE files.extend(os.path.join(root, f).replace(root_dir + os.path.sep, '')NEWLINE for root, _, fs in os.walk(path) for f in fs)NEWLINE elif os.path.isfile(path):NEWLINE files.append(path.replace(root_dir + os.path.sep, ''))NEWLINE else:NEWLINE # File compared to the globs use / as separator indenpendently of the osNEWLINE glob_files = [f.replace(root_dir + os.path.sep, '')NEWLINE for f in glob(path)]NEWLINE if not glob_files:NEWLINE log.error('invalid recipe path: {}'.format(path))NEWLINE files.extend(glob_files)NEWLINE files = [f.replace(os.path.sep, '/') for f in files]NEWLINE return filesNEWLINENEWLINENEWLINEdef find_recipe(path):NEWLINE """recurse through a folder, locating meta.yaml. Raises error if more than one is found.NEWLINENEWLINE Returns folder containing meta.yaml, to be built.NEWLINENEWLINE If we have a base level meta.yaml and other supplemental ones, use that first"""NEWLINE if os.path.isfile(path) and os.path.basename(path) in ["meta.yaml", "conda.yaml"]:NEWLINE return os.path.dirname(path)NEWLINE results = rec_glob(path, ["meta.yaml", "conda.yaml"])NEWLINE if len(results) > 1:NEWLINE base_recipe = os.path.join(path, "meta.yaml")NEWLINE if base_recipe in results:NEWLINE get_logger(__name__).warn("Multiple meta.yaml files found. 
"NEWLINE "The meta.yaml file in the base directory "NEWLINE "will be used.")NEWLINE results = [base_recipe]NEWLINE else:NEWLINE raise IOError("More than one meta.yaml files found in %s" % path)NEWLINE elif not results:NEWLINE raise IOError("No meta.yaml or conda.yaml files found in %s" % path)NEWLINE return results[0]NEWLINENEWLINENEWLINEclass LoggingContext(object):NEWLINE loggers = ['conda', 'binstar', 'install', 'conda.install', 'fetch', 'conda.instructions',NEWLINE 'fetch.progress', 'print', 'progress', 'dotupdate', 'stdoutlog', 'requests',NEWLINE 'conda.core.package_cache', 'conda.plan', 'conda.gateways.disk.delete']NEWLINENEWLINE def __init__(self, level=logging.WARN, handler=None, close=True):NEWLINE self.level = levelNEWLINE self.old_levels = {}NEWLINE self.handler = handlerNEWLINE self.close = closeNEWLINENEWLINE def __enter__(self):NEWLINE for logger in LoggingContext.loggers:NEWLINE log = logging.getLogger(logger)NEWLINE self.old_levels[logger] = log.levelNEWLINE log.setLevel(self.level if ('install' not in logger orNEWLINE self.level < logging.INFO) else self.level + 10)NEWLINE if self.handler:NEWLINE self.logger.addHandler(self.handler)NEWLINENEWLINE def __exit__(self, et, ev, tb):NEWLINE for logger, level in self.old_levels.items():NEWLINE logging.getLogger(logger).setLevel(level)NEWLINE if self.handler:NEWLINE self.logger.removeHandler(self.handler)NEWLINE if self.handler and self.close:NEWLINE self.handler.close()NEWLINE # implicit return of None => don't swallow exceptionsNEWLINENEWLINENEWLINEdef get_installed_packages(path):NEWLINE '''NEWLINE Scan all json files in 'path' and return a dictionary with their contents.NEWLINE Files are assumed to be in 'index.json' format.NEWLINE '''NEWLINE installed = dict()NEWLINE for filename in glob(os.path.join(path, 'conda-meta', '*.json')):NEWLINE with open(filename) as file:NEWLINE data = json.load(file)NEWLINE installed[data['name']] = dataNEWLINE return installedNEWLINENEWLINENEWLINEdef _convert_lists_to_sets(_dict):NEWLINE for k, v in _dict.items():NEWLINE if hasattr(v, 'keys'):NEWLINE _dict[k] = HashableDict(_convert_lists_to_sets(v))NEWLINE elif hasattr(v, '__iter__') and not isinstance(v, string_types):NEWLINE _dict[k] = sorted(list(set(v)))NEWLINE return _dictNEWLINENEWLINENEWLINEclass HashableDict(dict):NEWLINE """use hashable frozen dictionaries for resources and resource types so that they can be in setsNEWLINE """NEWLINE def __init__(self, *args, **kwargs):NEWLINE super(HashableDict, self).__init__(*args, **kwargs)NEWLINE self = _convert_lists_to_sets(self)NEWLINENEWLINE def __hash__(self):NEWLINE return hash(json.dumps(self, sort_keys=True))NEWLINENEWLINENEWLINEdef represent_hashabledict(dumper, data):NEWLINE value = []NEWLINENEWLINE for item_key, item_value in data.items():NEWLINE node_key = dumper.represent_data(item_key)NEWLINE node_value = dumper.represent_data(item_value)NEWLINENEWLINE value.append((node_key, node_value))NEWLINENEWLINE return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)NEWLINENEWLINENEWLINEyaml.add_representer(HashableDict, represent_hashabledict)NEWLINENEWLINENEWLINE# http://stackoverflow.com/a/10743550/1170370NEWLINE@contextlib.contextmanagerNEWLINEdef capture():NEWLINE import sysNEWLINE oldout, olderr = sys.stdout, sys.stderrNEWLINE try:NEWLINE out = [StringIO(), StringIO()]NEWLINE sys.stdout, sys.stderr = outNEWLINE yield outNEWLINE finally:NEWLINE sys.stdout, sys.stderr = oldout, olderrNEWLINE out[0] = out[0].getvalue()NEWLINE out[1] = out[1].getvalue()NEWLINENEWLINENEWLINE# copied 
from conda; added in 4.3, not currently part of exported functionalityNEWLINE@contextlib.contextmanagerNEWLINEdef env_var(name, value, callback=None):NEWLINE # NOTE: will likely want to call reset_context() when using this function, so passNEWLINE # it as callbackNEWLINE name, value = str(name), str(value)NEWLINE saved_env_var = os.environ.get(name)NEWLINE try:NEWLINE os.environ[name] = valueNEWLINE if callback:NEWLINE callback()NEWLINE yieldNEWLINE finally:NEWLINE if saved_env_var:NEWLINE os.environ[name] = saved_env_varNEWLINE else:NEWLINE del os.environ[name]NEWLINE if callback:NEWLINE callback()NEWLINENEWLINENEWLINEdef collect_channels(config, is_host=False):NEWLINE urls = [url_path(config.croot)] + get_rc_urls() + ['local', ]NEWLINE if config.channel_urls:NEWLINE urls.extend(config.channel_urls)NEWLINE # defaults has a very limited set of repo urls. Omit it from the URL list soNEWLINE # that it doesn't fail.NEWLINE if config.is_cross and is_host:NEWLINE urls.remove('defaults')NEWLINE urls.remove('local')NEWLINE return urlsNEWLINENEWLINENEWLINEdef trim_empty_keys(dict_):NEWLINE to_remove = set()NEWLINE negative_means_empty = ('final', 'noarch_python')NEWLINE for k, v in dict_.items():NEWLINE if hasattr(v, 'keys'):NEWLINE trim_empty_keys(v)NEWLINE # empty lists and empty strings, and None are always empty.NEWLINE if v == list() or v == '' or v is None or v == dict():NEWLINE to_remove.add(k)NEWLINE # other things that evaluate as False may not be "empty" - things can be manually set toNEWLINE # false, and we need to keep that setting.NEWLINE if not v and k in negative_means_empty:NEWLINE to_remove.add(k)NEWLINE for k in to_remove:NEWLINE del dict_[k]NEWLINENEWLINENEWLINEdef conda_43():NEWLINE """Conda 4.3 broke compatibility in lots of new fun and exciting ways. This function is forNEWLINE changing conda-build's behavior when conda 4.3 or higher is installed."""NEWLINE return LooseVersion(conda_version) >= LooseVersion('4.3')NEWLINENEWLINENEWLINEdef _increment(version, alpha_ver):NEWLINE try:NEWLINE if alpha_ver:NEWLINE suffix = 'a'NEWLINE else:NEWLINE suffix = '.0a0'NEWLINE last_version = str(int(version) + 1) + suffixNEWLINE except ValueError:NEWLINE last_version = chr(ord(version) + 1)NEWLINE return last_versionNEWLINENEWLINENEWLINEdef apply_pin_expressions(version, min_pin='x.x.x.x.x.x.x', max_pin='x'):NEWLINE pins = [len(p.split('.')) if p else None for p in (min_pin, max_pin)]NEWLINE parsed_version = VersionOrder(version).version[1:]NEWLINE nesting_position = NoneNEWLINE flat_list = []NEWLINE for idx, item in enumerate(parsed_version):NEWLINE if isinstance(item, list):NEWLINE nesting_position = idxNEWLINE flat_list.extend(item)NEWLINE else:NEWLINE flat_list.append(item)NEWLINE versions = ['', '']NEWLINE # first idx is lower bound pin; second is upper bound pin.NEWLINE # pin value is number of places to pin.NEWLINE for p_idx, pin in enumerate(pins):NEWLINE if pin:NEWLINE # flat_list is the blown-out representation of the versionNEWLINE for v_idx, v in enumerate(flat_list[:pin]):NEWLINE # upper bound pinNEWLINE if p_idx == 1 and v_idx == pin - 1:NEWLINE # is the last place an alphabetic character? 
OpenSSL, JPEGNEWLINE alpha_ver = str(flat_list[min(pin, len(flat_list) - 1)]).isalpha()NEWLINE v = _increment(v, alpha_ver)NEWLINE versions[p_idx] += str(v)NEWLINE if v_idx != nesting_position:NEWLINE versions[p_idx] += '.'NEWLINE if versions[p_idx][-1] == '.':NEWLINE versions[p_idx] = versions[p_idx][:-1]NEWLINE if versions[0]:NEWLINE versions[0] = '>=' + versions[0]NEWLINE if versions[1]:NEWLINE versions[1] = '<' + versions[1]NEWLINE return ','.join([v for v in versions if v])NEWLINENEWLINENEWLINEdef filter_files(files_list, prefix, filter_patterns=('(.*[\\\\/])?\.git[\\\\/].*',NEWLINE '(.*[\\\\/])?\.git$',NEWLINE '(.*)?\.DS_Store.*',NEWLINE '(.*)?\.gitignore',NEWLINE 'conda-meta.*',NEWLINE '(.*)?\.gitmodules')):NEWLINE """Remove things like .git from the list of files to be copied"""NEWLINE for pattern in filter_patterns:NEWLINE r = re.compile(pattern)NEWLINE files_list = set(files_list) - set(filter(r.match, files_list))NEWLINE return [f.replace(prefix + os.path.sep, '') for f in files_listNEWLINE if not os.path.isdir(os.path.join(prefix, f)) orNEWLINE os.path.islink(os.path.join(prefix, f))]NEWLINENEWLINENEWLINEdef rm_rf(path, config=None):NEWLINE if on_win:NEWLINE # native windows delete is potentially much fasterNEWLINE try:NEWLINE if os.path.isfile(path):NEWLINE subprocess.check_call('del {}'.format(path), shell=True)NEWLINE elif os.path.isdir(path):NEWLINE subprocess.check_call('rd /s /q {}'.format(path), shell=True)NEWLINE else:NEWLINE passNEWLINE except subprocess.CalledProcessError:NEWLINE passNEWLINE conda_log_level = logging.WARNNEWLINE if config and config.debug:NEWLINE conda_log_level = logging.DEBUGNEWLINE with LoggingContext(conda_log_level):NEWLINE _rm_rf(path)NEWLINENEWLINENEWLINE# https://stackoverflow.com/a/31459386/1170370NEWLINEclass LessThanFilter(logging.Filter):NEWLINE def __init__(self, exclusive_maximum, name=""):NEWLINE super(LessThanFilter, self).__init__(name)NEWLINE self.max_level = exclusive_maximumNEWLINENEWLINE def filter(self, record):NEWLINE # non-zero return means we log this messageNEWLINE return 1 if record.levelno < self.max_level else 0NEWLINENEWLINENEWLINEclass GreaterThanFilter(logging.Filter):NEWLINE def __init__(self, exclusive_minimum, name=""):NEWLINE super(GreaterThanFilter, self).__init__(name)NEWLINE self.min_level = exclusive_minimumNEWLINENEWLINE def filter(self, record):NEWLINE # non-zero return means we log this messageNEWLINE return 1 if record.levelno > self.min_level else 0NEWLINENEWLINENEWLINE# unclutter logs - show messages only onceNEWLINEclass DuplicateFilter(logging.Filter):NEWLINE def __init__(self):NEWLINE self.msgs = set()NEWLINENEWLINE def filter(self, record):NEWLINE log = record.msg not in self.msgsNEWLINE self.msgs.add(record.msg)NEWLINE return int(log)NEWLINENEWLINENEWLINEdedupe_filter = DuplicateFilter()NEWLINEinfo_debug_stdout_filter = LessThanFilter(logging.WARNING)NEWLINEwarning_error_stderr_filter = GreaterThanFilter(logging.INFO)NEWLINENEWLINENEWLINEdef get_logger(name, level=logging.INFO, dedupe=True, add_stdout_stderr_handlers=True):NEWLINE config_file = cc_conda_build.get('log_config_file')NEWLINE # by loading config file here, and then only adding handlers later, peopleNEWLINE # should be able to override conda-build's logger settings here.NEWLINE if config_file:NEWLINE with open(config_file) as f:NEWLINE config_dict = yaml.safe_load(f)NEWLINE logging.config.dictConfig(config_dict)NEWLINE level = config_dict.get('loggers', {}).get(name, {}).get('level', level)NEWLINE log = logging.getLogger(name)NEWLINE 
log.setLevel(level)NEWLINE if dedupe:NEWLINE log.addFilter(dedupe_filter)NEWLINENEWLINE # these are defaults. They can be overridden by configuring a log config yaml file.NEWLINE if not log.handlers and add_stdout_stderr_handlers:NEWLINE stdout_handler = logging.StreamHandler(sys.stdout)NEWLINE stderr_handler = logging.StreamHandler(sys.stderr)NEWLINE stdout_handler.addFilter(info_debug_stdout_filter)NEWLINE stderr_handler.addFilter(warning_error_stderr_filter)NEWLINE stdout_handler.setLevel(level)NEWLINE stderr_handler.setLevel(level)NEWLINE log.addHandler(stdout_handler)NEWLINE log.addHandler(stderr_handler)NEWLINE return logNEWLINENEWLINENEWLINEdef _equivalent(base_value, value, path):NEWLINE equivalent = value == base_valueNEWLINE if isinstance(value, string_types) and isinstance(base_value, string_types):NEWLINE if not os.path.isabs(base_value):NEWLINE base_value = os.path.abspath(os.path.normpath(os.path.join(path, base_value)))NEWLINE if not os.path.isabs(value):NEWLINE value = os.path.abspath(os.path.normpath(os.path.join(path, value)))NEWLINE equivalent |= base_value == valueNEWLINE return equivalentNEWLINENEWLINENEWLINEdef merge_or_update_dict(base, new, path, merge, raise_on_clobber=False):NEWLINE log = get_logger(__name__)NEWLINE for key, value in new.items():NEWLINE base_value = base.get(key, value)NEWLINE if hasattr(value, 'keys'):NEWLINE base_value = merge_or_update_dict(base_value, value, path, merge,NEWLINE raise_on_clobber=raise_on_clobber)NEWLINE base[key] = base_valueNEWLINE elif hasattr(value, '__iter__') and not isinstance(value, string_types):NEWLINE if merge:NEWLINE if base_value and base_value != value:NEWLINE base_value.extend(value)NEWLINE try:NEWLINE base[key] = list(set(base_value))NEWLINE except TypeError:NEWLINE base[key] = base_valueNEWLINE else:NEWLINE base[key] = valueNEWLINE else:NEWLINE if (base_value and merge and not _equivalent(base_value, value, path) andNEWLINE raise_on_clobber):NEWLINE log.debug('clobbering key {} (original value {}) with value {}'.format(key,NEWLINE base_value, value))NEWLINE base[key] = valueNEWLINE return baseNEWLINENEWLINENEWLINEdef merge_dicts_of_lists(dol1, dol2):NEWLINE '''NEWLINE From Alex Martelli: https://stackoverflow.com/a/1495821/3257826NEWLINE '''NEWLINE keys = set(dol1).union(dol2)NEWLINE no = []NEWLINE return dict((k, dol1.get(k, no) + dol2.get(k, no)) for k in keys)NEWLINENEWLINENEWLINEdef prefix_files(prefix):NEWLINE '''NEWLINE Returns a set of all files in prefix.NEWLINE '''NEWLINE res = set()NEWLINE for root, dirs, files in os.walk(prefix):NEWLINE for fn in files:NEWLINE res.add(join(root, fn)[len(prefix) + 1:])NEWLINE for dn in dirs:NEWLINE path = join(root, dn)NEWLINE if islink(path):NEWLINE res.add(path[len(prefix) + 1:])NEWLINE res = set(expand_globs(res, prefix))NEWLINE return resNEWLINENEWLINENEWLINEdef mmap_mmap(fileno, length, tagname=None, flags=0, prot=mmap_PROT_READ | mmap_PROT_WRITE,NEWLINE access=None, offset=0):NEWLINE '''NEWLINE Hides the differences between mmap.mmap on Windows and Unix.NEWLINE Windows has `tagname`.NEWLINE Unix does not, but makes up for it with `flags` and `prot`.NEWLINE On both, the defaule value for `access` is determined from how the fileNEWLINE was opened so must not be passed in at all to get this default behaviourNEWLINE '''NEWLINE if on_win:NEWLINE if access:NEWLINE return mmap.mmap(fileno, length, tagname=tagname, access=access, offset=offset)NEWLINE else:NEWLINE return mmap.mmap(fileno, length, tagname=tagname)NEWLINE else:NEWLINE if access:NEWLINE return 
mmap.mmap(fileno, length, flags=flags, prot=prot, access=access, offset=offset)NEWLINE else:NEWLINE return mmap.mmap(fileno, length, flags=flags, prot=prot)NEWLINENEWLINENEWLINEdef remove_pycache_from_scripts(build_prefix):NEWLINE """Remove pip created pycache directory from bin or Scripts."""NEWLINE if on_win:NEWLINE scripts_path = os.path.join(build_prefix, 'Scripts')NEWLINE else:NEWLINE scripts_path = os.path.join(build_prefix, 'bin')NEWLINENEWLINE for entry in os.listdir(scripts_path):NEWLINE entry_path = os.path.join(scripts_path, entry)NEWLINE if os.path.isdir(entry_path) and entry.strip(os.sep) == '__pycache__':NEWLINE shutil.rmtree(entry_path)NEWLINENEWLINE elif os.path.isfile(entry_path) and entry_path.endswith('.pyc'):NEWLINE os.remove(entry_path)NEWLINENEWLINENEWLINEdef sort_list_in_nested_structure(dictionary, omissions=''):NEWLINE """Recurse through a nested dictionary and sort any lists that are found.NEWLINENEWLINE If the list that is found contains anything but strings, it is skippedNEWLINE as we can't compare lists containing different types. The omissions argumentNEWLINE allows for certain sections of the dictionary to be omitted from sorting.NEWLINE """NEWLINE for field, value in dictionary.items():NEWLINE if isinstance(value, dict):NEWLINE for key in value.keys():NEWLINE section = dictionary[field][key]NEWLINE if isinstance(section, dict):NEWLINE sort_list_in_nested_structure(section)NEWLINE elif (isinstance(section, list) andNEWLINE '{}/{}' .format(field, key) not in omissions andNEWLINE all(isinstance(item, str) for item in section)):NEWLINE section.sort()NEWLINENEWLINE # there's a possibility for nested lists containing dictionariesNEWLINE # in this case we recurse until we find a list to sortNEWLINE elif isinstance(value, list):NEWLINE for element in value:NEWLINE if isinstance(element, dict):NEWLINE sort_list_in_nested_structure(element)NEWLINE try:NEWLINE value.sort()NEWLINE except TypeError:NEWLINE passNEWLINENEWLINENEWLINE# group one: package nameNEWLINE# group two: version (allows _, +, . in version)NEWLINE# group three: build string - mostly not used here. 
Match primarily mattersNEWLINE# to specify when not to add .*NEWLINENEWLINE# if you are seeing mysterious unsatisfiable errors, with the package you're building being theNEWLINE# unsatisfiable part, then you probably need to update this regex.NEWLINENEWLINEspec_needing_star_re = re.compile("([0-9a-zA-Z\.\-\_]+)\s+([0-9a-zA-Z\.\+\_]+)(\s+[0-9a-zA-Z\.\_]+)?") # NOQANEWLINEspec_ver_needing_star_re = re.compile("^([0-9a-zA-Z\.]+)$")NEWLINENEWLINENEWLINEdef ensure_valid_spec(spec):NEWLINE if isinstance(spec, MatchSpec):NEWLINE if (hasattr(spec, 'version') and spec.version andNEWLINE spec_ver_needing_star_re.match(str(spec.version))):NEWLINE if str(spec.name) not in ('python', 'numpy') or str(spec.version) != 'x.x':NEWLINE spec = MatchSpec("{} {}".format(str(spec.name), str(spec.version) + '.*'))NEWLINE else:NEWLINE match = spec_needing_star_re.match(spec)NEWLINE # ignore exact pins (would be a 3rd group)NEWLINE if match and not match.group(3):NEWLINE if match.group(1) in ('python', 'numpy') and match.group(2) == 'x.x':NEWLINE spec = spec_needing_star_re.sub(r"\1 \2", spec)NEWLINE else:NEWLINE if "*" not in spec:NEWLINE spec = spec_needing_star_re.sub(r"\1 \2.*", spec)NEWLINE return specNEWLINENEWLINENEWLINEdef insert_variant_versions(metadata, env):NEWLINE reqs = metadata.get_value('requirements/' + env)NEWLINE for key, val in metadata.config.variant.items():NEWLINE regex = re.compile(r'^(%s)(?:\s*$)' % key.replace('_', '[-_]'))NEWLINE matches = [regex.match(pkg) for pkg in reqs]NEWLINE if any(matches):NEWLINE for i, x in enumerate(matches):NEWLINE if x:NEWLINE del reqs[i]NEWLINE reqs.insert(i, ensure_valid_spec(' '.join((x.group(1), val))))NEWLINENEWLINE xx_re = re.compile("([0-9a-zA-Z\.\-\_]+)\s+x\.x")NEWLINENEWLINE matches = [xx_re.match(pkg) for pkg in reqs]NEWLINE if any(matches):NEWLINE for i, x in enumerate(matches):NEWLINE if x:NEWLINE del reqs[i]NEWLINE reqs.insert(i, ensure_valid_spec(' '.join((x.group(1),NEWLINE metadata.config.variant.get(x.group(1))))))NEWLINE metadata.meta['requirements'][env] = reqsNEWLINE
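apply_pin_expressions above is the engine behind conda-build's version pinning: min_pin keeps the leading version places for the lower bound, while max_pin picks the place to bump, appending .0a0 so prereleases of the next series stay excluded. A small usage sketch, assuming a conda-build installation that exposes this function from conda_build.utils (the printed results follow from tracing the function body above):

from conda_build.utils import apply_pin_expressions

# Keep two places for the lower bound, bump the first place for the upper bound.
print(apply_pin_expressions('1.2.3', min_pin='x.x', max_pin='x'))
# -> '>=1.2,<2.0a0'

# Pin more tightly: three places below, bump the second place above.
print(apply_pin_expressions('1.2.3', min_pin='x.x.x', max_pin='x.x'))
# -> '>=1.2.3,<1.3.0a0'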
# coding: utf-8NEWLINENEWLINE"""NEWLINE SendinBlue APINEWLINENEWLINE SendinBlue provide a RESTFul API that can be used with any languages. With this API, you will be able to : - Manage your campaigns and get the statistics - Manage your contacts - Send transactional Emails and SMS - and much more... You can download our wrappers at https://github.com/orgs/sendinblue **Possible responses** | Code | Message | | :-------------: | ------------- | | 200 | OK. Successful Request | | 201 | OK. Successful Creation | | 202 | OK. Request accepted | | 204 | OK. Successful Update/Deletion | | 400 | Error. Bad Request | | 401 | Error. Authentication Needed | | 402 | Error. Not enough credit, plan upgrade needed | | 403 | Error. Permission denied | | 404 | Error. Object does not exist | | 405 | Error. Method not allowed | | 406 | Error. Not Acceptable | # noqa: E501NEWLINENEWLINE OpenAPI spec version: 3.0.0NEWLINE Contact: contact@sendinblue.comNEWLINE Generated by: https://github.com/swagger-api/swagger-codegen.gitNEWLINE"""NEWLINENEWLINENEWLINEimport pprintNEWLINEimport re # noqa: F401NEWLINENEWLINEimport sixNEWLINENEWLINENEWLINEclass GetSmsCampaigns(object):NEWLINE """NOTE: This class is auto generated by the swagger code generator program.NEWLINENEWLINE Do not edit the class manually.NEWLINE """NEWLINENEWLINE """NEWLINE Attributes:NEWLINE swagger_types (dict): The key is attribute nameNEWLINE and the value is attribute type.NEWLINE attribute_map (dict): The key is attribute nameNEWLINE and the value is json key in definition.NEWLINE """NEWLINE swagger_types = {NEWLINE 'campaigns': 'list[object]',NEWLINE 'count': 'int'NEWLINE }NEWLINENEWLINE attribute_map = {NEWLINE 'campaigns': 'campaigns',NEWLINE 'count': 'count'NEWLINE }NEWLINENEWLINE def __init__(self, campaigns=None, count=None): # noqa: E501NEWLINE """GetSmsCampaigns - a model defined in Swagger""" # noqa: E501NEWLINENEWLINE self._campaigns = NoneNEWLINE self._count = NoneNEWLINE self.discriminator = NoneNEWLINENEWLINE if campaigns is not None:NEWLINE self.campaigns = campaignsNEWLINE if count is not None:NEWLINE self.count = countNEWLINENEWLINE @propertyNEWLINE def campaigns(self):NEWLINE """Gets the campaigns of this GetSmsCampaigns. # noqa: E501NEWLINENEWLINENEWLINE :return: The campaigns of this GetSmsCampaigns. # noqa: E501NEWLINE :rtype: list[object]NEWLINE """NEWLINE return self._campaignsNEWLINENEWLINE @campaigns.setterNEWLINE def campaigns(self, campaigns):NEWLINE """Sets the campaigns of this GetSmsCampaigns.NEWLINENEWLINENEWLINE :param campaigns: The campaigns of this GetSmsCampaigns. # noqa: E501NEWLINE :type: list[object]NEWLINE """NEWLINENEWLINE self._campaigns = campaignsNEWLINENEWLINE @propertyNEWLINE def count(self):NEWLINE """Gets the count of this GetSmsCampaigns. # noqa: E501NEWLINENEWLINE Number of SMS campaigns retrieved # noqa: E501NEWLINENEWLINE :return: The count of this GetSmsCampaigns. # noqa: E501NEWLINE :rtype: intNEWLINE """NEWLINE return self._countNEWLINENEWLINE @count.setterNEWLINE def count(self, count):NEWLINE """Sets the count of this GetSmsCampaigns.NEWLINENEWLINE Number of SMS campaigns retrieved # noqa: E501NEWLINENEWLINE :param count: The count of this GetSmsCampaigns. 
# noqa: E501NEWLINE :type: intNEWLINE """NEWLINENEWLINE self._count = countNEWLINENEWLINE def to_dict(self):NEWLINE """Returns the model properties as a dict"""NEWLINE result = {}NEWLINENEWLINE for attr, _ in six.iteritems(self.swagger_types):NEWLINE value = getattr(self, attr)NEWLINE if isinstance(value, list):NEWLINE result[attr] = list(map(NEWLINE lambda x: x.to_dict() if hasattr(x, "to_dict") else x,NEWLINE valueNEWLINE ))NEWLINE elif hasattr(value, "to_dict"):NEWLINE result[attr] = value.to_dict()NEWLINE elif isinstance(value, dict):NEWLINE result[attr] = dict(map(NEWLINE lambda item: (item[0], item[1].to_dict())NEWLINE if hasattr(item[1], "to_dict") else item,NEWLINE value.items()NEWLINE ))NEWLINE else:NEWLINE result[attr] = valueNEWLINE if issubclass(GetSmsCampaigns, dict):NEWLINE for key, value in self.items():NEWLINE result[key] = valueNEWLINENEWLINE return resultNEWLINENEWLINE def to_str(self):NEWLINE """Returns the string representation of the model"""NEWLINE return pprint.pformat(self.to_dict())NEWLINENEWLINE def __repr__(self):NEWLINE """For `print` and `pprint`"""NEWLINE return self.to_str()NEWLINENEWLINE def __eq__(self, other):NEWLINE """Returns true if both objects are equal"""NEWLINE if not isinstance(other, GetSmsCampaigns):NEWLINE return FalseNEWLINENEWLINE return self.__dict__ == other.__dict__NEWLINENEWLINE def __ne__(self, other):NEWLINE """Returns true if both objects are not equal"""NEWLINE return not self == otherNEWLINE
import numpy as npNEWLINEimport unittestNEWLINEimport sysNEWLINEsys.path.append("tests/python")NEWLINE# Don't import the test class, otherwise they will run twice.NEWLINEimport test_interaction_constraints as test_icNEWLINErng = np.random.RandomState(1994)NEWLINENEWLINENEWLINEclass TestGPUInteractionConstraints(unittest.TestCase):NEWLINE cputest = test_ic.TestInteractionConstraints()NEWLINENEWLINE def test_interaction_constraints(self):NEWLINE self.cputest.test_interaction_constraints(tree_method='gpu_hist')NEWLINENEWLINE def test_training_accuracy(self):NEWLINE self.cputest.test_training_accuracy(tree_method='gpu_hist')NEWLINE
#!/usr/bin/pythonNEWLINE#NEWLINE# Copyright: Ansible ProjectNEWLINE# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)NEWLINENEWLINEfrom __future__ import absolute_import, division, print_functionNEWLINE__metaclass__ = typeNEWLINENEWLINEANSIBLE_METADATA = {'metadata_version': '1.1',NEWLINE 'status': ['preview'],NEWLINE 'supported_by': 'community'}NEWLINENEWLINEDOCUMENTATION = '''NEWLINE---NEWLINEmodule: onyx_vlanNEWLINEauthor: "Samer Deeb (@samerd) Alex Tabachnik (@atabachnik)"NEWLINEshort_description: Manage VLANs on Mellanox ONYX network devicesNEWLINEdescription:NEWLINE - This module provides declarative management of VLANsNEWLINE on Mellanox ONYX network devices.NEWLINEoptions:NEWLINE name:NEWLINE description:NEWLINE - Name of the VLAN.NEWLINE vlan_id:NEWLINE description:NEWLINE - ID of the VLAN.NEWLINE aggregate:NEWLINE description: List of VLANs definitions.NEWLINE purge:NEWLINE description:NEWLINE - Purge VLANs not defined in the I(aggregate) parameter.NEWLINE default: noNEWLINE type: boolNEWLINE state:NEWLINE description:NEWLINE - State of the VLAN configuration.NEWLINE default: presentNEWLINE choices: ['present', 'absent']NEWLINE'''NEWLINENEWLINEEXAMPLES = """NEWLINE- name: configure VLAN ID and nameNEWLINE onyx_vlan:NEWLINE vlan_id: 20NEWLINE name: test-vlanNEWLINENEWLINE- name: remove configurationNEWLINE onyx_vlan:NEWLINE state: absentNEWLINE"""NEWLINENEWLINERETURN = """NEWLINEcommands:NEWLINE description: The list of configuration mode commands to send to the deviceNEWLINE returned: always.NEWLINE type: listNEWLINE sample:NEWLINE - vlan 20NEWLINE - name test-vlanNEWLINE - exitNEWLINE"""NEWLINENEWLINEfrom copy import deepcopyNEWLINENEWLINEfrom ansible.module_utils.basic import AnsibleModuleNEWLINEfrom ansible.module_utils.six import iteritemsNEWLINEfrom ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import remove_default_specNEWLINENEWLINEfrom ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import BaseOnyxModuleNEWLINEfrom ansible_collections.community.general.plugins.module_utils.network.onyx.onyx import show_cmdNEWLINENEWLINENEWLINEclass OnyxVlanModule(BaseOnyxModule):NEWLINE _purge = FalseNEWLINENEWLINE @classmethodNEWLINE def _get_element_spec(cls):NEWLINE return dict(NEWLINE vlan_id=dict(type='int'),NEWLINE name=dict(type='str'),NEWLINE state=dict(default='present', choices=['present', 'absent']),NEWLINE )NEWLINENEWLINE @classmethodNEWLINE def _get_aggregate_spec(cls, element_spec):NEWLINE aggregate_spec = deepcopy(element_spec)NEWLINE aggregate_spec['vlan_id'] = dict(required=True)NEWLINENEWLINE # remove default in aggregate spec, to handle common argumentsNEWLINE remove_default_spec(aggregate_spec)NEWLINE return aggregate_specNEWLINENEWLINE def init_module(self):NEWLINE """ module initializationNEWLINE """NEWLINE element_spec = self._get_element_spec()NEWLINE aggregate_spec = self._get_aggregate_spec(element_spec)NEWLINE argument_spec = dict(NEWLINE aggregate=dict(type='list', elements='dict',NEWLINE options=aggregate_spec),NEWLINE purge=dict(default=False, type='bool'),NEWLINE )NEWLINE argument_spec.update(element_spec)NEWLINE required_one_of = [['vlan_id', 'aggregate']]NEWLINE mutually_exclusive = [['vlan_id', 'aggregate']]NEWLINE self._module = AnsibleModule(NEWLINE argument_spec=argument_spec,NEWLINE required_one_of=required_one_of,NEWLINE mutually_exclusive=mutually_exclusive,NEWLINE supports_check_mode=True)NEWLINENEWLINE def validate_vlan_id(self, value):NEWLINE 
if value and not 1 <= int(value) <= 4094:NEWLINE self._module.fail_json(msg='vlan id must be between 1 and 4094')NEWLINENEWLINE def get_required_config(self):NEWLINE self._required_config = list()NEWLINE module_params = self._module.paramsNEWLINE aggregate = module_params.get('aggregate')NEWLINE self._purge = module_params.get('purge', False)NEWLINE if aggregate:NEWLINE for item in aggregate:NEWLINE for key in item:NEWLINE if item.get(key) is None:NEWLINE item[key] = module_params[key]NEWLINE self.validate_param_values(item, item)NEWLINE req_item = item.copy()NEWLINE req_item['vlan_id'] = int(req_item['vlan_id'])NEWLINE self._required_config.append(req_item)NEWLINE else:NEWLINE params = {NEWLINE 'vlan_id': module_params['vlan_id'],NEWLINE 'name': module_params['name'],NEWLINE 'state': module_params['state'],NEWLINE }NEWLINE self.validate_param_values(params)NEWLINE self._required_config.append(params)NEWLINENEWLINE def _create_vlan_data(self, vlan_id, vlan_data):NEWLINE if self._os_version >= self.ONYX_API_VERSION:NEWLINE vlan_data = vlan_data[0]NEWLINE return {NEWLINE 'vlan_id': vlan_id,NEWLINE 'name': self.get_config_attr(vlan_data, 'Name')NEWLINE }NEWLINENEWLINE def _get_vlan_config(self):NEWLINE return show_cmd(self._module, "show vlan")NEWLINENEWLINE def load_current_config(self):NEWLINE # called in base class in run functionNEWLINE self._os_version = self._get_os_version()NEWLINE self._current_config = dict()NEWLINE vlan_config = self._get_vlan_config()NEWLINE if not vlan_config:NEWLINE returnNEWLINE for vlan_id, vlan_data in iteritems(vlan_config):NEWLINE try:NEWLINE vlan_id = int(vlan_id)NEWLINE except ValueError:NEWLINE continueNEWLINE self._current_config[vlan_id] = \NEWLINE self._create_vlan_data(vlan_id, vlan_data)NEWLINENEWLINE def generate_commands(self):NEWLINE req_vlans = set()NEWLINE for req_conf in self._required_config:NEWLINE state = req_conf['state']NEWLINE vlan_id = req_conf['vlan_id']NEWLINE if state == 'absent':NEWLINE if vlan_id in self._current_config:NEWLINE self._commands.append('no vlan %s' % vlan_id)NEWLINE else:NEWLINE req_vlans.add(vlan_id)NEWLINE self._generate_vlan_commands(vlan_id, req_conf)NEWLINE if self._purge:NEWLINE for vlan_id in self._current_config:NEWLINE if vlan_id not in req_vlans:NEWLINE self._commands.append('no vlan %s' % vlan_id)NEWLINENEWLINE def _generate_vlan_commands(self, vlan_id, req_conf):NEWLINE curr_vlan = self._current_config.get(vlan_id, {})NEWLINE if not curr_vlan:NEWLINE self._commands.append("vlan %s" % vlan_id)NEWLINE self._commands.append("exit")NEWLINE req_name = req_conf['name']NEWLINE curr_name = curr_vlan.get('name')NEWLINE if req_name:NEWLINE if req_name != curr_name:NEWLINE self._commands.append("vlan %s name %s" % (vlan_id, req_name))NEWLINE elif req_name is not None:NEWLINE if curr_name:NEWLINE self._commands.append("vlan %s no name" % vlan_id)NEWLINENEWLINENEWLINEdef main():NEWLINE """ main entry point for module executionNEWLINE """NEWLINE OnyxVlanModule.main()NEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE main()NEWLINE
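# Hedged standalone sketch (illustrative, not part of the module): this mirrors the _generate_vlan_commands logic above for a device whose current config lacks the VLAN, without needing an ONYX device or AnsibleModule.NEWLINEdef sketch_vlan_commands(vlan_id, name, current):NEWLINE    commands = []NEWLINE    if vlan_id not in current:NEWLINE        commands += ['vlan %s' % vlan_id, 'exit']NEWLINE    if name and current.get(vlan_id, {}).get('name') != name:NEWLINE        commands.append('vlan %s name %s' % (vlan_id, name))NEWLINE    return commandsNEWLINENEWLINEassert sketch_vlan_commands(20, 'test-vlan', {}) == ['vlan 20', 'exit', 'vlan 20 name test-vlan']NEWLINE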
# coding: utf-8NEWLINENEWLINE"""NEWLINE Home Connect APINEWLINENEWLINE This API provides access to home appliances enabled by Home Connect (https://home-connect.com). Through the API programs can be started and stopped, or home appliances configured and monitored. For instance, you can start a cotton program on a washer and get a notification when the cycle is complete. To get started with this web client, visit https://developer.home-connect.com and register an account. An application with a client ID for this API client will be automatically generated for you. In order to use this API in your own client, you need an OAuth 2 client implementing the authorization code grant flow (https://developer.home-connect.com/docs/authorization/flow). More details can be found here: https://www.rfc-editor.org/rfc/rfc6749.txt Authorization URL: https://api.home-connect.com/security/oauth/authorize Token URL: https://api.home-connect.com/security/oauth/token # noqa: E501NEWLINENEWLINE OpenAPI spec version: 1NEWLINE NEWLINE Generated by: https://github.com/swagger-api/swagger-codegen.gitNEWLINE"""NEWLINENEWLINEimport pprintNEWLINEimport re # noqa: F401NEWLINENEWLINEimport sixNEWLINENEWLINENEWLINEclass ArrayOfOptions(object):NEWLINE """NOTE: This class is auto generated by the swagger code generator program.NEWLINENEWLINE Do not edit the class manually.NEWLINE """NEWLINE """NEWLINE Attributes:NEWLINE swagger_types (dict): The key is attribute nameNEWLINE and the value is attribute type.NEWLINE attribute_map (dict): The key is attribute nameNEWLINE and the value is json key in definition.NEWLINE """NEWLINE swagger_types = {NEWLINE 'data': 'object'NEWLINE }NEWLINENEWLINE attribute_map = {NEWLINE 'data': 'data'NEWLINE }NEWLINENEWLINE def __init__(self, data=None): # noqa: E501NEWLINE """ArrayOfOptions - a model defined in Swagger""" # noqa: E501NEWLINE self._data = NoneNEWLINE self.discriminator = NoneNEWLINE self.data = dataNEWLINENEWLINE @propertyNEWLINE def data(self):NEWLINE """Gets the data of this ArrayOfOptions. # noqa: E501NEWLINENEWLINENEWLINE :return: The data of this ArrayOfOptions. # noqa: E501NEWLINE :rtype: objectNEWLINE """NEWLINE return self._dataNEWLINENEWLINE @data.setterNEWLINE def data(self, data):NEWLINE """Sets the data of this ArrayOfOptions.NEWLINENEWLINENEWLINE :param data: The data of this ArrayOfOptions. 
# noqa: E501NEWLINE :type: objectNEWLINE """NEWLINE if data is None:NEWLINE raise ValueError("Invalid value for `data`, must not be `None`") # noqa: E501NEWLINENEWLINE self._data = dataNEWLINENEWLINE def to_dict(self):NEWLINE """Returns the model properties as a dict"""NEWLINE result = {}NEWLINENEWLINE for attr, _ in six.iteritems(self.swagger_types):NEWLINE value = getattr(self, attr)NEWLINE if isinstance(value, list):NEWLINE result[attr] = list(map(NEWLINE lambda x: x.to_dict() if hasattr(x, "to_dict") else x,NEWLINE valueNEWLINE ))NEWLINE elif hasattr(value, "to_dict"):NEWLINE result[attr] = value.to_dict()NEWLINE elif isinstance(value, dict):NEWLINE result[attr] = dict(map(NEWLINE lambda item: (item[0], item[1].to_dict())NEWLINE if hasattr(item[1], "to_dict") else item,NEWLINE value.items()NEWLINE ))NEWLINE else:NEWLINE result[attr] = valueNEWLINE if issubclass(ArrayOfOptions, dict):NEWLINE for key, value in self.items():NEWLINE result[key] = valueNEWLINENEWLINE return resultNEWLINENEWLINE def to_str(self):NEWLINE """Returns the string representation of the model"""NEWLINE return pprint.pformat(self.to_dict())NEWLINENEWLINE def __repr__(self):NEWLINE """For `print` and `pprint`"""NEWLINE return self.to_str()NEWLINENEWLINE def __eq__(self, other):NEWLINE """Returns true if both objects are equal"""NEWLINE if not isinstance(other, ArrayOfOptions):NEWLINE return FalseNEWLINENEWLINE return self.__dict__ == other.__dict__NEWLINENEWLINE def __ne__(self, other):NEWLINE """Returns true if both objects are not equal"""NEWLINE return not self == otherNEWLINE
# coding: utf-8NEWLINEfrom __future__ import unicode_literalsNEWLINENEWLINEfrom .adobepass import AdobePassIENEWLINEfrom ..utils import (NEWLINE extract_attributes,NEWLINE update_url_query,NEWLINE smuggle_url,NEWLINE)NEWLINENEWLINENEWLINEclass SproutIE(AdobePassIE):NEWLINE _VALID_URL = r'https?://(?:www\.)?sproutonline\.com/watch/(?P<id>[^/?#]+)'NEWLINE _TEST = {NEWLINE 'url': 'http://www.sproutonline.com/watch/cowboy-adventure',NEWLINE 'md5': '74bf14128578d1e040c3ebc82088f45f',NEWLINE 'info_dict': {NEWLINE 'id': '9dexnwtmh8_X',NEWLINE 'ext': 'mp4',NEWLINE 'title': 'A Cowboy Adventure',NEWLINE 'description': 'Ruff-Ruff, Tweet and Dave get to be cowboys for the day at Six Cow Corral.',NEWLINE 'timestamp': 1437758640,NEWLINE 'upload_date': '20150724',NEWLINE 'uploader': 'NBCU-SPROUT-NEW',NEWLINE }NEWLINE }NEWLINENEWLINE def _real_extract(self, url):NEWLINE video_id = self._match_id(url)NEWLINE webpage = self._download_webpage(url, video_id)NEWLINE video_component = self._search_regex(NEWLINE r'(?s)(<div[^>]+data-component="video"[^>]*?>)',NEWLINE webpage, 'video component', default=None)NEWLINE if video_component:NEWLINE options = self._parse_json(extract_attributes(NEWLINE video_component)['data-options'], video_id)NEWLINE theplatform_url = options['video']NEWLINE query = {NEWLINE 'mbr': 'true',NEWLINE 'manifest': 'm3u',NEWLINE }NEWLINE if options.get('protected'):NEWLINE query['auth'] = self._extract_mvpd_auth(url, options['pid'], 'sprout', 'sprout')NEWLINE theplatform_url = smuggle_url(update_url_query(NEWLINE theplatform_url, query), {'force_smil_url': True})NEWLINE else:NEWLINE iframe = self._search_regex(NEWLINE r'(<iframe[^>]+id="sproutVideoIframe"[^>]*?>)',NEWLINE webpage, 'iframe')NEWLINE theplatform_url = extract_attributes(iframe)['src']NEWLINENEWLINE return self.url_result(theplatform_url, 'ThePlatform')NEWLINE
# Copyright (c) 2013 The Chromium Authors. All rights reserved.NEWLINE# Use of this source code is governed by a BSD-style license that can beNEWLINE# found in the LICENSE file.NEWLINENEWLINE"""Parses the command line, discovers the appropriate tests, and runs them.NEWLINENEWLINEHandles test configuration, but all the logic forNEWLINEactually running the test is in Test and PageRunner."""NEWLINENEWLINEimport copyNEWLINEimport inspectNEWLINEimport jsonNEWLINEimport osNEWLINEimport sysNEWLINENEWLINEfrom telemetry import testNEWLINEfrom telemetry.core import browser_optionsNEWLINEfrom telemetry.core import command_lineNEWLINEfrom telemetry.core import discoverNEWLINEfrom telemetry.core import utilNEWLINENEWLINENEWLINEclass Help(command_line.OptparseCommand):NEWLINE """Display help information"""NEWLINENEWLINE def Run(self, options, args):NEWLINE print >> sys.stderr, ('usage: %s <command> [<options>]' % _GetScriptName())NEWLINE print >> sys.stderr, 'Available commands are:'NEWLINE for command in COMMANDS:NEWLINE print >> sys.stderr, ' %-10s %s' % (command.name, command.description)NEWLINE return 0NEWLINENEWLINENEWLINEclass List(command_line.OptparseCommand):NEWLINE """Lists the available tests"""NEWLINENEWLINE usage = '[test_name] [<options>]'NEWLINENEWLINE def __init__(self):NEWLINE super(List, self).__init__()NEWLINE self._tests = NoneNEWLINENEWLINE def AddCommandLineOptions(self, parser):NEWLINE parser.add_option('-j', '--json', action='store_true')NEWLINENEWLINE def ProcessCommandLine(self, parser, options, args):NEWLINE if not args:NEWLINE self._tests = _GetTests()NEWLINE elif len(args) == 1:NEWLINE self._tests = _MatchTestName(args[0])NEWLINE else:NEWLINE parser.error('Must provide at most one test name.')NEWLINENEWLINE def Run(self, options, args):NEWLINE if options.json:NEWLINE test_list = []NEWLINE for test_name, test_class in sorted(self._tests.items()):NEWLINE test_list.append({NEWLINE 'name': test_name,NEWLINE 'description': test_class.__doc__,NEWLINE 'options': test_class.options,NEWLINE })NEWLINE print json.dumps(test_list)NEWLINE else:NEWLINE print >> sys.stderr, 'Available tests are:'NEWLINE _PrintTestList(self._tests)NEWLINE return 0NEWLINENEWLINENEWLINEclass Run(command_line.OptparseCommand):NEWLINE """Run one or more tests"""NEWLINENEWLINE usage = 'test_name [<options>]'NEWLINENEWLINE def __init__(self):NEWLINE super(Run, self).__init__()NEWLINE self._test = NoneNEWLINENEWLINE def CreateParser(self):NEWLINE options = browser_options.BrowserFinderOptions()NEWLINE parser = options.CreateParser('%%prog %s %s' % (self.name, self.usage))NEWLINE return parserNEWLINENEWLINE def AddCommandLineOptions(self, parser):NEWLINE test.Test.AddCommandLineOptions(parser)NEWLINENEWLINE # Allow tests to add their own command line options.NEWLINE matching_tests = {}NEWLINE for arg in sys.argv[1:]:NEWLINE matching_tests.update(_MatchTestName(arg))NEWLINE for test_class in matching_tests.itervalues():NEWLINE test_class.AddTestCommandLineOptions(parser)NEWLINENEWLINE def ProcessCommandLine(self, parser, options, args):NEWLINE if len(args) != 1:NEWLINE parser.error('Must provide one test name.')NEWLINENEWLINE input_test_name = args[0]NEWLINE matching_tests = _MatchTestName(input_test_name)NEWLINE if not matching_tests:NEWLINE print >> sys.stderr, 'No test named "%s".' 
% input_test_nameNEWLINE print >> sys.stderrNEWLINE print >> sys.stderr, 'Available tests:'NEWLINE _PrintTestList(_GetTests())NEWLINE sys.exit(1)NEWLINE if len(matching_tests) > 1:NEWLINE print >> sys.stderr, 'Multiple tests named "%s".' % input_test_nameNEWLINE print >> sys.stderrNEWLINE print >> sys.stderr, 'Did you mean one of these?'NEWLINE _PrintTestList(matching_tests)NEWLINE sys.exit(1)NEWLINENEWLINE self._test = matching_tests.popitem()[1]NEWLINENEWLINE def Run(self, options, args):NEWLINE return min(255, self._test().Run(copy.copy(options)))NEWLINENEWLINENEWLINECOMMANDS = [cls() for _, cls in inspect.getmembers(sys.modules[__name__])NEWLINE if inspect.isclass(cls)NEWLINE and cls is not command_line.OptparseCommandNEWLINE and issubclass(cls, command_line.OptparseCommand)]NEWLINENEWLINENEWLINEdef _GetScriptName():NEWLINE return os.path.basename(sys.argv[0])NEWLINENEWLINENEWLINEdef _GetTests():NEWLINE base_dir = util.GetBaseDir()NEWLINE tests = discover.DiscoverClasses(base_dir, base_dir, test.Test,NEWLINE index_by_class_name=True)NEWLINE return dict((test.GetName(), test) for test in tests.itervalues())NEWLINENEWLINENEWLINEdef _MatchTestName(input_test_name):NEWLINE def _Matches(input_string, search_string):NEWLINE if search_string.startswith(input_string):NEWLINE return TrueNEWLINE for part in search_string.split('.'):NEWLINE if part.startswith(input_string):NEWLINE return TrueNEWLINE return FalseNEWLINENEWLINE # Exact matching.NEWLINE if input_test_name in test_aliases:NEWLINE exact_match = test_aliases[input_test_name]NEWLINE else:NEWLINE exact_match = input_test_nameNEWLINE if exact_match in _GetTests():NEWLINE return {exact_match: _GetTests()[exact_match]}NEWLINENEWLINE # Fuzzy matching.NEWLINE return dict((test_name, test_class)NEWLINE for test_name, test_class in _GetTests().iteritems()NEWLINE if _Matches(input_test_name, test_name))NEWLINENEWLINENEWLINEdef _PrintTestList(tests):NEWLINE for test_name, test_class in sorted(tests.items()):NEWLINE if test_class.__doc__:NEWLINE description = test_class.__doc__.splitlines()[0]NEWLINE # Align the test names to the longest one.NEWLINE format_string = ' %%-%ds %%s' % max(map(len, tests.iterkeys()))NEWLINE print >> sys.stderr, format_string % (test_name, description)NEWLINE else:NEWLINE print >> sys.stderr, ' %s' % test_nameNEWLINENEWLINENEWLINEtest_aliases = {}NEWLINENEWLINENEWLINEdef Main():NEWLINE # Get the command name from the command line.NEWLINE if len(sys.argv) > 1 and sys.argv[1] == '--help':NEWLINE sys.argv[1] = 'help'NEWLINENEWLINE command_name = 'run'NEWLINE for arg in sys.argv[1:]:NEWLINE if not arg.startswith('-'):NEWLINE command_name = argNEWLINE breakNEWLINENEWLINE # Validate and interpret the command name.NEWLINE commands = [command for command in COMMANDSNEWLINE if command.name.startswith(command_name)]NEWLINE if len(commands) > 1:NEWLINE print >> sys.stderr, ('"%s" is not a %s command. Did you mean one of these?'NEWLINE % (command_name, _GetScriptName()))NEWLINE for command in commands:NEWLINE print >> sys.stderr, ' %-10s %s' % (command.name, command.description)NEWLINE return 1NEWLINE if commands:NEWLINE command = commands[0]NEWLINE else:NEWLINE command = Run()NEWLINENEWLINE # Parse and run the command.NEWLINE parser = command.CreateParser()NEWLINE command.AddCommandLineOptions(parser)NEWLINE options, args = parser.parse_args()NEWLINE if commands:NEWLINE args = args[1:]NEWLINE command.ProcessCommandLine(parser, options, args)NEWLINE return command.Run(options, args)NEWLINE
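# Illustrative only: a standalone copy of the fuzzy-match rule inside _MatchTestName, which accepts a prefix of the full test name or of any dot-separated part ("smoothness.top_25" is a hypothetical test name, not taken from this file).NEWLINEdef _matches_sketch(input_string, search_string):NEWLINE    if search_string.startswith(input_string):NEWLINE        return TrueNEWLINE    return any(part.startswith(input_string) for part in search_string.split('.'))NEWLINENEWLINEassert _matches_sketch('smooth', 'smoothness.top_25')NEWLINEassert _matches_sketch('top', 'smoothness.top_25')NEWLINEassert not _matches_sketch('bottom', 'smoothness.top_25')NEWLINE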
from collections import OrderedDictNEWLINEfrom typing import ListNEWLINEfrom typing import OptionalNEWLINENEWLINEimport optunaNEWLINEfrom optuna.distributions import BaseDistributionNEWLINEfrom optuna.distributions import CategoricalDistributionNEWLINEfrom optuna.distributions import DiscreteUniformDistributionNEWLINEfrom optuna.distributions import IntLogUniformDistributionNEWLINEfrom optuna.distributions import IntUniformDistributionNEWLINEfrom optuna.distributions import LogUniformDistributionNEWLINEfrom optuna.distributions import UniformDistributionNEWLINEfrom optuna.importance._base import BaseImportanceEvaluatorNEWLINEfrom optuna.logging import get_loggerNEWLINEfrom optuna.study import StudyNEWLINEfrom optuna.trial import TrialStateNEWLINEfrom optuna.visualization._plotly_imports import _importsNEWLINENEWLINEif _imports.is_successful():NEWLINE from optuna.visualization._plotly_imports import goNEWLINENEWLINE import plotlyNEWLINENEWLINE Blues = plotly.colors.sequential.BluesNEWLINENEWLINE _distribution_colors = {NEWLINE UniformDistribution: Blues[-1],NEWLINE LogUniformDistribution: Blues[-1],NEWLINE DiscreteUniformDistribution: Blues[-1],NEWLINE IntUniformDistribution: Blues[-2],NEWLINE IntLogUniformDistribution: Blues[-2],NEWLINE CategoricalDistribution: Blues[-4],NEWLINE }NEWLINENEWLINElogger = get_logger(__name__)NEWLINENEWLINENEWLINEdef plot_param_importances(NEWLINE study: Study, evaluator: BaseImportanceEvaluator = None, params: Optional[List[str]] = NoneNEWLINE) -> "go.Figure":NEWLINE """Plot hyperparameter importances.NEWLINENEWLINE Example:NEWLINENEWLINE The following code snippet shows how to plot hyperparameter importances.NEWLINENEWLINE .. testcode::NEWLINENEWLINE import optunaNEWLINENEWLINE def objective(trial):NEWLINE x = trial.suggest_int("x", 0, 2)NEWLINE y = trial.suggest_float("y", -1.0, 1.0)NEWLINE z = trial.suggest_float("z", 0.0, 1.5)NEWLINE return x ** 2 + y ** 3 - z ** 4NEWLINENEWLINE study = optuna.create_study(sampler=optuna.samplers.RandomSampler())NEWLINE study.optimize(objective, n_trials=100)NEWLINENEWLINE optuna.visualization.plot_param_importances(study)NEWLINENEWLINE .. raw:: htmlNEWLINENEWLINE <iframe src="../../_static/plot_param_importances.html"NEWLINE width="100%" height="500px" frameborder="0">NEWLINE </iframe>NEWLINENEWLINE .. 
seealso::NEWLINENEWLINE This function visualizes the results of :func:`optuna.importance.get_param_importances`.NEWLINENEWLINE Args:NEWLINE study:NEWLINE An optimized study.NEWLINE evaluator:NEWLINE An importance evaluator object that specifies which algorithm to base the importanceNEWLINE assessment on.NEWLINE Defaults toNEWLINE :class:`~optuna.importance._mean_decrease_impurity.MeanDecreaseImpurityImportanceEvaluator`.NEWLINE params:NEWLINE A list of names of parameters to assess.NEWLINE If :obj:`None`, all parameters that are present in all of the completed trials areNEWLINE assessed.NEWLINENEWLINE Returns:NEWLINE A :class:`plotly.graph_objs.Figure` object.NEWLINE """NEWLINENEWLINE _imports.check()NEWLINENEWLINE layout = go.Layout(NEWLINE title="Hyperparameter Importances",NEWLINE xaxis={"title": "Importance"},NEWLINE yaxis={"title": "Hyperparameter"},NEWLINE showlegend=False,NEWLINE )NEWLINENEWLINE # Importances cannot be evaluated without completed trials.NEWLINE # Return an empty figure for consistency with other visualization functions.NEWLINE trials = [trial for trial in study.trials if trial.state == TrialState.COMPLETE]NEWLINE if len(trials) == 0:NEWLINE logger.warning("Study instance does not contain completed trials.")NEWLINE return go.Figure(data=[], layout=layout)NEWLINENEWLINE importances = optuna.importance.get_param_importances(NEWLINE study, evaluator=evaluator, params=paramsNEWLINE )NEWLINENEWLINE importances = OrderedDict(reversed(list(importances.items())))NEWLINE importance_values = list(importances.values())NEWLINE param_names = list(importances.keys())NEWLINENEWLINE fig = go.Figure(NEWLINE data=[NEWLINE go.Bar(NEWLINE x=importance_values,NEWLINE y=param_names,NEWLINE text=importance_values,NEWLINE texttemplate="%{text:.2f}",NEWLINE textposition="outside",NEWLINE cliponaxis=False, # Ensure text is not clipped.NEWLINE hovertemplate=[NEWLINE _make_hovertext(param_name, importance, study)NEWLINE for param_name, importance in importances.items()NEWLINE ],NEWLINE marker_color=[_get_color(param_name, study) for param_name in param_names],NEWLINE orientation="h",NEWLINE )NEWLINE ],NEWLINE layout=layout,NEWLINE )NEWLINENEWLINE return figNEWLINENEWLINENEWLINEdef _get_distribution(param_name: str, study: Study) -> BaseDistribution:NEWLINE for trial in study.trials:NEWLINE if param_name in trial.distributions:NEWLINE return trial.distributions[param_name]NEWLINE assert FalseNEWLINENEWLINENEWLINEdef _get_color(param_name: str, study: Study) -> str:NEWLINE return _distribution_colors[type(_get_distribution(param_name, study))]NEWLINENEWLINENEWLINEdef _make_hovertext(param_name: str, importance: float, study: Study) -> str:NEWLINE return "{} ({}): {}<extra></extra>".format(NEWLINE param_name, _get_distribution(param_name, study).__class__.__name__, importanceNEWLINE )NEWLINE
#!/usr/bin/env python2NEWLINEimport numpy as npNEWLINEimport path_parserNEWLINEfrom mpl_toolkits.mplot3d import Axes3DNEWLINEimport matplotlib.pyplot as pltNEWLINEfrom matplotlib import cmNEWLINEfrom matplotlib.ticker import LinearLocator, FormatStrFormatterNEWLINEfrom scipy.spatial import KDTreeNEWLINEruta='sample_map_origin_map_1.txt'NEWLINE#ruta='Trayectoria2.txt'NEWLINENEWLINEmap_size_x=250.0 #cmNEWLINEmap_size_y=250.0 #cmNEWLINEresolution = 1.0 #cmNEWLINEmatrix = np.zeros( (map_size_x/resolution,map_size_y/resolution,2),dtype='f' )NEWLINEmatrix_dist = np.zeros( (map_size_x/resolution,map_size_y/resolution),dtype='f' )NEWLINENEWLINEdef show_nearest(target,tree,xy):NEWLINE dist, index = tree.query(target) #Obtiene los puntos mas cercanos al caminoNEWLINE global lookahead_offsetNEWLINE lookahead_offset = np.int(2 + (5/(5*dist+1)))NEWLINE lookahead_target = xy[(index + lookahead_offset) % len(xy)]NEWLINENEWLINE x1, y1 = targetNEWLINE x3, y3 = lookahead_targetNEWLINENEWLINE plt.scatter(*target, color='r')NEWLINE plt.scatter(*xy[index], color='g')NEWLINE ax = plt.axes()NEWLINE ax.arrow(x1, y1, (x3-x1)/5, (y3-y1)/5 , head_width=0.01, head_length=0.01, fc='k', ec='k')NEWLINE plt.scatter(*lookahead_target, color='m')NEWLINE plt.show(block=False)NEWLINE global matrixNEWLINE x_index=np.int(x1*10)NEWLINE y_index=np.int(y1*10)NEWLINENEWLINE matrix[x_index,y_index,0]=x3-x1NEWLINE matrix[x_index,y_index,1]=y3-y1NEWLINENEWLINEdef near(initial_position,xind,yind,tree,xy,ax):NEWLINE dist, index = tree.query(initial_position)NEWLINE global matrix_distNEWLINE matrix_dist[xind,yind]=distNEWLINE #Encontar el punto mas cercano a llegarNEWLINE lookahead_offset = np.int(1 + (5/(5*dist+1)))NEWLINE lookahead_target = xy[(index + lookahead_offset) % len(xy)]NEWLINE x1, y1 = initial_positionNEWLINE x3, y3 = lookahead_targetNEWLINE #print x1,y1,x3,y3NEWLINE #plt.scatter(*initial_position, color='r')NEWLINE #plt.scatter(*xy[index], color='g')NEWLINE #ax.arrow(x1, y1, (x3-x1), (y3-y1) , head_width=0.01, head_length=0.01, fc='k', ec='k')NEWLINE #plt.scatter(*lookahead_target, color='m')NEWLINE x_index=xindNEWLINE y_index=yindNEWLINE matrix[x_index,y_index,0]=x3-x1 #distancias en xNEWLINE matrix[x_index,y_index,1]=y3-y1 #distancias en yNEWLINEdef main():NEWLINE arr_in=np.array(list(path_parser.read_points(ruta)))NEWLINE ax,ay=arr_in.TNEWLINE min_x=np.min(ax)NEWLINE min_y=np.min(ay)NEWLINE max_x=np.max(ax)NEWLINE max_y=np.max(ay)NEWLINE print 'Minimo en x',min_xNEWLINE print 'Minimo en y',min_yNEWLINE print 'Maximo en x',max_xNEWLINE print 'Maximo en y',max_yNEWLINE if (min_x>0 and min_y>0):NEWLINE offsetg=0NEWLINE else:NEWLINE if min_x>min_y:NEWLINE offsetg=min_xNEWLINE else:NEWLINE offsetg=min_yNEWLINE scale_x=0.7*(map_size_x/100)/(max_x-offsetg)NEWLINE scale_y=0.7*(map_size_y/100)/(max_y-offsetg)NEWLINE scale=np.array([scale_x,scale_y])NEWLINE xy = np.multiply(scale,arr_in)+np.array([0.30,0.30])NEWLINE x,y = xy.TNEWLINE fig = plt.figure(figsize=(7,7), facecolor='w')NEWLINE fig.canvas.set_window_title('Trayectoria')NEWLINE plt.plot(x,y)NEWLINE tree = KDTree(xy)NEWLINE plt.plot(x, y, ':o', markersize=4)NEWLINE plt.tight_layout()#Ajusta los titulos de subplots para evitar que salgan de la figura.NEWLINE NEWLINE print('please wait ...')NEWLINE X=np.arange(0,map_size_x/100,resolution/100)NEWLINE Y=np.arange(0,map_size_y/100,resolution/100)NEWLINE X,Y=np.meshgrid(X,Y)NEWLINE lim_x=int(map_size_x/resolution);NEWLINE lim_y=int(map_size_y/resolution);NEWLINE print lim_x,lim_yNEWLINE fig = plt.figure(figsize=(7,7), 
facecolor='w')NEWLINE fig.canvas.set_window_title('Puntos de prueba')NEWLINE ax = plt.axes()NEWLINE for xi in range(0, lim_x):NEWLINE print float(xi)/lim_x*100NEWLINE for yi in range(0, lim_y):NEWLINE #show_nearest((x,y))NEWLINE near((xi*resolution/100,yi*resolution/100),xi,yi,tree,xy,ax)NEWLINE Z=matrix_dist;NEWLINE fig = plt.figure(figsize=(7,7), facecolor='w')NEWLINE ax = fig.gca(projection='3d')NEWLINE fig.canvas.set_window_title('Distancias')NEWLINE surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,linewidth=0, antialiased=False)NEWLINE plt.show()NEWLINE np.save('TrayA1.npy', matrix)NEWLINE print('matrixForce is saved.')NEWLINENEWLINEif __name__ == '__main__':NEWLINE main()NEWLINE cadena='Hola'NEWLINE
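# Quick standalone check (illustrative) of the lookahead rule used in near() above: the offset falls from 6 path points when on the path (dist = 0) to a single point far away, so the carrot point is pulled in as tracking error grows.NEWLINEfor d in (0.0, 0.2, 2.0):NEWLINE    print d, int(1 + 5 / (5 * d + 1.0))NEWLINE# -> 6, 3 and 1 points of lookaheadNEWLINE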
from __future__ import unicode_literalsNEWLINENEWLINEfrom decimal import DecimalNEWLINEimport jsonNEWLINEimport loggingNEWLINEfrom urllib.request import urlopenNEWLINENEWLINEfrom django.core.exceptions import ImproperlyConfiguredNEWLINENEWLINEfrom dj_currencies.sources import CurrencyDataExchangeSourceNEWLINEfrom .exceptions import RateBackendErrorNEWLINEfrom .models import ExchangeRateNEWLINEfrom .settings import currency_settingsNEWLINENEWLINElogger = logging.getLogger(__name__)NEWLINENEWLINENEWLINEclass BaseRateBackend(object):NEWLINENEWLINE def get_latest_rates(self, base_currency, symbols=None):NEWLINE """NEWLINE Fetch latest rates for one base currencyNEWLINE :param base_currency: a three letter currency symbolNEWLINE :param symbols: List of symbols to fetchNEWLINE :return:NEWLINE """NEWLINE raise NotImplementedError()NEWLINENEWLINE def update_rates(self):NEWLINE raise NotImplementedError()NEWLINENEWLINENEWLINEclass OpenExchangeBackend(BaseRateBackend):NEWLINENEWLINE def __init__(self):NEWLINE if not currency_settings.OPENEXCHANGE_APP_ID:NEWLINE raise ImproperlyConfigured(NEWLINE "OPENEXCHANGE_APP_ID setting should not be empty when using OpenExchangeBackend")NEWLINENEWLINE if not currency_settings.BASE_CURRENCIES:NEWLINE raise ImproperlyConfigured(NEWLINE "BASE_CURRENCIES setting should not be empty. It should be set as a three letter currency code")NEWLINENEWLINE # Build the base api urlNEWLINE self.base_url = 'https://openexchangerates.org/api/latest.json?app_id={0}'.format(NEWLINE currency_settings.OPENEXCHANGE_APP_IDNEWLINE )NEWLINENEWLINE def get_end_point_url(self, base_currency, symbols):NEWLINE url = self.base_url + '&base={0}'.format(base_currency)NEWLINE if symbols:NEWLINE symbol_args = ','.join(symbols)NEWLINE url = url + '&symbols={0}'.format(symbol_args)NEWLINE return urlNEWLINENEWLINE def get_cached_rates(self, symbols=None):NEWLINE if not symbols:NEWLINE return {}NEWLINENEWLINE ex_rates = ExchangeRate.objects.order_by('base_currency', '-last_updated_at').filter(NEWLINE base_currency__in=symbolsNEWLINE ).distinct('base_currency')[:len(symbols)]NEWLINENEWLINE return {ex_rate.base_currency: ex_rate.rates for ex_rate in ex_rates}NEWLINENEWLINE def get_latest_rates(self, base_currency, symbols=None):NEWLINE url = self.get_end_point_url(base_currency, symbols)NEWLINENEWLINE try:NEWLINE data = urlopen(url).read().decode("utf-8")NEWLINE return json.loads(data)['rates']NEWLINE except Exception as e:NEWLINE logger.exception("Error retrieving data from %s", url)NEWLINE raise RateBackendError("Error retrieving rates: %s" % e)NEWLINENEWLINE def update_rates(self):NEWLINE for currency in currency_settings.BASE_CURRENCIES:NEWLINE print('Updating exchange rates with base currency {0}'.format(currency))NEWLINE rates = self.get_latest_rates(currency)NEWLINE ExchangeRate.objects.create(NEWLINE base_currency=currency,NEWLINE rates=rates,NEWLINE source=CurrencyDataExchangeSource.OPENEXCHANGERATES,NEWLINE )NEWLINENEWLINE def convert_money(self, amount, currency_from, currency_to):NEWLINE ex_rate = ExchangeRate.objects.base_currency(currency_from).within_days(NEWLINE currency_settings.MAX_CACHE_DAYS)NEWLINENEWLINE if isinstance(amount, float):NEWLINE amount = Decimal(amount).quantize(Decimal('.000001'))NEWLINENEWLINE rate_to = ex_rate.rates.get(currency_to)NEWLINENEWLINE if not rate_to:NEWLINE raise RateBackendError(NEWLINE 'No exchange rate found from {0} to {1}'.format(ex_rate.base_currency, currency_to))NEWLINE rate_to = Decimal(str(rate_to)).quantize(Decimal('.000001'))NEWLINE 
converted_amount = amount * rate_toNEWLINENEWLINE return converted_amount.quantize(Decimal('1.00'))NEWLINE
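# Hedged usage sketch: assumes Django is configured with OPENEXCHANGE_APP_ID and BASE_CURRENCIES in the dj_currencies settings, so it is not runnable stand-alone.NEWLINE# backend = OpenExchangeBackend()NEWLINE# backend.update_rates()                     # one ExchangeRate row per base currencyNEWLINE# backend.convert_money(10.0, 'USD', 'EUR')  # Decimal quantized to 2 placesNEWLINE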
import asyncioNEWLINENEWLINEfrom unittest.mock import patch, MagicMockNEWLINENEWLINEfrom icap import ICAPRequest, HeadersDict, handlerNEWLINEfrom icap.session import make_session_id, should_finalize_session, get_session, SessionStorageNEWLINEfrom icap.criteria import _HANDLERSNEWLINENEWLINENEWLINEdef test_make_session_id():NEWLINE req = ICAPRequest()NEWLINE with patch('icap.session.uuid.uuid4') as mock_uuid:NEWLINE mock_uuid.return_value.hex = 'cool hash'NEWLINE assert make_session_id(req) == 'cool hash'NEWLINENEWLINE req.headers['X-Session-ID'] = 'cool session id'NEWLINENEWLINE assert make_session_id(req) == 'cool session id'NEWLINENEWLINENEWLINEdef test_SessionStorage():NEWLINE t = SessionStorage.get('foo', MagicMock())NEWLINENEWLINE assert t['id'] == 'foo'NEWLINE assert 'foo' in SessionStorage.sessionsNEWLINE assert SessionStorage.get('foo', MagicMock()) is tNEWLINENEWLINE assert SessionStorage.finalize('foo')NEWLINE assert not SessionStorage.finalize('foo')NEWLINENEWLINE assert 'foo' not in SessionStorage.sessionsNEWLINE assert SessionStorage.get('foo', MagicMock()) is not tNEWLINENEWLINENEWLINEdef test_get_session():NEWLINE request = MagicMock(headers=HeadersDict())NEWLINE request.http.request_line.uri = 'foo'NEWLINE request.headers['X-Session-ID'] = 'bar'NEWLINENEWLINE session = asyncio.get_event_loop().run_until_complete(get_session(request))NEWLINENEWLINE assert session['url'] == 'foo'NEWLINE assert session['id'] == 'bar'NEWLINENEWLINENEWLINEdef test_should_finalize_session():NEWLINE _HANDLERS.clear()NEWLINENEWLINE assert not should_finalize_session(MagicMock(is_options=True))NEWLINE assert should_finalize_session(MagicMock(is_options=False, is_respmod=True))NEWLINE assert should_finalize_session(MagicMock(is_options=False, is_respmod=False, headers=HeadersDict()))NEWLINENEWLINE request = MagicMock(is_options=False, is_respmod=False, headers=HeadersDict())NEWLINE request.headers['X-Session-ID'] = 'foo'NEWLINENEWLINE @handler()NEWLINE def respmod(request):NEWLINE passNEWLINENEWLINE @handler(name='foo')NEWLINE def respmod(request):NEWLINE passNEWLINENEWLINE for p in ['/reqmod', '/reqmod/', '/foo/reqmod', '/foo/reqmod/']:NEWLINE request.request_line.uri.path = pNEWLINE assert not should_finalize_session(request)NEWLINENEWLINE for p in ['/bar/reqmod', '/bar/reqmod/']:NEWLINE request.request_line.uri.path = pNEWLINE assert should_finalize_session(request)NEWLINE
import osNEWLINEimport shutilNEWLINEimport unittestNEWLINEfrom xbrr.edinet.client.document_client import DocumentClientNEWLINEfrom xbrr.edinet.reader.doc import DocNEWLINENEWLINENEWLINEclass TestDoc(unittest.TestCase):NEWLINENEWLINE @classmethodNEWLINE def setUpClass(cls):NEWLINE cls._dir = os.path.join(os.path.dirname(__file__), "../data")NEWLINE client = DocumentClient()NEWLINE cls.root_dir = client.get_xbrl("S100FGR9", save_dir=cls._dir,NEWLINE expand_level="dir")NEWLINE cls.doc = Doc(root_dir=cls.root_dir, xbrl_kind="public")NEWLINENEWLINE @classmethodNEWLINE def tearDownClass(cls):NEWLINE shutil.rmtree(cls.root_dir)NEWLINENEWLINE def test_doc(self):NEWLINE doc = self.docNEWLINENEWLINE self.assertGreater(len(doc.xsd.find_all("element")), 0)NEWLINE self.assertGreater(len(doc.cal.find_all("calculationLink")), 0)NEWLINE self.assertGreater(len(doc.def_.find_all("definitionArc")), 0)NEWLINE self.assertGreater(len(doc.lab.find_all("labelLink")), 0)NEWLINE self.assertGreater(len(doc.lab_en.find_all("labelLink")), 0)NEWLINE self.assertGreater(len(doc.pre.find_all("presentationLink")), 0)NEWLINE self.assertTrue(doc.man.find("manifest"))NEWLINENEWLINE def test_find_xsduri(self):NEWLINE doc = self.docNEWLINE self.assertEqual(doc.find_xsduri("http://disclosure.edinet-fsa.go.jp/taxonomy/jpcrp/2018-02-28/jpcrp_cor"),NEWLINE "http://disclosure.edinet-fsa.go.jp/taxonomy/jpcrp/2018-02-28/jpcrp_cor_2018-02-28.xsd")NEWLINENEWLINE self.assertEqual(doc.find_xsduri("http://disclosure.edinet-fsa.go.jp/jpcrp030000/asr/001/E01726-000/2018-12-31/01/2019-03-27"),NEWLINE "jpcrp030000-asr-001_E01726-000_2018-12-31_01_2019-03-27.xsd")NEWLINE self.assertEqual(doc.find_xsduri("local"),NEWLINE "jpcrp030000-asr-001_E01726-000_2018-12-31_01_2019-03-27.xsd")NEWLINENEWLINE def test_find_laburi(self):NEWLINE doc = self.docNEWLINE self.assertEqual(doc.find_laburi('local', 'lab'), "jpcrp030000-asr-001_E01726-000_2018-12-31_01_2019-03-27_lab.xml")NEWLINE self.assertEqual(doc.find_laburi('jpcrp030000-asr-001_E01726-000_2018-12-31_01_2019-03-27.xsd', 'lab'), "jpcrp030000-asr-001_E01726-000_2018-12-31_01_2019-03-27_lab.xml")NEWLINENEWLINE self.assertEqual(doc.find_laburi(doc.find_xsduri('http://disclosure.edinet-fsa.go.jp/taxonomy/jpcrp/2018-02-28/jpcrp_cor'), 'lab'), NEWLINE "http://disclosure.edinet-fsa.go.jp/taxonomy/jpcrp/2018-02-28/label/jpcrp_2018-02-28_lab.xml")NEWLINE
import thulacNEWLINEfrom py2neo import GraphNEWLINENEWLINENEWLINEentity_dict = {}NEWLINEentity_type_dict = {}NEWLINEdef init():NEWLINE    with open("entity.txt", "r", encoding="utf-8") as f:NEWLINE        for line in f.readlines():NEWLINE            line = line.strip('\n')NEWLINE            line = line.split('\t')NEWLINE            entity_dict[line[1]] = line[0]NEWLINENEWLINE    with open("entity_type.txt", "r", encoding="utf-8") as f:NEWLINE        for line in f.readlines():NEWLINE            line = line.strip('\n')NEWLINE            line = line.split(' ')NEWLINE            entity_type_dict[line[0]] = line[1]NEWLINENEWLINEdef analyse_question(text, cutter):NEWLINE    cut_text = cutter.cut(text)NEWLINE    entity_list = []NEWLINE    entity_type_list = []NEWLINE    for seg in cut_text:NEWLINE        if seg[0] in entity_type_dict:NEWLINE            entity_type_list.append(seg[0])NEWLINE        elif seg[0] in entity_dict:NEWLINE            entity_list.append(seg[0])NEWLINE    graph = Graph("http://localhost:7474", username="", password='')NEWLINE    for entity in entity_list:NEWLINE        for entity_type in entity_type_list:NEWLINE            ans = graph.run("MATCH (entity1) - [rel] - (entity2:%s) WHERE entity1.name ='%s' RETURN entity2.name" % (entity_type_dict[entity_type], entity)).data()NEWLINE            if len(ans) != 0:NEWLINE                # "The <entity_type> related to <entity> are: ..."NEWLINE                response = "%s相关的%s有:" % (entity, entity_type)NEWLINE                for record in ans:NEWLINE                    response += record['entity2.name'] + ","NEWLINE                response = response[:-1]NEWLINE                print(response)NEWLINE            else:NEWLINE                # "We don't know about <entity_type> related to <entity> yet; we will keep adding data!"NEWLINE                response = "%s相关的%s暂未有了解,我们会继续完善补充数据的!(≧∀≦)ゞ" % (entity, entity_type)NEWLINE                print(response)NEWLINE    if len(entity_type_list) == 0 and len(entity_list) > 0:NEWLINE        # "We don't know about <entity> yet; we will keep adding data!"NEWLINE        # (guarded so that `entity_list[0]` is defined; the bare loop variable was a NameError when no entity was recognized)NEWLINE        response = "您所提问%s的相关内容我们暂不了解,我们会继续完善补充数据的!(≧∀≦)ゞ" % (entity_list[0])NEWLINE        print(response)NEWLINE    if len(entity_list) == 0:NEWLINE        # "We don't understand the question yet; we will keep adding data!"NEWLINE        response = "您所提问的我们暂不了解,我们会继续完善补充数据的!(≧∀≦)ゞ"NEWLINE        print(response)NEWLINENEWLINEif __name__ == "__main__":NEWLINE    init()NEWLINE    # sample question: "I would like to know about Suzhou Ribo's research platforms"NEWLINE    # text = "我想了解苏州瑞博公司的研究平台"NEWLINE    cutter = thulac.thulac(user_dict="user_dict.txt")NEWLINE    while True:NEWLINE        text = input('TechBot请您提问...>>')  # "TechBot: please ask your question"NEWLINE        analyse_question(text, cutter=cutter)NEWLINE
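# Illustrative rendered query (the "Platform" label is hypothetical; real labels come from entity_type.txt):NEWLINE# MATCH (entity1) - [rel] - (entity2:Platform) WHERE entity1.name ='苏州瑞博' RETURN entity2.nameNEWLINE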
from django.shortcuts import renderNEWLINEfrom django.contrib.auth.models import UserNEWLINEfrom django.http import Http404NEWLINEfrom django.views.generic import DetailViewNEWLINEfrom django.contrib.auth.decorators import login_requiredNEWLINEfrom django.utils.decorators import method_decoratorNEWLINEfrom django.shortcuts import get_object_or_404NEWLINENEWLINEfrom comics.models import (NEWLINE Comic,NEWLINE Post,NEWLINE ContributorNEWLINE)NEWLINENEWLINENEWLINEclass ProfileView(DetailView):NEWLINE template_name="profile.html"NEWLINE model = UserNEWLINENEWLINE def dispatch(self, *args, **kwargs):NEWLINE if kwargs.get('username'):NEWLINE self.user = get_object_or_404(User, username=kwargs.get('username'))NEWLINE elif self.request.user:NEWLINE self.user = self.request.userNEWLINE else:NEWLINE raise Http404()NEWLINE return super(ProfileView, self).dispatch(*args, **kwargs)NEWLINENEWLINE def get_object(self):NEWLINE return self.userNEWLINENEWLINE def get_context_data(self, **kwargs):NEWLINE context = super(ProfileView, self).get_context_data(**kwargs)NEWLINENEWLINE contributions = Contributor.objects.filter(contributor=self.user)NEWLINENEWLINE comics = Comic.published_comics.filter(NEWLINE post__contributor__in=contributionsNEWLINE ).order_by('-published')NEWLINENEWLINE posts = Post.published_posts.filter(NEWLINE contributor__in=contributionsNEWLINE ).exclude(NEWLINE id__in=comics.values_list('post')NEWLINE ).order_by('-published')NEWLINENEWLINE context['display_user'] = self.userNEWLINE context['posts'] = postsNEWLINE context['comics'] = comicsNEWLINENEWLINE return context
class Solution:NEWLINE    def solve(self, matrix):NEWLINE        if not matrix:NEWLINE            return -1NEWLINENEWLINE        # Build one set per row so each membership test is O(1), then returnNEWLINE        # the first value in row 0 that appears in every row, or -1.NEWLINE        sets = [set(row) for row in matrix]NEWLINE        return next((num for num in matrix[0] if all(num in s for s in sets)), -1)NEWLINE
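# Example (illustrative): 3 is the first value in row 0 that appears in every row; an empty matrix yields -1.NEWLINEassert Solution().solve([[2, 3, 4], [3, 5, 1], [4, 3, 1]]) == 3NEWLINEassert Solution().solve([]) == -1NEWLINE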
import jsonNEWLINEimport os.pathNEWLINENEWLINEclass Inventory:NEWLINENEWLINE    def __init__(self) -> None:NEWLINE        # per-instance store; a shared class-level dict would leak state between instancesNEWLINE        self.pets = {}NEWLINE        self.load()NEWLINENEWLINE    def add(self, key, qty):NEWLINE        q = 0NEWLINE        if key in self.pets: # test to make sure the key existsNEWLINE            v = self.pets[key]NEWLINE            q = v + qtyNEWLINE        else:NEWLINE            q = qtyNEWLINE        self.pets[key] = qNEWLINE        print(f'Added {qty} {key}: total = {self.pets[key]}')NEWLINENEWLINE    def remove(self, key, qty):NEWLINE        if key in self.pets: # test to make sure the key existsNEWLINE            v = self.pets[key]NEWLINE            q = v - qtyNEWLINE            if q < 0:NEWLINE                q = 0NEWLINE            self.pets[key] = qNEWLINE            print(f'Removed {qty} {key}: total = {self.pets[key]}')NEWLINENEWLINE    def display(self):NEWLINE        for key, value in self.pets.items(): # iterate over the dictionaryNEWLINE            print(f'The key {key} = value {value}')NEWLINENEWLINE    def load(self):NEWLINE        if not os.path.exists('inventory.txt'):NEWLINE            print('Skipping, nothing to load')NEWLINE            returnNEWLINE        print('Loading data from inventory')NEWLINE        with open('inventory.txt', 'r') as f:NEWLINE            self.pets = json.load(f)NEWLINE        print('Loaded')NEWLINENEWLINE    def save(self):NEWLINE        print('Saving inventory')NEWLINE        with open('inventory.txt', 'w') as f: # with is autoclosableNEWLINE            json.dump(self.pets, f)NEWLINE        print('Saved')NEWLINENEWLINEdef main():NEWLINE    inv = Inventory()NEWLINE    while True:NEWLINE        action = input('Actions: add, remove, list, save, exit: ')NEWLINE        if action == 'add' or action == 'remove':NEWLINE            key = input('Enter the animal: ')NEWLINE            qty = int(input('Enter the qty: '))NEWLINE            if action == 'add':NEWLINE                inv.add(key, qty)NEWLINE            if action == 'remove':NEWLINE                inv.remove(key, qty)NEWLINE        if action == 'exit':NEWLINE            breakNEWLINE        if action == 'list':NEWLINE            inv.display()NEWLINE        if action == 'save':NEWLINE            inv.save()NEWLINE    inv.save()NEWLINEif __name__ == '__main__':NEWLINE    main()
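# Non-interactive usage sketch (illustrative; reads/writes inventory.txt in the working directory):NEWLINE# inv = Inventory()NEWLINE# inv.add('cat', 2)NEWLINE# inv.remove('cat', 1)NEWLINE# inv.save()NEWLINE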
# Copyright (c) Chris Choy (chrischoy@ai.stanford.edu) and Wei Dong (weidong@andrew.cmu.edu)NEWLINE#NEWLINE# Please cite the following papers if you use any part of the code.NEWLINE# - Christopher Choy, Wei Dong, Vladlen Koltun, Deep Global Registration, CVPR 2020NEWLINE# - Christopher Choy, Jaesik Park, Vladlen Koltun, Fully Convolutional Geometric Features, ICCV 2019NEWLINE# - Christopher Choy, JunYoung Gwak, Silvio Savarese, 4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural Networks, CVPR 2019NEWLINE# Run with python -m scripts.test_3dmatch_refactorNEWLINEimport osNEWLINEimport sysNEWLINEimport mathNEWLINEimport loggingNEWLINEimport open3d as o3dNEWLINEimport numpy as npNEWLINEimport timeNEWLINEimport torchNEWLINEimport copyNEWLINENEWLINEsys.path.append('.')NEWLINEimport MinkowskiEngine as MENEWLINEfrom config import get_configNEWLINEfrom model import load_modelNEWLINENEWLINEfrom dataloader.data_loaders import ThreeDMatchTrajectoryDatasetNEWLINEfrom core.knn import find_knn_gpuNEWLINEfrom core.deep_global_registration import DeepGlobalRegistrationNEWLINENEWLINEfrom util.timer import TimerNEWLINEfrom util.pointcloud import make_open3d_point_cloudNEWLINENEWLINEo3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Warning)NEWLINEch = logging.StreamHandler(sys.stdout)NEWLINElogging.getLogger().setLevel(logging.INFO)NEWLINElogging.basicConfig(format='%(asctime)s %(message)s',NEWLINE datefmt='%m/%d %H:%M:%S',NEWLINE handlers=[ch])NEWLINENEWLINE# CriteriaNEWLINEdef rte_rre(T_pred, T_gt, rte_thresh, rre_thresh, eps=1e-16):NEWLINE if T_pred is None:NEWLINE return np.array([0, np.inf, np.inf])NEWLINENEWLINE rte = np.linalg.norm(T_pred[:3, 3] - T_gt[:3, 3])NEWLINE rre = np.arccos(NEWLINE np.clip((np.trace(T_pred[:3, :3].T @ T_gt[:3, :3]) - 1) / 2, -1 + eps,NEWLINE 1 - eps)) * 180 / math.piNEWLINE return np.array([rte < rte_thresh and rre < rre_thresh, rte, rre])NEWLINENEWLINENEWLINEdef analyze_stats(stats, mask, method_names):NEWLINE mask = (mask > 0).squeeze(1)NEWLINE stats = stats[:, mask, :]NEWLINENEWLINE print('Total result mean')NEWLINE for i, method_name in enumerate(method_names):NEWLINE print(method_name)NEWLINE print("Reg Recall; Mean TE; Mean RE; Mean Time; --; Mean Precision; Mean Recall")NEWLINE print(stats[i].mean(0))NEWLINENEWLINE print('Total successful result mean')NEWLINE for i, method_name in enumerate(method_names):NEWLINE sel = stats[i][:, 0] > 0NEWLINE sel_stats = stats[i][sel]NEWLINE print(method_name)NEWLINE print("Success Rate; Mean TE; Mean RE; Mean Time; --; Mean Precision; Mean Recall")NEWLINE print(sel_stats.mean(0))NEWLINENEWLINENEWLINEdef create_pcd(xyz, color):NEWLINE # n x 3NEWLINE n = xyz.shape[0]NEWLINE pcd = o3d.geometry.PointCloud()NEWLINE pcd.points = o3d.utility.Vector3dVector(xyz)NEWLINE pcd.colors = o3d.utility.Vector3dVector(np.tile(color, (n, 1)))NEWLINE pcd.estimate_normals(NEWLINE search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))NEWLINE return pcdNEWLINENEWLINENEWLINEdef draw_geometries_flip(pcds):NEWLINE pcds_transform = []NEWLINE flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]NEWLINE for pcd in pcds:NEWLINE pcd_temp = copy.deepcopy(pcd)NEWLINE pcd_temp.transform(flip_transform)NEWLINE pcds_transform.append(pcd_temp)NEWLINE o3d.visualization.draw_geometries(pcds_transform)NEWLINENEWLINENEWLINEdef evaluate(methods, method_names, data_loader, config, debug=False):NEWLINENEWLINE tot_num_data = len(data_loader.dataset)NEWLINE data_loader_iter = iter(data_loader)NEWLINENEWLINE # Accumulate 
success, rte, rre, time, safe_guard, precision, recall; scene ids go in sidsNEWLINE  mask = np.zeros((tot_num_data, 1)).astype(int)NEWLINE  stats = np.zeros((len(methods), tot_num_data, 7))NEWLINE  sids = np.zeros(tot_num_data, dtype=int)NEWLINENEWLINE  dataset = data_loader.datasetNEWLINE  subset_names = open(dataset.DATA_FILES[dataset.phase]).read().split()NEWLINENEWLINE  total_safe_guard = 0NEWLINE  for batch_idx in range(tot_num_data):NEWLINE    batch = next(data_loader_iter)NEWLINENEWLINE    # Skip too sparse point cloudsNEWLINE    sname, xyz0, xyz1, trans = batch[0]NEWLINENEWLINE    sid = subset_names.index(sname)NEWLINE    sids[batch_idx] = sidNEWLINE    T_gt = np.linalg.inv(trans)NEWLINENEWLINE    for i, method in enumerate(methods):NEWLINE      start = time.time()NEWLINE      T, precision, recall, outlier_rejection_time, safe_guard = method.register(xyz0, xyz1, T_gt=T_gt)NEWLINE      end = time.time()NEWLINE      total_safe_guard += safe_guardNEWLINENEWLINE      # VisualizeNEWLINE      if debug:NEWLINE        print(method_names[i])NEWLINE        pcd0 = create_pcd(xyz0, np.array([1, 0.706, 0]))NEWLINE        pcd1 = create_pcd(xyz1, np.array([0, 0.651, 0.929]))NEWLINENEWLINE        pcd0.transform(T)NEWLINE        draw_geometries_flip([pcd0, pcd1])NEWLINE        pcd0.transform(np.linalg.inv(T))NEWLINENEWLINE      stats[i, batch_idx, :3] = rte_rre(T, T_gt, config.success_rte_thresh,NEWLINE                                        config.success_rre_thresh)NEWLINE      stats[i, batch_idx, 3] = outlier_rejection_time  # not including feature extraction time.NEWLINE      stats[i, batch_idx, 4] = safe_guardNEWLINE      stats[i, batch_idx, 5] = precisionNEWLINE      stats[i, batch_idx, 6] = recallNEWLINE      mask[batch_idx] = 1NEWLINE      if stats[i, batch_idx, 0] == 0:NEWLINE        print(f"{method_names[i]}: failed")NEWLINENEWLINE    if batch_idx % 10 == 9:NEWLINE      print('Summary {} / {}'.format(batch_idx, tot_num_data))NEWLINE      print(f"Safe guard number: {total_safe_guard} / {batch_idx}")NEWLINE      analyze_stats(stats, mask, method_names)NEWLINENEWLINE  # Save resultsNEWLINE  print(f"Total safe guard ratio: {total_safe_guard} / {tot_num_data}")NEWLINE  filename = f'3dmatch-stats_{method.__class__.__name__}' + '_noicp_fpfh'NEWLINE  if os.path.isdir(config.out_dir):NEWLINE    out_file = os.path.join(config.out_dir, filename)NEWLINE  else:NEWLINE    out_file = filename  # save it on the current directoryNEWLINE  print(f'Saving the stats to {out_file}')NEWLINE  np.savez(out_file, stats=stats, names=method_names)NEWLINE  analyze_stats(stats, mask, method_names)NEWLINENEWLINE  # Analysis per scene (stats column 4 holds the safe-guard flag, soNEWLINE  # scene membership is tracked separately in sids)NEWLINE  for i, method in enumerate(methods):NEWLINE    print(f'Scene-wise mean {method}')NEWLINE    scene_vals = np.zeros((len(subset_names), 3))NEWLINE    for sid, sname in enumerate(subset_names):NEWLINE      curr_scene = sids == sidNEWLINE      scene_vals[sid] = (stats[i, curr_scene, :3]).mean(0)NEWLINENEWLINE    print('All scenes')NEWLINE    print(scene_vals)NEWLINE    print('Scene average')NEWLINE    print(scene_vals.mean(0))NEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE  config = get_config()NEWLINE  print(config)NEWLINENEWLINE  dgr = DeepGlobalRegistration(config)NEWLINENEWLINE  methods = [dgr]NEWLINE  method_names = ['DGR']NEWLINENEWLINE  dset = ThreeDMatchTrajectoryDataset(phase='test',NEWLINE                                      transform=None,NEWLINE                                      random_scale=False,NEWLINE                                      random_rotation=False,NEWLINE                                      config=config)NEWLINENEWLINE  data_loader = torch.utils.data.DataLoader(dset,NEWLINE                                            batch_size=1,NEWLINE                                            shuffle=False,NEWLINE                                            num_workers=10,NEWLINE                                            collate_fn=lambda x: x,NEWLINE                                            pin_memory=False,NEWLINE                                            drop_last=True)NEWLINENEWLINE  evaluate(methods, method_names, data_loader, config, debug=False)NEWLINE
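# Standalone check of the rte_rre criterion above (assumes this module's rte_rre and numpy are in scope): a 5 cm translation error with zero rotation error passes 0.3 m / 15 deg thresholds, returning [success, rte, rre].NEWLINE# T_gt = np.eye(4)NEWLINE# T_pred = np.eye(4); T_pred[0, 3] = 0.05NEWLINE# rte_rre(T_pred, T_gt, rte_thresh=0.3, rre_thresh=15)  # -> array([1.  , 0.05, ~0. ])NEWLINE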
# vim: set et sw=4 sts=4 fileencoding=utf-8:NEWLINE#NEWLINE# Raspberry Pi Sense HAT Emulator library for the Raspberry PiNEWLINE# Copyright (c) 2016 Raspberry Pi Foundation <info@raspberrypi.org>NEWLINE#NEWLINE# This package is free software; you can redistribute it and/or modify it underNEWLINE# the terms of the GNU Lesser General Public License as published by the FreeNEWLINE# Software Foundation; either version 2.1 of the License, or (at your option)NEWLINE# any later version.NEWLINE#NEWLINE# This package is distributed in the hope that it will be useful, but WITHOUTNEWLINE# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESSNEWLINE# FOR A PARTICULAR PURPOSE. See the GNU General Public License for moreNEWLINE# details.NEWLINE#NEWLINE# You should have received a copy of the GNU Lesser General Public LicenseNEWLINE# along with this program. If not, see <http://www.gnu.org/licenses/>NEWLINENEWLINEfrom __future__ import (NEWLINE unicode_literals,NEWLINE absolute_import,NEWLINE print_function,NEWLINE division,NEWLINE )NEWLINEnstr = strNEWLINEstr = type('')NEWLINENEWLINEimport sysNEWLINEimport osNEWLINEimport ioNEWLINEimport mmapNEWLINEimport errnoNEWLINEfrom struct import StructNEWLINEfrom collections import namedtupleNEWLINEfrom random import RandomNEWLINEfrom time import timeNEWLINEfrom threading import Thread, EventNEWLINEfrom math import isnanNEWLINENEWLINEimport numpy as npNEWLINENEWLINEfrom .common import clampNEWLINENEWLINENEWLINE# See LPS25H data-sheet for details of register valuesNEWLINEPRESSURE_FACTOR = 4096NEWLINETEMP_OFFSET = 37NEWLINETEMP_FACTOR = 480NEWLINEPRESSURE_DATA = Struct(nstr(NEWLINE '@' # native modeNEWLINE 'B' # pressure sensor typeNEWLINE '6p' # pressure sensor nameNEWLINE 'l' # P_REFNEWLINE 'l' # P_OUTNEWLINE 'h' # T_OUTNEWLINE 'B' # P_VALIDNEWLINE 'B' # T_VALIDNEWLINE ))NEWLINENEWLINEPressureData = namedtuple('PressureData',NEWLINE ('type', 'name', 'P_REF', 'P_OUT', 'T_OUT', 'P_VALID', 'T_VALID'))NEWLINENEWLINENEWLINEdef pressure_filename():NEWLINE """NEWLINE Return the filename used to represent the state of the emulated sense HAT'sNEWLINE pressure sensor. On UNIX we try ``/dev/shm`` then fall back to ``/tmp``; onNEWLINE Windows we use whatever ``%TEMP%`` containsNEWLINE """NEWLINE fname = 'rpi-sense-emu-pressure'NEWLINE if sys.platform.startswith('win'):NEWLINE # just use a temporary file on WindowsNEWLINE return os.path.join(os.environ['TEMP'], fname)NEWLINE else:NEWLINE if os.path.exists('/dev/shm'):NEWLINE return os.path.join('/dev/shm', fname)NEWLINE else:NEWLINE return os.path.join('/tmp', fname)NEWLINENEWLINENEWLINEdef init_pressure():NEWLINE """NEWLINE Opens the file representing the state of the pressure sensors. TheNEWLINE file-like object is returned.NEWLINENEWLINE If the file already exists we simply make sure it is the right size. 
IfNEWLINE    the file does not already exist, it is created and zeroed.NEWLINE    """NEWLINE    try:NEWLINE        # Attempt to open the IMU's device file and ensure it's the right sizeNEWLINE        fd = io.open(pressure_filename(), 'r+b', buffering=0)NEWLINE        fd.seek(PRESSURE_DATA.size)NEWLINE        fd.truncate()NEWLINE    except IOError as e:NEWLINE        # If the screen's device file doesn't exist, create it with reasonableNEWLINE        # initial valuesNEWLINE        if e.errno == errno.ENOENT:NEWLINE            fd = io.open(pressure_filename(), 'w+b', buffering=0)NEWLINE            fd.write(b'\x00' * PRESSURE_DATA.size)NEWLINE        else:NEWLINE            raiseNEWLINE    return fdNEWLINENEWLINENEWLINEclass PressureServer(object):NEWLINE    def __init__(self, simulate_noise=True):NEWLINE        self._random = Random()NEWLINE        self._fd = init_pressure()NEWLINE        self._map = mmap.mmap(self._fd.fileno(), 0, access=mmap.ACCESS_WRITE)NEWLINE        data = self._read()NEWLINE        if data.type != 3:NEWLINE            self._write(PressureData(3, b'LPS25H', 0, 0, 0, 0, 0))NEWLINE            self._pressure = 1013.0NEWLINE            self._temperature = 20.0NEWLINE        else:NEWLINE            self._pressure = data.P_OUT / 4096NEWLINE            self._temperature = data.T_OUT / 480 + 42.5NEWLINE        self._noise_thread = NoneNEWLINE        self._noise_event = Event()NEWLINE        self._noise_write()NEWLINE        # The queue lengths are selected to accurately represent the responseNEWLINE        # time of the sensorsNEWLINE        self._pressures = np.full((25,), self._pressure, dtype=float)  # np.float was removed in NumPy 1.24NEWLINE        self._temperatures = np.full((25,), self._temperature, dtype=float)NEWLINE        self.simulate_noise = simulate_noiseNEWLINENEWLINE    def close(self):NEWLINE        if self._fd:NEWLINE            self.simulate_noise = FalseNEWLINE            self._map.close()NEWLINE            self._fd.close()NEWLINE            self._fd = NoneNEWLINE            self._map = NoneNEWLINENEWLINE    def _perturb(self, value, error):NEWLINE        """NEWLINE        Return *value* perturbed by +/- *error* which is derived from aNEWLINE        gaussian random generator.NEWLINE        """NEWLINE        # We use an internal Random() instance here to avoid a threading issueNEWLINE        # with the gaussian generator (could use locks, but an instance ofNEWLINE        # Random is easier and faster)NEWLINE        return value + self._random.gauss(0, 0.2) * errorNEWLINENEWLINE    def _read(self):NEWLINE        return PressureData(*PRESSURE_DATA.unpack_from(self._map))NEWLINENEWLINE    def _write(self, value):NEWLINE        PRESSURE_DATA.pack_into(self._map, 0, *value)NEWLINENEWLINE    @propertyNEWLINE    def pressure(self):NEWLINE        return self._pressureNEWLINENEWLINE    @propertyNEWLINE    def temperature(self):NEWLINE        return self._temperatureNEWLINENEWLINE    def set_values(self, pressure, temperature):NEWLINE        self._pressure = pressureNEWLINE        self._temperature = temperatureNEWLINE        if not self._noise_thread:NEWLINE            self._noise_write()NEWLINENEWLINE    @propertyNEWLINE    def simulate_noise(self):NEWLINE        return self._noise_thread is not NoneNEWLINENEWLINE    @simulate_noise.setterNEWLINE    def simulate_noise(self, value):NEWLINE        if value and not self._noise_thread:NEWLINE            self._noise_event.clear()NEWLINE            self._noise_thread = Thread(target=self._noise_loop)NEWLINE            self._noise_thread.daemon = TrueNEWLINE            self._noise_thread.start()NEWLINE        elif self._noise_thread and not value:NEWLINE            self._noise_event.set()NEWLINE            self._noise_thread.join()NEWLINE            self._noise_thread = NoneNEWLINE            self._noise_write()NEWLINENEWLINE    def _noise_loop(self):NEWLINE        while not self._noise_event.wait(0.04):NEWLINE            self._noise_write()NEWLINENEWLINE    def _noise_write(self):NEWLINE        if self.simulate_noise:NEWLINE            self._pressures[1:] = self._pressures[:-1]NEWLINE            self._pressures[0] = self._perturb(self.pressure, (NEWLINE                0.2 if 800 <= 
self.pressure <= 1100 and 20 <= self.temperature <= 60 elseNEWLINE 1.0))NEWLINE self._temperatures[1:] = self._temperatures[:-1]NEWLINE self._temperatures[0] = self._perturb(self.temperature, (NEWLINE 2.0 if 0 <= self.temperature <= 65 elseNEWLINE 4.0))NEWLINE pressure = self._pressures.mean()NEWLINE temperature = self._temperatures.mean()NEWLINE else:NEWLINE pressure = self.pressureNEWLINE temperature = self.temperatureNEWLINE self._write(self._read()._replace(NEWLINE P_VALID=not isnan(pressure),NEWLINE T_VALID=not isnan(temperature),NEWLINE P_OUT=0 if isnan(pressure) else int(clamp(pressure, 260, 1260) * PRESSURE_FACTOR),NEWLINE T_OUT=0 if isnan(temperature) else int((clamp(temperature, -30, 105) - TEMP_OFFSET) * TEMP_FACTOR),NEWLINE ))NEWLINENEWLINENEWLINE
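# --------------------------------------------------------------------NEWLINE# A minimal reader-side sketch for the shared state file above. It isNEWLINE# not part of the emulator: it assumes the module above is importableNEWLINE# as sense_emu.pressure (the package name is an assumption) and that aNEWLINE# PressureServer has already created the file. It decodes the rawNEWLINE# register values back into hPa and degrees Celsius using the sameNEWLINE# constants the server uses.NEWLINE# --------------------------------------------------------------------NEWLINEimport ioNEWLINENEWLINEfrom sense_emu.pressure import (NEWLINE    PRESSURE_DATA, PressureData, pressure_filename,NEWLINE    PRESSURE_FACTOR, TEMP_FACTOR, TEMP_OFFSET)NEWLINENEWLINENEWLINEdef read_pressure():NEWLINE    # read one snapshot of the server's state and decode itNEWLINE    with io.open(pressure_filename(), 'rb') as fd:NEWLINE        data = PressureData(*PRESSURE_DATA.unpack(fd.read(PRESSURE_DATA.size)))NEWLINE    pressure = data.P_OUT / PRESSURE_FACTOR               # hPaNEWLINE    temperature = data.T_OUT / TEMP_FACTOR + TEMP_OFFSET  # degrees CelsiusNEWLINE    return pressure, temperature, bool(data.P_VALID), bool(data.T_VALID)NEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE    # requires a running PressureServer (or an existing state file)NEWLINE    print(read_pressure())NEWLINE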
import spacyNEWLINEfrom spacy import symbolsNEWLINENEWLINEclass SentenceAnalyzer:NEWLINE    '''NEWLINE    Class for analyzing sentence structure and extracting key informationNEWLINE    '''NEWLINENEWLINE    def __init__(self):NEWLINE        self.nlp = spacy.load('en_core_web_sm')NEWLINENEWLINE    #TODO method for checking the form of a sentenceNEWLINE    #TODO return all strings in lower caseNEWLINENEWLINE    def subject_object_analysis(self, clause):NEWLINE        '''NEWLINE        Processes a clause and locates the subject, object, and verbNEWLINE        '''NEWLINE        # check to make sure we have only one clauseNEWLINE        if self.is_compound(clause):NEWLINE            raise ValueError('''Sentence has multiple clauses;NEWLINE            compound sentences must be split before processing''')NEWLINENEWLINE        #TODO use chunking or make into arraysNEWLINE        subj = NoneNEWLINE        obj = NoneNEWLINE        verb = NoneNEWLINENEWLINE        out = self.nlp(clause)NEWLINENEWLINE        for word in out:NEWLINE            if word.dep == symbols.nsubj:NEWLINE                subj = wordNEWLINE            elif word.dep in (symbols.dobj, symbols.pobj):NEWLINE                obj = wordNEWLINENEWLINE        if subj is None or obj is None:NEWLINE            # without both a subject and an object we cannot walk theNEWLINE            # dependency tree up to a common verbNEWLINE            print("Incorrect sentence structure...")NEWLINE            return (None, None, None)NEWLINENEWLINE        parent_1 = subjNEWLINE        parent_2 = objNEWLINENEWLINE        # walk both dependency chains upwards; they are guaranteed to meetNEWLINE        # because the head of the root token is the root itselfNEWLINE        while not verb:NEWLINE            parent_1 = parent_1.headNEWLINE            parent_2 = parent_2.headNEWLINENEWLINE            if parent_1 == parent_2:NEWLINE                verb = parent_1NEWLINENEWLINE        return (str(subj), str(obj), str(verb))NEWLINENEWLINE    def parse_named_entities(self, sentence):NEWLINE        '''NEWLINE        Searches the sentence for proper nounsNEWLINE        '''NEWLINE        out = self.nlp(sentence)NEWLINE        entities = [(ent.text, ent.label_) for ent in out.ents]NEWLINENEWLINE        return entitiesNEWLINENEWLINE    def is_imperative(self, sentence):NEWLINE        '''NEWLINE        Check if a given sentence is imperativeNEWLINE        '''NEWLINE        out = self.nlp(sentence)NEWLINENEWLINE        # check if the sentence begins with a present tense verbNEWLINE        if out[0].tag_ != 'VB':NEWLINE            return FalseNEWLINENEWLINE        # check if there is a subjectNEWLINE        for word in out:NEWLINE            if word.dep == symbols.nsubj:NEWLINE                return FalseNEWLINENEWLINE        return TrueNEWLINENEWLINE    def is_compound(self, sentence):NEWLINE        '''NEWLINE        Determine if sentence is compound or notNEWLINE        '''NEWLINE        out = self.nlp(sentence)NEWLINENEWLINE        # generators have no inherent method to find lengthNEWLINE        num_sentences = sum(1 for sent in out.sents)NEWLINENEWLINE        return num_sentences > 1NEWLINENEWLINE    def split_compound_sentence(self, sentence):NEWLINE        '''NEWLINE        Separate a compound sentence into its clausesNEWLINE        '''NEWLINE        #TODO: make this workNEWLINE        out = self.nlp(sentence)NEWLINENEWLINE        return [str(sent) for sent in out.sents]NEWLINE
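# --------------------------------------------------------------------NEWLINE# Usage sketch for SentenceAnalyzer. It assumes the en_core_web_smNEWLINE# model has been installed (python -m spacy download en_core_web_sm);NEWLINE# the example sentences and expected outputs are illustrative only andNEWLINE# depend on the model's parses.NEWLINE# --------------------------------------------------------------------NEWLINEanalyzer = SentenceAnalyzer()NEWLINENEWLINE# roughly ('dog', 'ball', 'chased') with a typical English modelNEWLINEprint(analyzer.subject_object_analysis("The dog chased the ball"))NEWLINENEWLINE# True: starts with a base-form verb and has no explicit subjectNEWLINEprint(analyzer.is_imperative("Close the door"))NEWLINENEWLINE# named entities such as [('Alice', 'PERSON'), ('Google', 'ORG'), ('London', 'GPE')]NEWLINEprint(analyzer.parse_named_entities("Alice works at Google in London"))NEWLINE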
# math (not cmath) is required here: cmath's sin/exp return complexNEWLINE# values, which corrupts the kernel and the plotted signalsNEWLINEfrom math import cos, exp, log, pi, sinNEWLINEimport matplotlib.pyplot as mpltNEWLINENEWLINE##################### ##################### #####################NEWLINE##################### ##################### #####################NEWLINENEWLINETWOPI = 2.0 * piNEWLINENEWLINEfs = 44100.0NEWLINEdt = 1.0 / fsNEWLINENEWLINE# transition bandwidth as a fraction of the sample rateNEWLINEBW = 0.01NEWLINENEWLINEfc = 200.0NEWLINEbandwidth = 8000.0NEWLINEfc2 = fc + bandwidthNEWLINENEWLINE# normalise both band edges and convert to angular frequencyNEWLINEfc /= fsNEWLINEwc = TWOPI * fcNEWLINENEWLINEfc2 /= fsNEWLINEwc2 = TWOPI * fc2NEWLINENEWLINEkernelLength = int(4.0 / BW)NEWLINEkernelLength += 1NEWLINENEWLINEprint("kernelLength = ", kernelLength)NEWLINENEWLINEmiddle = int(kernelLength * 0.5)NEWLINENEWLINE#####################NEWLINENEWLINEh = [0.0] * kernelLengthNEWLINEw = [0.0] * kernelLengthNEWLINENEWLINE#####################NEWLINENEWLINE# band-pass kernel: difference of two Blackman-windowed sinc low-passNEWLINE# kernels; the range is inclusive of +middle so that all kernelLengthNEWLINE# taps are filled (the original loop left the last tap at zero)NEWLINEi = 0NEWLINEfor n in range(-middle, middle + 1):NEWLINENEWLINE    nm = n + middleNEWLINENEWLINE    # Blackman windowNEWLINE    w[i] = 0.42 - (0.5 * cos((TWOPI * i) / (kernelLength - 1))) + (0.08 * cos(((2.0 * TWOPI) * i) / (kernelLength - 1)))NEWLINENEWLINE    if n == 0:NEWLINE        h[nm] = (2.0 * fc2) - (2.0 * fc)NEWLINE    else:NEWLINE        h[nm] = (sin(wc2 * n) / (pi * n)) - (sin(wc * n) / (pi * n))NEWLINENEWLINE    h[nm] *= w[i]NEWLINE    i += 1NEWLINENEWLINENEWLINE##################### ##################### #####################NEWLINE##################### ##################### #####################NEWLINENEWLINEnumberOfSeconds = 0.15NEWLINEsimulationLength = int(numberOfSeconds * fs)NEWLINENEWLINEsineSweepData = [0.0] * simulationLengthNEWLINENEWLINEstartFrequency = 1.0NEWLINEendFrequency = 20000.0NEWLINENEWLINE# exponential (logarithmic) sine sweep from startFrequency to endFrequencyNEWLINET = numberOfSecondsNEWLINEtempOne = TWOPI * startFrequency * TNEWLINEtempTwo = TWOPI * endFrequency * TNEWLINEtempThree = log(tempTwo / tempOne)NEWLINEtempFour = tempOne / tempThreeNEWLINENEWLINEtime = 0.0NEWLINEfor i in range(0, simulationLength):NEWLINE    sineSweepData[i] = sin(tempFour * (exp((time / T) * tempThree) - 1.0))NEWLINE    time += dtNEWLINENEWLINENEWLINE##################### ##################### #####################NEWLINE##################### ##################### #####################NEWLINENEWLINE# direct convolution using a circular delay line of the kernel lengthNEWLINEconvolvedOutput = [0.0] * simulationLengthNEWLINEtemporary = [0.0] * kernelLengthNEWLINENEWLINExIndex = 0NEWLINEnewest = 0NEWLINENEWLINEfor i in range(0, simulationLength):NEWLINENEWLINE    if newest == kernelLength:NEWLINE        newest = 0NEWLINENEWLINE    temporary[newest] = sineSweepData[i]NEWLINE    xIndex = newestNEWLINENEWLINE    accum = 0.0NEWLINE    for j in range(0, kernelLength):NEWLINENEWLINE        accum += h[j] * temporary[xIndex]NEWLINENEWLINE        xIndex -= 1NEWLINE        if xIndex == -1:NEWLINE            xIndex = kernelLength - 1NEWLINENEWLINE    convolvedOutput[i] = accumNEWLINE    newest += 1NEWLINENEWLINE##################### ##################### #####################NEWLINE##################### ##################### #####################NEWLINENEWLINEfig, (ax1, ax2, ax3) = mplt.subplots(3)NEWLINENEWLINEax1.axis([0, kernelLength, -1.0, 1.0])NEWLINEax1.plot(h)NEWLINENEWLINEax2.axis([0, simulationLength, -1.1, 1.1])NEWLINEax2.plot(sineSweepData)NEWLINENEWLINEax3.axis([0, simulationLength, -1.1, 1.1])NEWLINEax3.plot(convolvedOutput)NEWLINENEWLINEmplt.show()
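# --------------------------------------------------------------------NEWLINE# Optional sanity check, assuming numpy is available: inspect theNEWLINE# kernel's magnitude response so the ~200 Hz and ~8200 Hz band edgesNEWLINE# chosen above can be verified. It reuses h, dt and mplt from theNEWLINE# script above; the FFT size of 8192 is an arbitrary choice.NEWLINE# --------------------------------------------------------------------NEWLINEimport numpy as npNEWLINENEWLINEH = np.abs(np.fft.rfft(h, 8192))     # magnitude responseNEWLINEfreqs = np.fft.rfftfreq(8192, d=dt)  # bin centres in HzNEWLINENEWLINEmplt.semilogy(freqs, H)NEWLINEmplt.xlabel("Frequency (Hz)")NEWLINEmplt.ylabel("|H(f)|")NEWLINEmplt.title("Band-pass kernel response")NEWLINEmplt.show()NEWLINE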
# -*- coding: utf-8 -*-NEWLINEimport osNEWLINEimport sysNEWLINENEWLINEcmd = 'coverage run `which djangocms-helper` aldryn_boilerplates test --cms --extra-settings=test_settings'NEWLINENEWLINEsys.exit(os.system(cmd))NEWLINE
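# --------------------------------------------------------------------NEWLINE# A sketch of a more robust variant of the runner above, assumingNEWLINE# Python 3 (for shutil.which). On POSIX, os.system() returns a rawNEWLINE# wait status (the exit code shifted left by eight bits), and processNEWLINE# exit codes above 255 are truncated, so a failing test run can beNEWLINE# misreported as success. subprocess.call() returns the real exitNEWLINE# code, and shutil.which() replaces the backtick `which` trick; theNEWLINE# command itself is unchanged.NEWLINE# --------------------------------------------------------------------NEWLINEimport shutilNEWLINEimport subprocessNEWLINEimport sysNEWLINENEWLINEhelper = shutil.which('djangocms-helper')NEWLINEif helper is None:NEWLINE    sys.exit('djangocms-helper not found on PATH')NEWLINENEWLINEcmd = ['coverage', 'run', helper, 'aldryn_boilerplates',NEWLINE       'test', '--cms', '--extra-settings=test_settings']NEWLINENEWLINEsys.exit(subprocess.call(cmd))NEWLINE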
# Copyright (c) 2015 OpenStack FoundationNEWLINE# All Rights Reserved.NEWLINE#NEWLINE# Licensed under the Apache License, Version 2.0 (the "License"); you mayNEWLINE# not use this file except in compliance with the License. You may obtainNEWLINE# a copy of the License atNEWLINE#NEWLINE# http://www.apache.org/licenses/LICENSE-2.0NEWLINE#NEWLINE# Unless required by applicable law or agreed to in writing, softwareNEWLINE# distributed under the License is distributed on an "AS IS" BASIS, WITHOUTNEWLINE# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See theNEWLINE# License for the specific language governing permissions and limitationsNEWLINE# under the License.NEWLINENEWLINE"""Policy Engine For magnum."""NEWLINENEWLINEimport decoratorNEWLINEfrom oslo_config import cfgNEWLINEfrom oslo_policy import policyNEWLINEfrom oslo_utils import importutilsNEWLINEimport pecanNEWLINENEWLINEfrom magnum.common import clientsNEWLINEfrom magnum.common import exceptionNEWLINEfrom magnum.common import policiesNEWLINENEWLINENEWLINE_ENFORCER = NoneNEWLINECONF = cfg.CONFNEWLINENEWLINENEWLINE# init() returns a lazily created, module-level policy enforcer.NEWLINE# oslo.policy supports changing policy rules dynamically: at present,NEWLINE# policy.enforce will reload the policy rules whenever it detects thatNEWLINE# the policy files have been touched.NEWLINEdef init(policy_file=None, rules=None,NEWLINE         default_rule=None, use_conf=True, overwrite=True):NEWLINE    """Init an Enforcer class.NEWLINENEWLINE    :param policy_file: Custom policy file to use, if none isNEWLINE                        specified, ``conf.policy_file`` will beNEWLINE                        used.NEWLINE    :param rules: Default dictionary / Rules to use. It will beNEWLINE                  considered only on the first instantiation. IfNEWLINE                  :meth:`load_rules` is called with ``force_reload=True``,NEWLINE                  or :meth:`clear` or :meth:`set_rules` is called withNEWLINE                  ``overwrite=True``, this will be overwritten.NEWLINE    :param default_rule: Default rule to use, conf.default_rule willNEWLINE                         be used if none is specified.NEWLINE    :param use_conf: Whether to load rules from cache or config file.NEWLINE    :param overwrite: Whether to overwrite existing rules when reloading rulesNEWLINE                      from the config file.NEWLINE    """NEWLINE    global _ENFORCERNEWLINE    if not _ENFORCER:NEWLINE        # http://docs.openstack.org/developer/oslo.policy/usage.htmlNEWLINE        _ENFORCER = policy.Enforcer(CONF,NEWLINE                                    policy_file=policy_file,NEWLINE                                    rules=rules,NEWLINE                                    default_rule=default_rule,NEWLINE                                    use_conf=use_conf,NEWLINE                                    overwrite=overwrite)NEWLINE        _ENFORCER.register_defaults(policies.list_rules())NEWLINENEWLINE    return _ENFORCERNEWLINENEWLINENEWLINEdef enforce(context, rule=None, target=None,NEWLINE            do_raise=True, exc=None, *args, **kwargs):NEWLINENEWLINE    """Checks authorization of a rule against the target and credentials.NEWLINENEWLINE    :param dict context: As much information about the user performing theNEWLINE                         action as possible.NEWLINE    :param rule: The rule to evaluate.NEWLINE    :param dict target: As much information about the object being operatedNEWLINE                        on as possible.NEWLINE    :param do_raise: Whether to raise an exception or not if the checkNEWLINE                     fails.NEWLINE    :param exc: Class of the exception to raise if the check fails.NEWLINE                Any remaining arguments passed to :meth:`enforce` (bothNEWLINE                positional and keyword arguments) will be passed toNEWLINE                the exception class. 
If not specified,NEWLINE :class:`PolicyNotAuthorized` will be used.NEWLINENEWLINE :return: ``False`` if the policy does not allow the action and `exc` isNEWLINE not provided; otherwise, returns a value that evaluates toNEWLINE ``True``. Note: for rules using the "case" expression, thisNEWLINE ``True`` value will be the specified string from theNEWLINE expression.NEWLINE """NEWLINE enforcer = init()NEWLINE credentials = context.to_dict()NEWLINE if not exc:NEWLINE exc = exception.PolicyNotAuthorizedNEWLINE if target is None:NEWLINE target = {'project_id': context.project_id,NEWLINE 'user_id': context.user_id}NEWLINE add_policy_attributes(target)NEWLINE return enforcer.enforce(rule, target, credentials,NEWLINE do_raise=do_raise, exc=exc, *args, **kwargs)NEWLINENEWLINENEWLINEdef add_policy_attributes(target):NEWLINE """Adds extra information for policy enforcement to raw target object"""NEWLINE context = importutils.import_module('magnum.common.context')NEWLINE admin_context = context.make_admin_context()NEWLINE admin_osc = clients.OpenStackClients(admin_context)NEWLINE trustee_domain_id = admin_osc.keystone().trustee_domain_idNEWLINE target['trustee_domain_id'] = trustee_domain_idNEWLINE return targetNEWLINENEWLINENEWLINEdef check_is_admin(context):NEWLINE """Whether or not user is admin according to policy setting.NEWLINENEWLINE """NEWLINE init()NEWLINE target = {}NEWLINE credentials = context.to_dict()NEWLINE return _ENFORCER.enforce('context_is_admin', target, credentials)NEWLINENEWLINENEWLINEdef enforce_wsgi(api_name, act=None):NEWLINE """This is a decorator to simplify wsgi action policy rule check.NEWLINENEWLINE :param api_name: The collection name to be evaluate.NEWLINE :param act: The function name of wsgi action.NEWLINENEWLINE example:NEWLINE from magnum.common import policyNEWLINE class ClustersController(rest.RestController):NEWLINE ....NEWLINE @policy.enforce_wsgi("cluster", "delete")NEWLINE @wsme_pecan.wsexpose(None, types.uuid_or_name, status_code=204)NEWLINE def delete(self, cluster_ident):NEWLINE ...NEWLINE """NEWLINE @decorator.decoratorNEWLINE def wrapper(fn, *args, **kwargs):NEWLINE action = "%s:%s" % (api_name, (act or fn.__name__))NEWLINE enforce(pecan.request.context, action,NEWLINE exc=exception.PolicyNotAuthorized, action=action)NEWLINE return fn(*args, **kwargs)NEWLINE return wrapperNEWLINE
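# --------------------------------------------------------------------NEWLINE# Hypothetical usage sketch for enforce() above, called from requestNEWLINE# handling code. The rule name 'cluster:get' and the Cluster objectNEWLINE# are illustrative, not taken from this module; the exc/action kwargsNEWLINE# mirror how enforce_wsgi() invokes enforce().NEWLINE# --------------------------------------------------------------------NEWLINEfrom magnum.common import exceptionNEWLINEfrom magnum.common import policyNEWLINENEWLINENEWLINEdef get_cluster(context, cluster):NEWLINE    # target carries attributes of the object being operated on, whichNEWLINE    # the policy rule can compare against the caller's credentialsNEWLINE    target = {'project_id': cluster.project_id,NEWLINE              'user_id': context.user_id}NEWLINE    # raises exception.PolicyNotAuthorized if the check failsNEWLINE    policy.enforce(context, 'cluster:get', target,NEWLINE                   exc=exception.PolicyNotAuthorized, action='cluster:get')NEWLINE    return clusterNEWLINE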
from src.interfacing.ogs.connect import AuthenticationNEWLINEimport codecsNEWLINEimport sysNEWLINEimport osNEWLINEfrom time import sleepNEWLINENEWLINEdef loadList(pNameFile):NEWLINE    iList = []NEWLINE    with codecs.open(pNameFile, "r", "utf-8") as f:NEWLINE        for line in f:NEWLINE            iList.append(line)NEWLINE    return iListNEWLINENEWLINEif __name__ == "__main__":NEWLINE    a = Authentication("Kuksu League", "", testing=False)NEWLINENEWLINE    iGroupNames = loadList("E:/Project/OGS/OGS-League/group_names.txt")NEWLINE    fGroupIDs = codecs.open("E:/Project/OGS/OGS-League/group_ids.txt", "w", "utf-8")NEWLINENEWLINE    nGroups = len(iGroupNames)NEWLINENEWLINE    for i in range(nGroups):NEWLINE        iGroupNames[i] = iGroupNames[i].replace("\r\n", "")NEWLINE        iGroupNames[i] = iGroupNames[i].replace("\n", "")NEWLINE        iTournament = a.post(['tournaments'], {NEWLINE            "name": "Kuksu Main Title Tournament 9th Cycle - Group %s" % iGroupNames[i],NEWLINE            "group": 515,NEWLINE            "tournament_type": "roundrobin",NEWLINE            "description": "Kuksu Main Title Tournament 9th Cycle - Group %s" % iGroupNames[i],NEWLINE            "board_size": 19,NEWLINE            "handicap": 0,  # default -1 for autoNEWLINE            "time_start": "2015-12-01T00:00:00Z",NEWLINE            "time_control_parameters": {NEWLINE                "time_control": "fischer",NEWLINE                "initial_time": 604800,NEWLINE                "max_time": 604800,NEWLINE                "time_increment": 86400NEWLINE            },NEWLINE            "rules": "korean",NEWLINE            "exclusivity": "invite",  # open, group. defaultNEWLINE            "exclude_provisional": False,  # defaultNEWLINE            "auto_start_on_max": True,  # defaultNEWLINE            "analysis_enabled": True,  # defaultNEWLINE            "settings": {NEWLINE                "maximum_players": 10,NEWLINE            },NEWLINE            "players_start": 10,  # defaultNEWLINE            "first_pairing_method": "slide",  # slaughter, random, slide, strength. defaultNEWLINE            "subsequent_pairing_method": "slide",  # defaultNEWLINE            "min_ranking": 0,NEWLINE            "max_ranking": 36NEWLINE        })NEWLINENEWLINE        print("Tournament %s with id %d created.\n" % (iGroupNames[i], iTournament["id"]))NEWLINE        fGroupIDs.writelines("%d\n" % iTournament["id"])NEWLINE        sleep(2)NEWLINENEWLINE    fGroupIDs.close()NEWLINENEWLINE# a.put(['tournaments', 12650], {"description":"Test Test"})NEWLINENEWLINE# tourney id 7370NEWLINE"""NEWLINEiTournament = a.post(['tournaments'],{NEWLINE    "id":12650,NEWLINE    "name":"Test Tournament 2",NEWLINE    "group":515,NEWLINE    "tournament_type":"roundrobin",NEWLINE    "description":"<b>Test 3</b>",NEWLINE    "board_size":19,NEWLINE    "handicap":0, #default -1 for autoNEWLINE    "time_start": "2015-12-01T00:00:00Z",NEWLINE    "time_control_parameters":{NEWLINE        "time_control":"fischer",NEWLINE        "initial_time":604800,NEWLINE        "max_time":604800,NEWLINE        "time_increment":86400NEWLINE    },NEWLINE    "rules": "korean",NEWLINE    "exclusivity": "invite", # open, group. defaultNEWLINE    "exclude_provisional": False, # defaultNEWLINE    "auto_start_on_max": True, # defaultNEWLINE    "analysis_enabled": True, #defaultNEWLINE    "settings":{NEWLINE        "maximum_players":10,NEWLINE    },NEWLINE    "players_start": 6, #defaultNEWLINE    "first_pairing_method": "slide", #slaughter, random, slide, strength . defaultNEWLINE    "subsequent_pairing_method": "slide", # defaultNEWLINE    "min_ranking":0,NEWLINE    "max_ranking":36NEWLINE});NEWLINENEWLINE#print("Hello");NEWLINEprint(iTournament["id"]);NEWLINE"""NEWLINE#print "Tournament %s is created." % iTournament["id"];NEWLINENEWLINE# r= a.post (['tournaments', 12642, 'players'], app_param= {"player_id":40318} )NEWLINE# print (r)NEWLINE
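# --------------------------------------------------------------------NEWLINE# Follow-up sketch based on the commented-out player call above: onceNEWLINE# the tournaments exist, players can be added with one POST each.NEWLINE# invite_players() and player_ids are illustrative names, not part ofNEWLINE# the OGS client wrapper used here.NEWLINE# --------------------------------------------------------------------NEWLINEdef invite_players(auth, tournament_id, player_ids):NEWLINE    # auth is an Authentication instance as created in __main__ aboveNEWLINE    for pid in player_ids:NEWLINE        r = auth.post(['tournaments', tournament_id, 'players'],NEWLINE                      app_param={"player_id": pid})NEWLINE        print(r)NEWLINE        sleep(1)  # modest delay to stay friendly to the OGS APINEWLINE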
'''NEWLINECreated on May 1, 2015NEWLINENEWLINE@author: zwickerNEWLINE'''NEWLINENEWLINEfrom __future__ import division, absolute_importNEWLINENEWLINEimport numpy as npNEWLINEfrom six.moves import rangeNEWLINENEWLINEfrom .lib_exp_base import LibraryExponentialBaseNEWLINEfrom ..library_numeric_base import LibraryNumericMixin, get_sensitivity_matrixNEWLINENEWLINENEWLINENEWLINEclass LibraryExponentialNumeric(LibraryNumericMixin, LibraryExponentialBase):NEWLINE """ represents a single receptor library that handles continuous mixtures,NEWLINE which are defined by their concentration mean and variance """NEWLINENEWLINE # default parameters that are used to initialize a class if not overwrittenNEWLINE parameters_default = {NEWLINE 'max_num_receptors': 28, #< prevents memory overflowsNEWLINE 'sensitivity_matrix': None, #< will be calculated if not givenNEWLINE 'sensitivity_matrix_params': None, #< parameters determining I_aiNEWLINE 'monte_carlo_steps': 'auto', #< default steps for monte carloNEWLINE 'monte_carlo_steps_min': 1e4, #< minimal steps for monte carloNEWLINE 'monte_carlo_steps_max': 1e5, #< maximal steps for monte carloNEWLINE }NEWLINE NEWLINE NEWLINE @classmethodNEWLINE def create_test_instance(cls, **kwargs):NEWLINE """ creates a test instance used for consistency tests """NEWLINE obj = super(LibraryExponentialNumeric, cls).create_test_instance(**kwargs)NEWLINENEWLINE # determine optimal parameters for the interaction matrixNEWLINE from .lib_exp_theory import LibraryExponentialLogNormalNEWLINE theory = LibraryExponentialLogNormal.from_other(obj)NEWLINE obj.choose_sensitivity_matrix(**theory.get_optimal_library())NEWLINE return objNEWLINE NEWLINENEWLINE @propertyNEWLINE def _sample_steps(self):NEWLINE """ returns the number of steps that are sampled """NEWLINE if self.parameters['monte_carlo_steps'] == 'auto':NEWLINE steps_min = self.parameters['monte_carlo_steps_min']NEWLINE steps_max = self.parameters['monte_carlo_steps_max']NEWLINE steps = np.clip(10 * 2**self.Nr, steps_min, steps_max) NEWLINE # Here, the factor 10 is an arbitrary scaling factorNEWLINE else:NEWLINE steps = self.parameters['monte_carlo_steps']NEWLINE NEWLINE return int(steps)NEWLINE NEWLINE NEWLINE def _sample_mixtures(self, steps=None):NEWLINE """ sample mixtures with uniform probability yielding single mixtures """NEWLINE NEWLINE if steps is None:NEWLINE steps = self._sample_stepsNEWLINE NEWLINE c_means = self.concentration_meansNEWLINE NEWLINE for _ in range(steps):NEWLINE # choose a mixture vector according to substrate probabilitiesNEWLINE yield np.random.exponential(size=self.Ns) * c_meansNEWLINENEWLINENEWLINE def choose_sensitivity_matrix(self, distribution, mean_sensitivity=1,NEWLINE **kwargs):NEWLINE """ chooses the sensitivity matrix """NEWLINE self.sens_mat, sens_mat_params = get_sensitivity_matrix(NEWLINE self.Nr, self.Ns, distribution, mean_sensitivity, **kwargs)NEWLINENEWLINE # save the parameters determining this matrixNEWLINE self.parameters['sensitivity_matrix_params'] = sens_mat_paramsNEWLINENEWLINE choose_sensitivity_matrix.__doc__ = get_sensitivity_matrix.__doc__ NEWLINENEWLINENEWLINE def receptor_activity(self, ret_correlations=False):NEWLINE """ calculates the average activity of each receptor """ NEWLINE return self.receptor_activity_monte_carlo(ret_correlations)NEWLINENEWLINE NEWLINE def mutual_information(self, ret_prob_activity=False):NEWLINE """ calculate the mutual information using a monte carlo strategy. 
TheNEWLINE number of steps is given by the model parameter 'monte_carlo_steps' """NEWLINE return self.mutual_information_monte_carlo(ret_prob_activity)NEWLINE NEWLINE
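# --------------------------------------------------------------------NEWLINE# Usage sketch for LibraryExponentialNumeric. create_test_instance()NEWLINE# is defined above and chooses a sensitivity matrix from the log-normalNEWLINE# theory; whether extra keyword arguments (e.g. receptor and substrateNEWLINE# counts) are required depends on the base classes, which are notNEWLINE# shown here.NEWLINE# --------------------------------------------------------------------NEWLINElib = LibraryExponentialNumeric.create_test_instance()NEWLINENEWLINE# average receptor activity, estimated by Monte Carlo samplingNEWLINEprint(lib.receptor_activity())NEWLINENEWLINE# mutual information between mixtures and receptor activity patterns,NEWLINE# also estimated by Monte Carlo samplingNEWLINEprint(lib.mutual_information())NEWLINE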
import ioNEWLINEimport jsonNEWLINEimport unittestNEWLINEimport tempfileNEWLINEfrom base64 import b64encodeNEWLINENEWLINEfrom ukbrest import appNEWLINEimport pandas as pdNEWLINENEWLINEfrom tests.settings import POSTGRESQL_ENGINENEWLINEfrom tests.utils import get_repository_path, DBTestNEWLINEfrom ukbrest.common.pheno2sql import Pheno2SQLNEWLINEfrom ukbrest.common.utils.auth import PasswordHasherNEWLINENEWLINENEWLINEclass TestRestApiPhenotype(DBTest):NEWLINE def _make_yaml_request(self, yaml_def, section, n_expected_rows, expected_columns):NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_def), 'data.yaml'),NEWLINE 'section': section,NEWLINE }, headers={'accept': 'text/csv'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,NEWLINE index_col='eid', dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert pheno_file.shape == (n_expected_rows, len(expected_columns)), pheno_file.shapeNEWLINENEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE return pheno_fileNEWLINENEWLINE def setUp(self, filename=None, load_data=True, wipe_database=True, **kwargs):NEWLINE if wipe_database:NEWLINE super(TestRestApiPhenotype, self).setUp()NEWLINE NEWLINE # Load dataNEWLINE p2sql = self._get_p2sql(filename, **kwargs)NEWLINENEWLINE if load_data:NEWLINE p2sql.load_data()NEWLINENEWLINE app.app.config['pheno2sql'] = p2sqlNEWLINENEWLINE # ConfigureNEWLINE self.configureApp()NEWLINENEWLINE def _get_p2sql(self, filename, **kwargs):NEWLINE if filename is None:NEWLINE csv_file = get_repository_path('pheno2sql/example02.csv')NEWLINE elif isinstance(filename, (tuple, list)):NEWLINE csv_file = tuple([get_repository_path(f) for f in filename])NEWLINE elif isinstance(filename, str):NEWLINE csv_file = get_repository_path(filename)NEWLINE else:NEWLINE raise ValueError('filename unknown type')NEWLINENEWLINE if 'db_uri' not in kwargs:NEWLINE kwargs['db_uri'] = POSTGRESQL_ENGINENEWLINENEWLINE if 'n_columns_per_table' not in kwargs:NEWLINE kwargs['n_columns_per_table'] = 2NEWLINENEWLINE return Pheno2SQL(csv_file, **kwargs)NEWLINENEWLINE def configureApp(self, app_func=None):NEWLINE app.app.config['testing'] = TrueNEWLINE app.app.config['auth'] = NoneNEWLINENEWLINE if app_func is not None:NEWLINE app_func(app.app)NEWLINENEWLINE self.app = app.app.test_client()NEWLINENEWLINE def configureAppWithAuth(self, user_pass_line):NEWLINE f = tempfile.NamedTemporaryFile(delete=False)NEWLINE f.close()NEWLINENEWLINE with open(f.name, 'w') as fi:NEWLINE fi.write(user_pass_line)NEWLINENEWLINE ph = PasswordHasher(f.name, method='pbkdf2:sha256')NEWLINENEWLINE def conf(a):NEWLINE a.config['auth'] = ph.setup_http_basic_auth()NEWLINENEWLINE self.configureApp(conf)NEWLINENEWLINE def _get_http_basic_auth_header(self, user, password):NEWLINE return {'Authorization': 'Basic %s' % b64encode(f'{user}:{password}'.encode()).decode("ascii")}NEWLINENEWLINE def test_not_found(self):NEWLINE response = self.app.get('/ukbrest/api/v1.0/')NEWLINE assert response.status_code == 404, response.status_codeNEWLINENEWLINE def test_phenotype_fields(self):NEWLINE # PrepareNEWLINE # RunNEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype/fields')NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, 
response.status_codeNEWLINENEWLINE fields = json.loads(response.data.decode('utf-8'))NEWLINE assert len(fields) == 8NEWLINENEWLINE def test_phenotype_fields_http_auth_no_credentials(self):NEWLINE # PrepareNEWLINE self.configureAppWithAuth('user: thepassword2')NEWLINENEWLINE # RunNEWLINE response = self.app.get(NEWLINE '/ukbrest/api/v1.0/phenotype/fields',NEWLINE # headers=self._get_http_basic_auth_header('user', 'thepassword2'),NEWLINE )NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 401, response.status_codeNEWLINENEWLINE def test_phenotype_fields_http_auth_with_credentials(self):NEWLINE # PrepareNEWLINE self.configureAppWithAuth('user: thepassword2')NEWLINENEWLINE # RunNEWLINE response = self.app.get(NEWLINE '/ukbrest/api/v1.0/phenotype/fields',NEWLINE headers=self._get_http_basic_auth_header('user', 'thepassword2'),NEWLINE )NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE fields = json.loads(response.data.decode('utf-8'))NEWLINE assert len(fields) == 8NEWLINENEWLINE def test_phenotype_fields_http_auth_multiple_users(self):NEWLINE # PrepareNEWLINE self.configureAppWithAuth(NEWLINE 'user: thepassword2\n'NEWLINE 'another_user: another_password'NEWLINE )NEWLINENEWLINE # RunNEWLINE response = self.app.get(NEWLINE '/ukbrest/api/v1.0/phenotype/fields',NEWLINE headers=self._get_http_basic_auth_header('user', 'thepassword2'),NEWLINE )NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE fields = json.loads(response.data.decode('utf-8'))NEWLINE assert len(fields) == 8NEWLINENEWLINE # Run 2NEWLINE response = self.app.get(NEWLINE '/ukbrest/api/v1.0/phenotype/fields',NEWLINE headers=self._get_http_basic_auth_header('another_user', 'another_password'),NEWLINE )NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE fields = json.loads(response.data.decode('utf-8'))NEWLINE assert len(fields) == 8NEWLINENEWLINE def test_phenotype_query_single_column_format_csv(self):NEWLINE # PrepareNEWLINE columns = ['c21_0_0']NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE }NEWLINENEWLINE # RunNEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE query_string=parameters, headers={'accept': 'text/csv'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE csv_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), index_col='eid', dtype=str)NEWLINE assert csv_file is not NoneNEWLINE assert not csv_file.emptyNEWLINE assert csv_file.shape == (4, 1)NEWLINENEWLINE assert csv_file.index.name == 'eid'NEWLINE assert len(csv_file.index) == 4NEWLINE assert all(x in csv_file.index for x in range(1, 4 + 1))NEWLINENEWLINE assert len(csv_file.columns) == len(columns)NEWLINE assert all(x in columns for x in csv_file.columns)NEWLINENEWLINE assert csv_file.loc[1, 'c21_0_0'] == 'Option number 1'NEWLINE assert csv_file.loc[2, 'c21_0_0'] == 'Option number 2'NEWLINE assert csv_file.loc[3, 'c21_0_0'] == 'Option number 3'NEWLINE assert csv_file.loc[4, 'c21_0_0'] == 'Option number 4'NEWLINENEWLINE def test_phenotype_query_error_column_does_not_exist(self):NEWLINE # PrepareNEWLINE columns = ['nonexistent_column']NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE }NEWLINENEWLINE # RunNEWLINENEWLINE # with self.app:NEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE query_string=parameters, headers={'accept': 'text/csv'})NEWLINENEWLINE # ValidateNEWLINE assert 
response.status_code == 400, response.status_codeNEWLINE data = json.load(io.StringIO(response.data.decode('utf-8')))NEWLINENEWLINE assert 'message' in data, dataNEWLINE assert 'column "nonexistent_column" does not exist' in data['message'], data['message']NEWLINENEWLINE assert 'output' not in data, dataNEWLINENEWLINE def test_phenotype_query_error_column_does_not_exist_standard_column_name(self):NEWLINE # PrepareNEWLINE columns = ['c999_0_0']NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE }NEWLINENEWLINE # RunNEWLINENEWLINE # with self.app:NEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE query_string=parameters, headers={'accept': 'text/csv'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 400, response.status_codeNEWLINE data = json.load(io.StringIO(response.data.decode('utf-8')))NEWLINENEWLINE assert 'status_code' in data, dataNEWLINE assert data['status_code'] == 400, data['status_code']NEWLINENEWLINE assert 'error_type' in data, dataNEWLINE assert data['error_type'] == 'SQL_EXECUTION_ERROR'NEWLINENEWLINE assert 'message' in data, dataNEWLINE assert 'column "c999_0_0" does not exist' in data['message'], data['message']NEWLINENEWLINE assert 'output' not in data, dataNEWLINENEWLINE def test_phenotype_query_error_cannot_connect_to_database(self):NEWLINE # PrepareNEWLINE self.setUp(load_data=False, db_uri='postgresql://test:test@wronghost:5432/ukb')NEWLINENEWLINE columns = ['c21_0_0', 'invalid value here']NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE }NEWLINENEWLINE # RunNEWLINENEWLINE # with self.app:NEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE query_string=parameters, headers={'accept': 'text/csv'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 500, response.status_codeNEWLINE data = json.load(io.StringIO(response.data.decode('utf-8')))NEWLINENEWLINE assert 'status_code' in data, dataNEWLINE assert data['status_code'] == 500, data['status_code']NEWLINENEWLINE assert 'error_type' in data, dataNEWLINE assert data['error_type'] == 'UNKNOWN', data['error_type']NEWLINENEWLINE assert 'message' in data, dataNEWLINE assert 'psycopg2.OperationalError' in data['message'], data['message']NEWLINE assert 'wronghost' in data['message'], data['message']NEWLINENEWLINE def test_phenotype_query_multiple_column_format_csv(self):NEWLINE # PrepareNEWLINE columns = ['c21_0_0', 'c48_0_0']NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE }NEWLINENEWLINE # RunNEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE query_string=parameters, headers={'accept': 'text/csv'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE csv_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), index_col='eid', dtype=str)NEWLINE assert csv_file is not NoneNEWLINE assert not csv_file.emptyNEWLINE assert csv_file.shape == (4, 2)NEWLINENEWLINE assert csv_file.index.name == 'eid'NEWLINE assert len(csv_file.index) == 4NEWLINE assert all(x in csv_file.index for x in range(1, 4 + 1))NEWLINENEWLINE assert len(csv_file.columns) == len(columns)NEWLINE assert all(x in columns for x in csv_file.columns)NEWLINENEWLINE assert csv_file.loc[1, 'c21_0_0'] == 'Option number 1'NEWLINE assert csv_file.loc[2, 'c21_0_0'] == 'Option number 2'NEWLINE assert csv_file.loc[3, 'c21_0_0'] == 'Option number 3'NEWLINE assert csv_file.loc[4, 'c21_0_0'] == 'Option number 4'NEWLINENEWLINE assert csv_file.loc[1, 'c48_0_0'] == '2011-08-14'NEWLINE assert 
csv_file.loc[2, 'c48_0_0'] == '2016-11-30'NEWLINE assert csv_file.loc[3, 'c48_0_0'] == '2010-01-01'NEWLINE assert csv_file.loc[4, 'c48_0_0'] == '2011-02-15'NEWLINENEWLINE def test_phenotype_query_multiple_column_format_pheno(self):NEWLINE # PrepareNEWLINE columns = ['c21_0_0', 'c48_0_0']NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE }NEWLINENEWLINE # RunNEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE query_string=parameters, headers={'accept': 'text/plink2'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)NEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (4, 2 + 1) # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert len(pheno_file.index) == 4NEWLINE assert all(x in pheno_file.index for x in range(1, 4 + 1))NEWLINENEWLINE expected_columns = ['IID'] + columnsNEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[1, 'IID'] == '1'NEWLINE assert pheno_file.loc[2, 'IID'] == '2'NEWLINE assert pheno_file.loc[3, 'IID'] == '3'NEWLINE assert pheno_file.loc[4, 'IID'] == '4'NEWLINENEWLINE assert pheno_file.loc[1, 'c21_0_0'] == 'Option number 1'NEWLINE assert pheno_file.loc[2, 'c21_0_0'] == 'Option number 2'NEWLINE assert pheno_file.loc[3, 'c21_0_0'] == 'Option number 3'NEWLINE assert pheno_file.loc[4, 'c21_0_0'] == 'Option number 4'NEWLINENEWLINE assert pheno_file.loc[1, 'c48_0_0'] == '2011-08-14'NEWLINE assert pheno_file.loc[2, 'c48_0_0'] == '2016-11-30'NEWLINE assert pheno_file.loc[3, 'c48_0_0'] == '2010-01-01'NEWLINE assert pheno_file.loc[4, 'c48_0_0'] == '2011-02-15'NEWLINENEWLINE def test_phenotype_query_multiple_column_renaming(self):NEWLINE # PrepareNEWLINE columns = ['c21_0_0 as c21', 'c31_0_0 c31', 'c48_0_0']NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE }NEWLINENEWLINE # RunNEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE query_string=parameters, headers={'accept': 'text/plink2'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)NEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (4, 3 + 1) # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert len(pheno_file.index) == 4NEWLINE assert all(x in pheno_file.index for x in range(1, 4 + 1))NEWLINENEWLINE expected_columns = ['IID'] + ['c21', 'c31', 'c48_0_0']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[1, 'IID'] == '1'NEWLINE assert pheno_file.loc[2, 'IID'] == '2'NEWLINE assert pheno_file.loc[3, 'IID'] == '3'NEWLINE assert pheno_file.loc[4, 'IID'] == '4'NEWLINENEWLINE assert pheno_file.loc[1, 'c21'] == 'Option number 1'NEWLINE assert pheno_file.loc[2, 'c21'] == 'Option number 2'NEWLINE assert pheno_file.loc[3, 'c21'] == 'Option number 3'NEWLINE assert pheno_file.loc[4, 'c21'] == 'Option number 4'NEWLINENEWLINE assert pheno_file.loc[1, 'c31'] == '2012-01-05'NEWLINE assert pheno_file.loc[2, 'c31'] == '2015-12-30'NEWLINE assert pheno_file.loc[3, 'c31'] == 
'2007-03-19'NEWLINE assert pheno_file.loc[4, 'c31'] == '2002-05-09'NEWLINENEWLINE assert pheno_file.loc[1, 'c48_0_0'] == '2011-08-14'NEWLINE assert pheno_file.loc[2, 'c48_0_0'] == '2016-11-30'NEWLINE assert pheno_file.loc[3, 'c48_0_0'] == '2010-01-01'NEWLINE assert pheno_file.loc[4, 'c48_0_0'] == '2011-02-15'NEWLINENEWLINE def test_phenotype_query_filtering_with_column_no_mentioned_in_select(self):NEWLINE # PrepareNEWLINE columns = ['c21_0_0 as c21', 'c21_2_0 c21_2']NEWLINE filtering = ["c46_0_0 < 0", "c48_0_0 > '2011-01-01'"]NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE 'filters': filtering,NEWLINE }NEWLINENEWLINE # RunNEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype', query_string=parameters)NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)NEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape[0] == 2NEWLINE assert pheno_file.shape[1] == 2 + 1 # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert len(pheno_file.index) == 2NEWLINE assert all(x in pheno_file.index for x in (1, 2))NEWLINENEWLINE expected_columns = ['IID'] + ['c21', 'c21_2']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINE # column orderNEWLINE assert pheno_file.columns.tolist()[0] == 'IID'NEWLINENEWLINE assert pheno_file.loc[1, 'IID'] == '1'NEWLINE assert pheno_file.loc[2, 'IID'] == '2'NEWLINENEWLINE assert pheno_file.loc[1, 'c21'] == 'Option number 1'NEWLINE assert pheno_file.loc[2, 'c21'] == 'Option number 2'NEWLINENEWLINE assert pheno_file.loc[1, 'c21_2'] == 'Yes'NEWLINE assert pheno_file.loc[2, 'c21_2'] == 'No'NEWLINENEWLINE def test_phenotype_query_multiple_column_integer_values(self):NEWLINE # PrepareNEWLINE columns = ['c34_0_0', 'c46_0_0', 'c47_0_0']NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE }NEWLINENEWLINE # RunNEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE query_string=parameters, headers={'accept': 'text/plink2'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)NEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (4, 3 + 1) # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert len(pheno_file.index) == 4NEWLINE assert all(x in pheno_file.index for x in range(1, 4 + 1))NEWLINENEWLINE expected_columns = ['IID'] + columnsNEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[1, 'IID'] == '1'NEWLINE assert pheno_file.loc[2, 'IID'] == '2'NEWLINE assert pheno_file.loc[3, 'IID'] == '3'NEWLINE assert pheno_file.loc[4, 'IID'] == '4'NEWLINENEWLINE assert pheno_file.loc[1, 'c34_0_0'] == '21'NEWLINE assert pheno_file.loc[2, 'c34_0_0'] == '12'NEWLINE assert pheno_file.loc[3, 'c34_0_0'] == '1'NEWLINE assert pheno_file.loc[4, 'c34_0_0'] == '17'NEWLINENEWLINE assert pheno_file.loc[1, 'c46_0_0'] == '-9'NEWLINE assert pheno_file.loc[2, 'c46_0_0'] == '-2'NEWLINE assert pheno_file.loc[3, 'c46_0_0'] == '-7'NEWLINE assert pheno_file.loc[4, 'c46_0_0'] == '4'NEWLINENEWLINE assert pheno_file.loc[1, 
'c47_0_0'] == '45.55412'NEWLINE assert pheno_file.loc[2, 'c47_0_0'] == '-0.55461'NEWLINE assert pheno_file.loc[3, 'c47_0_0'] == '-5.32471'NEWLINE assert pheno_file.loc[4, 'c47_0_0'] == '55.19832'NEWLINENEWLINE def test_phenotype_query_multiple_column_integer_values_with_nan(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example06_nan_integer.csv')NEWLINENEWLINE columns = ['c34_0_0', 'c46_0_0', 'c47_0_0']NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE }NEWLINENEWLINE # RunNEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE query_string=parameters, headers={'accept': 'text/plink2'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', na_values='',NEWLINE keep_default_na=False, index_col='FID', dtype=str)NEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (4, 3 + 1) # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert len(pheno_file.index) == 4NEWLINE assert all(x in pheno_file.index for x in range(1, 4 + 1))NEWLINENEWLINE expected_columns = ['IID'] + columnsNEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[1, 'IID'] == '1'NEWLINE assert pheno_file.loc[2, 'IID'] == '2'NEWLINE assert pheno_file.loc[3, 'IID'] == '3'NEWLINE assert pheno_file.loc[4, 'IID'] == '4'NEWLINENEWLINE assert pheno_file.loc[1, 'c34_0_0'] == '21'NEWLINE assert pheno_file.loc[2, 'c34_0_0'] == '12'NEWLINE assert pheno_file.loc[3, 'c34_0_0'] == '1'NEWLINE assert pheno_file.loc[4, 'c34_0_0'] == '17'NEWLINENEWLINE assert pheno_file.loc[1, 'c46_0_0'] == '-9'NEWLINE assert pheno_file.loc[2, 'c46_0_0'] == 'NA'NEWLINE assert pheno_file.loc[3, 'c46_0_0'] == '-7'NEWLINE assert pheno_file.loc[4, 'c46_0_0'] == '4'NEWLINENEWLINE assert pheno_file.loc[1, 'c47_0_0'] == '45.55412'NEWLINE assert pheno_file.loc[2, 'c47_0_0'] == '-0.55461'NEWLINE assert pheno_file.loc[3, 'c47_0_0'] == '-5.32471'NEWLINE assert pheno_file.loc[4, 'c47_0_0'] == '55.19832'NEWLINENEWLINE def test_phenotype_query_multiple_column_integer_values_with_nan_using_columns_renaming_with_as(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example06_nan_integer.csv')NEWLINENEWLINE columns = ['c34_0_0 as c34', 'c46_0_0 as c46', 'c47_0_0 as c47']NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE }NEWLINENEWLINE # RunNEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE query_string=parameters, headers={'accept': 'text/plink2'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', na_values='',NEWLINE keep_default_na=False, index_col='FID', dtype=str)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (4, 3 + 1) # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert len(pheno_file.index) == 4NEWLINE assert all(x in pheno_file.index for x in range(1, 4 + 1))NEWLINENEWLINE expected_columns = ['IID'] + ['c34', 'c46', 'c47']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[1, 'IID'] == '1'NEWLINE assert pheno_file.loc[2, 'IID'] == '2'NEWLINE assert 
pheno_file.loc[3, 'IID'] == '3'NEWLINE assert pheno_file.loc[4, 'IID'] == '4'NEWLINENEWLINE assert pheno_file.loc[1, 'c34'] == '21'NEWLINE assert pheno_file.loc[2, 'c34'] == '12'NEWLINE assert pheno_file.loc[3, 'c34'] == '1'NEWLINE assert pheno_file.loc[4, 'c34'] == '17'NEWLINENEWLINE assert pheno_file.loc[1, 'c46'] == '-9', pheno_file.loc[1, 'c46']NEWLINE assert pheno_file.loc[2, 'c46'] == 'NA'NEWLINE assert pheno_file.loc[3, 'c46'] == '-7'NEWLINE assert pheno_file.loc[4, 'c46'] == '4'NEWLINENEWLINE assert pheno_file.loc[1, 'c47'] == '45.55412'NEWLINE assert pheno_file.loc[2, 'c47'] == '-0.55461'NEWLINE assert pheno_file.loc[3, 'c47'] == '-5.32471'NEWLINE assert pheno_file.loc[4, 'c47'] == '55.19832'NEWLINENEWLINE def test_phenotype_query_multiple_column_integer_values_with_nan_using_columns_renaming_with_as_uppercase(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example06_nan_integer.csv')NEWLINENEWLINE columns = ['c34_0_0 as c34', 'c46_0_0 AS c46', 'c47_0_0 as c47']NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE }NEWLINENEWLINE # RunNEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE query_string=parameters, headers={'accept': 'text/plink2'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', na_values='',NEWLINE keep_default_na=False, index_col='FID', dtype=str)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (4, 3 + 1) # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert len(pheno_file.index) == 4NEWLINE assert all(x in pheno_file.index for x in range(1, 4 + 1))NEWLINENEWLINE expected_columns = ['IID'] + ['c34', 'c46', 'c47']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[1, 'IID'] == '1'NEWLINE assert pheno_file.loc[2, 'IID'] == '2'NEWLINE assert pheno_file.loc[3, 'IID'] == '3'NEWLINE assert pheno_file.loc[4, 'IID'] == '4'NEWLINENEWLINE assert pheno_file.loc[1, 'c34'] == '21'NEWLINE assert pheno_file.loc[2, 'c34'] == '12'NEWLINE assert pheno_file.loc[3, 'c34'] == '1'NEWLINE assert pheno_file.loc[4, 'c34'] == '17'NEWLINENEWLINE assert pheno_file.loc[1, 'c46'] == '-9', pheno_file.loc[1, 'c46']NEWLINE assert pheno_file.loc[2, 'c46'] == 'NA'NEWLINE assert pheno_file.loc[3, 'c46'] == '-7'NEWLINE assert pheno_file.loc[4, 'c46'] == '4'NEWLINENEWLINE assert pheno_file.loc[1, 'c47'] == '45.55412'NEWLINE assert pheno_file.loc[2, 'c47'] == '-0.55461'NEWLINE assert pheno_file.loc[3, 'c47'] == '-5.32471'NEWLINE assert pheno_file.loc[4, 'c47'] == '55.19832'NEWLINENEWLINE def test_phenotype_query_multiple_column_integer_values_with_nan_using_columns_renaming_with_space(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example06_nan_integer.csv')NEWLINENEWLINE columns = ['c34_0_0 as c34', 'c46_0_0 c46', 'c47_0_0 as c47']NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE }NEWLINENEWLINE # RunNEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE query_string=parameters, headers={'accept': 'text/plink2'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', na_values='',NEWLINE keep_default_na=False, index_col='FID', dtype=str)NEWLINENEWLINE assert pheno_file is not 
NoneNEWLINE        assert not pheno_file.emptyNEWLINE        assert pheno_file.shape == (4, 3 + 1) # plus IIDNEWLINENEWLINE        assert pheno_file.index.name == 'FID'NEWLINE        assert len(pheno_file.index) == 4NEWLINE        assert all(x in pheno_file.index for x in range(1, 4 + 1))NEWLINENEWLINE        expected_columns = ['IID'] + ['c34', 'c46', 'c47']NEWLINE        assert len(pheno_file.columns) == len(expected_columns)NEWLINE        assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE        assert pheno_file.loc[1, 'IID'] == '1'NEWLINE        assert pheno_file.loc[2, 'IID'] == '2'NEWLINE        assert pheno_file.loc[3, 'IID'] == '3'NEWLINE        assert pheno_file.loc[4, 'IID'] == '4'NEWLINENEWLINE        assert pheno_file.loc[1, 'c34'] == '21'NEWLINE        assert pheno_file.loc[2, 'c34'] == '12'NEWLINE        assert pheno_file.loc[3, 'c34'] == '1'NEWLINE        assert pheno_file.loc[4, 'c34'] == '17'NEWLINENEWLINE        assert pheno_file.loc[1, 'c46'] == '-9', pheno_file.loc[1, 'c46']NEWLINE        assert pheno_file.loc[2, 'c46'] == 'NA'NEWLINE        assert pheno_file.loc[3, 'c46'] == '-7'NEWLINE        assert pheno_file.loc[4, 'c46'] == '4'NEWLINENEWLINE        assert pheno_file.loc[1, 'c47'] == '45.55412'NEWLINE        assert pheno_file.loc[2, 'c47'] == '-0.55461'NEWLINE        assert pheno_file.loc[3, 'c47'] == '-5.32471'NEWLINE        assert pheno_file.loc[4, 'c47'] == '55.19832'NEWLINENEWLINE    def test_phenotype_query_multiple_column_integer_values_with_nan_using_reg_exp(self):NEWLINE        # PrepareNEWLINE        self.setUp('pheno2sql/example06_nan_integer.csv')NEWLINENEWLINE        columns = ['c34_0_0 as c34']NEWLINE        reg_exp_columns = ['c4[67]_0_0']NEWLINENEWLINE        parameters = {NEWLINE            'columns': columns,NEWLINE            'ecolumns': reg_exp_columns,NEWLINE        }NEWLINENEWLINE        # RunNEWLINE        response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE                                query_string=parameters, headers={'accept': 'text/plink2'})NEWLINENEWLINE        # ValidateNEWLINE        assert response.status_code == 200, response.status_codeNEWLINENEWLINE        pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', na_values='',NEWLINE                                 keep_default_na=False, index_col='FID', dtype=str)NEWLINENEWLINE        assert pheno_file is not NoneNEWLINE        assert not pheno_file.emptyNEWLINE        assert pheno_file.shape == (4, 3 + 1) # plus IIDNEWLINENEWLINE        assert pheno_file.index.name == 'FID'NEWLINE        assert len(pheno_file.index) == 4NEWLINE        assert all(x in pheno_file.index for x in range(1, 4 + 1))NEWLINENEWLINE        expected_columns = ['IID'] + ['c34', 'c46_0_0', 'c47_0_0']NEWLINE        assert len(pheno_file.columns) == len(expected_columns)NEWLINE        assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE        assert pheno_file.loc[1, 'IID'] == '1'NEWLINE        assert pheno_file.loc[2, 'IID'] == '2'NEWLINE        assert pheno_file.loc[3, 'IID'] == '3'NEWLINE        assert pheno_file.loc[4, 'IID'] == '4'NEWLINENEWLINE        assert pheno_file.loc[1, 'c34'] == '21'NEWLINE        assert pheno_file.loc[2, 'c34'] == '12'NEWLINE        assert pheno_file.loc[3, 'c34'] == '1'NEWLINE        assert pheno_file.loc[4, 'c34'] == '17'NEWLINENEWLINE        # the assertion message references the real column name here, sinceNEWLINE        # the columns are not renamed in this testNEWLINE        assert pheno_file.loc[1, 'c46_0_0'] == '-9', pheno_file.loc[1, 'c46_0_0']NEWLINE        assert pheno_file.loc[2, 'c46_0_0'] == 'NA'NEWLINE        assert pheno_file.loc[3, 'c46_0_0'] == '-7'NEWLINE        assert pheno_file.loc[4, 'c46_0_0'] == '4'NEWLINENEWLINE        assert pheno_file.loc[1, 'c47_0_0'] == '45.55412'NEWLINE        assert pheno_file.loc[2, 'c47_0_0'] == '-0.55461'NEWLINE        assert pheno_file.loc[3, 'c47_0_0'] == '-5.32471'NEWLINE        assert pheno_file.loc[4, 'c47_0_0'] == '55.19832'NEWLINENEWLINE    def test_phenotype_query_multiple_column_create_field_from_integer(self):NEWLINE        # PrepareNEWLINE        columns = ['c34_0_0', 'c46_0_0', 'c47_0_0', 
'c46_0_0^2 as squared']NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE }NEWLINENEWLINE # RunNEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE query_string=parameters, headers={'accept': 'text/plink2'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)NEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (4, 4 + 1) # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert len(pheno_file.index) == 4NEWLINE assert all(x in pheno_file.index for x in range(1, 4 + 1))NEWLINENEWLINE expected_columns = ['IID'] + columnsNEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x.split()[-1] in pheno_file.columns for x in expected_columns)NEWLINENEWLINE assert pheno_file.loc[1, 'IID'] == '1'NEWLINE assert pheno_file.loc[2, 'IID'] == '2'NEWLINE assert pheno_file.loc[3, 'IID'] == '3'NEWLINE assert pheno_file.loc[4, 'IID'] == '4'NEWLINENEWLINE assert pheno_file.loc[1, 'c34_0_0'] == '21'NEWLINE assert pheno_file.loc[2, 'c34_0_0'] == '12'NEWLINE assert pheno_file.loc[3, 'c34_0_0'] == '1'NEWLINE assert pheno_file.loc[4, 'c34_0_0'] == '17'NEWLINENEWLINE assert pheno_file.loc[1, 'c46_0_0'] == '-9'NEWLINE assert pheno_file.loc[2, 'c46_0_0'] == '-2'NEWLINE assert pheno_file.loc[3, 'c46_0_0'] == '-7'NEWLINE assert pheno_file.loc[4, 'c46_0_0'] == '4'NEWLINENEWLINE assert pheno_file.loc[1, 'c47_0_0'] == '45.55412'NEWLINE assert pheno_file.loc[2, 'c47_0_0'] == '-0.55461'NEWLINE assert pheno_file.loc[3, 'c47_0_0'] == '-5.32471'NEWLINE assert pheno_file.loc[4, 'c47_0_0'] == '55.19832'NEWLINENEWLINE # square results in float typeNEWLINE assert pheno_file.loc[1, 'squared'] == '81.0'NEWLINE assert pheno_file.loc[2, 'squared'] == '4.0'NEWLINE assert pheno_file.loc[3, 'squared'] == '49.0'NEWLINE assert pheno_file.loc[4, 'squared'] == '16.0'NEWLINENEWLINE def test_phenotype_query_multiple_column_create_field_from_integer_return_integer(self):NEWLINE # PrepareNEWLINE columns = ['c34_0_0', 'c46_0_0', 'c47_0_0', 'c46_0_0 + 1 as sum']NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE }NEWLINENEWLINE # RunNEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE query_string=parameters, headers={'accept': 'text/plink2'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)NEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (4, 4 + 1) # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert len(pheno_file.index) == 4NEWLINE assert all(x in pheno_file.index for x in range(1, 4 + 1))NEWLINENEWLINE expected_columns = ['IID'] + columnsNEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x.split()[-1] in pheno_file.columns for x in expected_columns)NEWLINENEWLINE assert pheno_file.loc[1, 'IID'] == '1'NEWLINE assert pheno_file.loc[2, 'IID'] == '2'NEWLINE assert pheno_file.loc[3, 'IID'] == '3'NEWLINE assert pheno_file.loc[4, 'IID'] == '4'NEWLINENEWLINE assert pheno_file.loc[1, 'c34_0_0'] == '21'NEWLINE assert pheno_file.loc[2, 'c34_0_0'] == '12'NEWLINE assert pheno_file.loc[3, 'c34_0_0'] == '1'NEWLINE assert pheno_file.loc[4, 'c34_0_0'] == '17'NEWLINENEWLINE assert 
pheno_file.loc[1, 'c46_0_0'] == '-9'NEWLINE        assert pheno_file.loc[2, 'c46_0_0'] == '-2'NEWLINE        assert pheno_file.loc[3, 'c46_0_0'] == '-7'NEWLINE        assert pheno_file.loc[4, 'c46_0_0'] == '4'NEWLINENEWLINE        assert pheno_file.loc[1, 'c47_0_0'] == '45.55412'NEWLINE        assert pheno_file.loc[2, 'c47_0_0'] == '-0.55461'NEWLINE        assert pheno_file.loc[3, 'c47_0_0'] == '-5.32471'NEWLINE        assert pheno_file.loc[4, 'c47_0_0'] == '55.19832'NEWLINENEWLINE        # adding an integer keeps the integer typeNEWLINE        assert pheno_file.loc[1, 'sum'] == '-8'NEWLINE        assert pheno_file.loc[2, 'sum'] == '-1'NEWLINE        assert pheno_file.loc[3, 'sum'] == '-6'NEWLINE        assert pheno_file.loc[4, 'sum'] == '5'NEWLINENEWLINE    def test_phenotype_query_multiple_column_create_field_from_float(self):NEWLINE        # PrepareNEWLINE        columns = ['c34_0_0', 'c46_0_0', 'c47_0_0', 'c47_0_0^2 as squared']NEWLINENEWLINE        parameters = {NEWLINE            'columns': columns,NEWLINE        }NEWLINENEWLINE        # RunNEWLINE        response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE                                query_string=parameters, headers={'accept': 'text/plink2'})NEWLINENEWLINE        # ValidateNEWLINE        assert response.status_code == 200, response.status_codeNEWLINENEWLINE        pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)NEWLINE        assert pheno_file is not NoneNEWLINE        assert not pheno_file.emptyNEWLINE        assert pheno_file.shape == (4, 4 + 1) # plus IIDNEWLINENEWLINE        assert pheno_file.index.name == 'FID'NEWLINE        assert len(pheno_file.index) == 4NEWLINE        assert all(x in pheno_file.index for x in range(1, 4 + 1))NEWLINENEWLINE        expected_columns = ['IID'] + columnsNEWLINE        assert len(pheno_file.columns) == len(expected_columns)NEWLINE        assert all(x.split()[-1] in pheno_file.columns for x in expected_columns)NEWLINENEWLINE        assert pheno_file.loc[1, 'IID'] == '1'NEWLINE        assert pheno_file.loc[2, 'IID'] == '2'NEWLINE        assert pheno_file.loc[3, 'IID'] == '3'NEWLINE        assert pheno_file.loc[4, 'IID'] == '4'NEWLINENEWLINE        assert pheno_file.loc[1, 'c34_0_0'] == '21'NEWLINE        assert pheno_file.loc[2, 'c34_0_0'] == '12'NEWLINE        assert pheno_file.loc[3, 'c34_0_0'] == '1'NEWLINE        assert pheno_file.loc[4, 'c34_0_0'] == '17'NEWLINENEWLINE        assert pheno_file.loc[1, 'c46_0_0'] == '-9'NEWLINE        assert pheno_file.loc[2, 'c46_0_0'] == '-2'NEWLINE        assert pheno_file.loc[3, 'c46_0_0'] == '-7'NEWLINE        assert pheno_file.loc[4, 'c46_0_0'] == '4'NEWLINENEWLINE        assert pheno_file.loc[1, 'c47_0_0'] == '45.55412'NEWLINE        assert pheno_file.loc[2, 'c47_0_0'] == '-0.55461'NEWLINE        assert pheno_file.loc[3, 'c47_0_0'] == '-5.32471'NEWLINE        assert pheno_file.loc[4, 'c47_0_0'] == '55.19832'NEWLINENEWLINE        # square results in float typeNEWLINE        assert pheno_file.loc[1, 'squared'] == '2075.1778489744'NEWLINE        assert pheno_file.loc[2, 'squared'] == '0.3075922521'NEWLINE        assert pheno_file.loc[3, 'squared'] == '28.3525365841'NEWLINE        assert pheno_file.loc[4, 'squared'] == '3046.8545308224'NEWLINENEWLINE    def test_phenotype_query_multiple_column_create_field_from_str(self):NEWLINE        # PrepareNEWLINE        columns = ['c34_0_0', 'c46_0_0', 'c47_0_0', 'c21_0_0', '(c21_0_0 || \' end \' || eid) as result']NEWLINENEWLINE        parameters = {NEWLINE            'columns': columns,NEWLINE        }NEWLINENEWLINE        # RunNEWLINE        response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE                                query_string=parameters, headers={'accept': 'text/plink2'})NEWLINENEWLINE        # ValidateNEWLINE        assert response.status_code == 200, response.status_codeNEWLINENEWLINE        pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)NEWLINE        assert pheno_file is not 
NoneNEWLINE        assert not pheno_file.emptyNEWLINE        assert pheno_file.shape == (4, 5 + 1) # plus IIDNEWLINENEWLINE        assert pheno_file.index.name == 'FID'NEWLINE        assert len(pheno_file.index) == 4NEWLINE        assert all(x in pheno_file.index for x in range(1, 4 + 1))NEWLINENEWLINE        expected_columns = ['IID'] + columnsNEWLINE        assert len(pheno_file.columns) == len(expected_columns)NEWLINE        assert all(x.split()[-1] in pheno_file.columns for x in expected_columns)NEWLINENEWLINE        assert pheno_file.loc[1, 'IID'] == '1'NEWLINE        assert pheno_file.loc[2, 'IID'] == '2'NEWLINE        assert pheno_file.loc[3, 'IID'] == '3'NEWLINE        assert pheno_file.loc[4, 'IID'] == '4'NEWLINENEWLINE        # result of the string concatenation in the computed columnNEWLINE        assert pheno_file.loc[1, 'result'] == 'Option number 1 end 1'NEWLINE        assert pheno_file.loc[2, 'result'] == 'Option number 2 end 2'NEWLINE        assert pheno_file.loc[3, 'result'] == 'Option number 3 end 3'NEWLINE        assert pheno_file.loc[4, 'result'] == 'Option number 4 end 4'NEWLINENEWLINE    def test_phenotype_query_format_pheno_missing_data(self):NEWLINE        # PrepareNEWLINE        columns = ['c21_0_0', 'c21_1_0', 'c48_0_0']NEWLINENEWLINE        parameters = {NEWLINE            'columns': columns,NEWLINE        }NEWLINENEWLINE        # RunNEWLINE        response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE                                query_string=parameters, headers={'accept': 'text/plink2'})NEWLINENEWLINE        # ValidateNEWLINE        assert response.status_code == 200, response.status_codeNEWLINENEWLINE        # na_values='' is necessary to not overwrite NA strings hereNEWLINE        pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t',NEWLINE                                 na_values='', keep_default_na=False, index_col='FID', dtype=str)NEWLINE        assert pheno_file is not NoneNEWLINE        assert not pheno_file.emptyNEWLINE        assert pheno_file.shape == (4, 3 + 1) # plus IIDNEWLINENEWLINE        assert pheno_file.index.name == 'FID'NEWLINE        assert len(pheno_file.index) == 4NEWLINE        assert all(x in pheno_file.index for x in range(1, 4 + 1))NEWLINENEWLINE        expected_columns = ['IID'] + columnsNEWLINE        assert len(pheno_file.columns) == len(expected_columns)NEWLINE        assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE        assert pheno_file.loc[1, 'IID'] == '1'NEWLINE        assert pheno_file.loc[2, 'IID'] == '2'NEWLINE        assert pheno_file.loc[3, 'IID'] == '3'NEWLINE        assert pheno_file.loc[4, 'IID'] == '4'NEWLINENEWLINE        assert pheno_file.loc[1, 'c21_0_0'] == 'Option number 1'NEWLINE        assert pheno_file.loc[2, 'c21_0_0'] == 'Option number 2'NEWLINE        assert pheno_file.loc[3, 'c21_0_0'] == 'Option number 3'NEWLINE        assert pheno_file.loc[4, 'c21_0_0'] == 'Option number 4'NEWLINENEWLINE        assert pheno_file.loc[1, 'c21_1_0'] == 'No response'NEWLINE        assert pheno_file.loc[2, 'c21_1_0'] == 'NA'NEWLINE        assert pheno_file.loc[3, 'c21_1_0'] == 'Of course'NEWLINE        assert pheno_file.loc[4, 'c21_1_0'] == 'I don\'t know'NEWLINENEWLINE        assert pheno_file.loc[1, 'c48_0_0'] == '2011-08-14'NEWLINE        assert pheno_file.loc[2, 'c48_0_0'] == '2016-11-30'NEWLINE        assert pheno_file.loc[3, 'c48_0_0'] == '2010-01-01'NEWLINE        assert pheno_file.loc[4, 'c48_0_0'] == '2011-02-15'NEWLINENEWLINE    def test_phenotype_query_format_pheno_missing_date(self):NEWLINE        # PrepareNEWLINE        self.setUp('pheno2sql/example05_missing_date.csv')NEWLINENEWLINE        columns = ['c21_0_0', 'c21_1_0', 'c48_0_0']NEWLINENEWLINE        parameters = {NEWLINE            'columns': columns,NEWLINE        }NEWLINENEWLINE        # RunNEWLINE        response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE                                query_string=parameters, headers={'accept': 'text/plink2'})NEWLINENEWLINE        # ValidateNEWLINE        assert response.status_code == 200, 
response.status_codeNEWLINENEWLINE # na_values='' is necessary to not overwrite NA strings hereNEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t',NEWLINE na_values='', keep_default_na=False, index_col='FID', dtype=str)NEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (4, 3 + 1) # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert len(pheno_file.index) == 4NEWLINE assert all(x in pheno_file.index for x in range(1, 4 + 1))NEWLINENEWLINE expected_columns = ['IID'] + columnsNEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[1, 'IID'] == '1'NEWLINE assert pheno_file.loc[2, 'IID'] == '2'NEWLINE assert pheno_file.loc[3, 'IID'] == '3'NEWLINE assert pheno_file.loc[4, 'IID'] == '4'NEWLINENEWLINE assert pheno_file.loc[1, 'c48_0_0'] == '2011-08-14'NEWLINE assert pheno_file.loc[2, 'c48_0_0'] == '2016-11-30'NEWLINE assert pheno_file.loc[3, 'c48_0_0'] == 'NA'NEWLINE assert pheno_file.loc[4, 'c48_0_0'] == '2011-02-15'NEWLINENEWLINE def test_phenotype_query_multiple_column_no_format(self):NEWLINE # PrepareNEWLINE columns = ['c21_0_0', 'c48_0_0']NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE }NEWLINENEWLINE # RunNEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE query_string=parameters)NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)NEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (4, 2 + 1) # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert len(pheno_file.index) == 4NEWLINE assert all(x in pheno_file.index for x in range(1, 4 + 1))NEWLINENEWLINE expected_columns = ['IID'] + columnsNEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINE # column orderNEWLINE assert pheno_file.columns.tolist()[0] == 'IID'NEWLINENEWLINE assert pheno_file.loc[1, 'IID'] == '1'NEWLINE assert pheno_file.loc[2, 'IID'] == '2'NEWLINE assert pheno_file.loc[3, 'IID'] == '3'NEWLINE assert pheno_file.loc[4, 'IID'] == '4'NEWLINENEWLINE assert pheno_file.loc[1, 'c21_0_0'] == 'Option number 1'NEWLINE assert pheno_file.loc[2, 'c21_0_0'] == 'Option number 2'NEWLINE assert pheno_file.loc[3, 'c21_0_0'] == 'Option number 3'NEWLINE assert pheno_file.loc[4, 'c21_0_0'] == 'Option number 4'NEWLINENEWLINE assert pheno_file.loc[1, 'c48_0_0'] == '2011-08-14'NEWLINE assert pheno_file.loc[2, 'c48_0_0'] == '2016-11-30'NEWLINE assert pheno_file.loc[3, 'c48_0_0'] == '2010-01-01'NEWLINE assert pheno_file.loc[4, 'c48_0_0'] == '2011-02-15'NEWLINENEWLINE def test_phenotype_query_multiple_column_format_not_supported(self):NEWLINE # PrepareNEWLINE columns = ['c21_0_0', 'c48_0_0']NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE }NEWLINENEWLINE # RunNEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype',NEWLINE query_string=parameters, headers={'accept': 'application/json'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 400, response.status_codeNEWLINE data = json.load(io.StringIO(response.data.decode('utf-8')))NEWLINENEWLINE assert 'status_code' in data, dataNEWLINE assert data['status_code'] == 400, data['status_code']NEWLINENEWLINE 
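        # A note on the payload shape checked below (an assumption of this test,
        # not a documented API contract): the JSON error body is expected to look
        # roughly like {"status_code": 400, "error_type": "UNKNOWN", "message": "..."},
        # where the message names the supported formats (e.g. 'text/plink2').
        # Only the key names and those fragments are asserted, not the exact wording.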
assert 'error_type' in data, dataNEWLINE assert data['error_type'] == 'UNKNOWN', data['error_type']NEWLINENEWLINE assert 'message' in data, dataNEWLINE assert 'are supported' in str(data['message']), data['message']NEWLINE assert 'text/plink2' in str(data['message']), data['message']NEWLINENEWLINE def test_phenotype_query_with_filtering(self):NEWLINE # PrepareNEWLINE columns = ['c21_0_0', 'c21_2_0', 'c47_0_0', 'c48_0_0']NEWLINE filtering = ["c48_0_0 > '2011-01-01'", "c21_2_0 <> ''"]NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE 'filters': filtering,NEWLINE }NEWLINENEWLINE # RunNEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype', query_string=parameters)NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)NEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape[0] == 2NEWLINE assert pheno_file.shape[1] == 4 + 1 # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert len(pheno_file.index) == 2NEWLINE assert all(x in pheno_file.index for x in (1, 2))NEWLINENEWLINE expected_columns = ['IID'] + columnsNEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINE # column orderNEWLINE assert pheno_file.columns.tolist()[0] == 'IID'NEWLINENEWLINE assert pheno_file.loc[1, 'IID'] == '1'NEWLINE assert pheno_file.loc[2, 'IID'] == '2'NEWLINENEWLINE assert pheno_file.loc[1, 'c21_0_0'] == 'Option number 1'NEWLINE assert pheno_file.loc[2, 'c21_0_0'] == 'Option number 2'NEWLINENEWLINE assert pheno_file.loc[1, 'c21_2_0'] == 'Yes'NEWLINE assert pheno_file.loc[2, 'c21_2_0'] == 'No'NEWLINENEWLINE assert pheno_file.loc[1, 'c47_0_0'] == '45.55412'NEWLINE assert pheno_file.loc[2, 'c47_0_0'] == '-0.55461'NEWLINENEWLINE assert pheno_file.loc[1, 'c48_0_0'] == '2011-08-14'NEWLINE assert pheno_file.loc[2, 'c48_0_0'] == '2016-11-30'NEWLINENEWLINE def test_phenotype_query_columns_with_regular_expression_and_standard_columns(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example09_with_arrays.csv')NEWLINENEWLINE columns = ['c21_0_0', 'c48_0_0']NEWLINE reg_exp_columns = ['c84_0_\d+']NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE 'ecolumns': reg_exp_columns,NEWLINE }NEWLINENEWLINE # RunNEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype', query_string=parameters)NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', na_values='',NEWLINE keep_default_na=False, index_col='FID', dtype=str)NEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (5, 5 + 1), pheno_file.shape # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert len(pheno_file.index) == 5NEWLINE assert all(x in pheno_file.index for x in range(1, 5 + 1))NEWLINENEWLINE expected_columns = ['IID'] + columns + ['c84_0_0', 'c84_0_1', 'c84_0_2']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINE # column orderNEWLINE assert pheno_file.columns.tolist()[0] == 'IID'NEWLINENEWLINE assert pheno_file.loc[1, 'IID'] == '1'NEWLINE assert pheno_file.loc[2, 'IID'] == '2'NEWLINE assert pheno_file.loc[3, 'IID'] == '3'NEWLINE assert 
pheno_file.loc[4, 'IID'] == '4'NEWLINE assert pheno_file.loc[5, 'IID'] == '5'NEWLINENEWLINE assert pheno_file.loc[1, 'c21_0_0'] == 'Option number 1'NEWLINE assert pheno_file.loc[2, 'c21_0_0'] == 'Option number 2'NEWLINE assert pheno_file.loc[3, 'c21_0_0'] == 'Option number 3'NEWLINE assert pheno_file.loc[4, 'c21_0_0'] == "Option number 4"NEWLINE assert pheno_file.loc[5, 'c21_0_0'] == "Option number 5"NEWLINENEWLINE assert pheno_file.loc[1, 'c48_0_0'] == '2010-07-14'NEWLINE assert pheno_file.loc[2, 'c48_0_0'] == '2017-11-30'NEWLINE assert pheno_file.loc[3, 'c48_0_0'] == '2020-01-01'NEWLINE assert pheno_file.loc[4, 'c48_0_0'] == '1990-02-15'NEWLINE assert pheno_file.loc[5, 'c48_0_0'] == '1999-10-11'NEWLINENEWLINE assert pheno_file.loc[1, 'c84_0_0'] == '11', pheno_file.loc[1, 'c84_0_0']NEWLINE assert pheno_file.loc[2, 'c84_0_0'] == '-21'NEWLINE assert pheno_file.loc[3, 'c84_0_0'] == 'NA'NEWLINE assert pheno_file.loc[4, 'c84_0_0'] == '41'NEWLINE assert pheno_file.loc[5, 'c84_0_0'] == '51'NEWLINENEWLINE assert pheno_file.loc[1, 'c84_0_1'] == '1', pheno_file.loc[1, 'c84_0_1']NEWLINE assert pheno_file.loc[2, 'c84_0_1'] == '99'NEWLINE assert pheno_file.loc[3, 'c84_0_1'] == '98'NEWLINE assert pheno_file.loc[4, 'c84_0_1'] == '-37'NEWLINE assert pheno_file.loc[5, 'c84_0_1'] == '36'NEWLINENEWLINE assert pheno_file.loc[1, 'c84_0_2'] == '999'NEWLINE assert pheno_file.loc[2, 'c84_0_2'] == '152'NEWLINE assert pheno_file.loc[3, 'c84_0_2'] == '-68'NEWLINE assert pheno_file.loc[4, 'c84_0_2'] == 'NA'NEWLINE assert pheno_file.loc[5, 'c84_0_2'] == '-445'NEWLINENEWLINE def test_phenotype_query_columns_with_regular_expression_only(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example09_with_arrays.csv')NEWLINENEWLINE reg_exp_columns = ['c84_0_\d+']NEWLINENEWLINE parameters = {NEWLINE 'ecolumns': reg_exp_columns,NEWLINE }NEWLINENEWLINE # RunNEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype', query_string=parameters)NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', na_values='',NEWLINE keep_default_na=False, index_col='FID', dtype=str)NEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (5, 3 + 1), pheno_file.shape # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert len(pheno_file.index) == 5NEWLINE assert all(x in pheno_file.index for x in range(1, 5 + 1))NEWLINENEWLINE expected_columns = ['IID'] + ['c84_0_0', 'c84_0_1', 'c84_0_2']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINE # column orderNEWLINE assert pheno_file.columns.tolist()[0] == 'IID'NEWLINENEWLINE assert pheno_file.loc[1, 'IID'] == '1'NEWLINE assert pheno_file.loc[2, 'IID'] == '2'NEWLINE assert pheno_file.loc[3, 'IID'] == '3'NEWLINE assert pheno_file.loc[4, 'IID'] == '4'NEWLINE assert pheno_file.loc[5, 'IID'] == '5'NEWLINENEWLINE assert pheno_file.loc[1, 'c84_0_0'] == '11', pheno_file.loc[1, 'c84_0_0']NEWLINE assert pheno_file.loc[2, 'c84_0_0'] == '-21'NEWLINE assert pheno_file.loc[3, 'c84_0_0'] == 'NA'NEWLINE assert pheno_file.loc[4, 'c84_0_0'] == '41'NEWLINE assert pheno_file.loc[5, 'c84_0_0'] == '51'NEWLINENEWLINE assert pheno_file.loc[1, 'c84_0_1'] == '1', pheno_file.loc[1, 'c84_0_1']NEWLINE assert pheno_file.loc[2, 'c84_0_1'] == '99'NEWLINE assert pheno_file.loc[3, 'c84_0_1'] == '98'NEWLINE assert pheno_file.loc[4, 'c84_0_1'] == 
'-37'NEWLINE assert pheno_file.loc[5, 'c84_0_1'] == '36'NEWLINENEWLINE assert pheno_file.loc[1, 'c84_0_2'] == '999'NEWLINE assert pheno_file.loc[2, 'c84_0_2'] == '152'NEWLINE assert pheno_file.loc[3, 'c84_0_2'] == '-68'NEWLINE assert pheno_file.loc[4, 'c84_0_2'] == 'NA'NEWLINE assert pheno_file.loc[5, 'c84_0_2'] == '-445'NEWLINENEWLINE def test_phenotype_query_columns_pheno2sql_instance_not_loaded(self):NEWLINE """This test uses a different Pheno2SQL instance without previous loading"""NEWLINENEWLINE # PrepareNEWLINE csv01 = get_repository_path('pheno2sql/example08_01.csv')NEWLINE csv02 = get_repository_path('pheno2sql/example08_02.csv')NEWLINE csvs = (csv01, csv02)NEWLINENEWLINE # first load dataNEWLINE self.setUp(csvs)NEWLINENEWLINE # then create another instance without executing load_data methodNEWLINE self.setUp(csvs, load_data=False, wipe_database=False)NEWLINENEWLINE columns = ['c48_0_0', 'c120_0_0 as c120', 'c150_0_0 c150']NEWLINE reg_exp_columns = ['c21_[01]_0', 'c100_\d_0']NEWLINENEWLINE parameters = {NEWLINE 'columns': columns,NEWLINE 'ecolumns': reg_exp_columns,NEWLINE }NEWLINENEWLINE # RunNEWLINE response = self.app.get('/ukbrest/api/v1.0/phenotype', query_string=parameters)NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', na_values='',NEWLINE keep_default_na=False, index_col='FID', dtype=str)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (5, 8 + 1), pheno_file.shape # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert len(pheno_file.index) == 5NEWLINE assert all(x in pheno_file.index for x in range(1, 5 + 1))NEWLINENEWLINE expected_columns = ['IID'] + ['c21_0_0', 'c21_1_0', 'c48_0_0', 'c120', 'c150', 'c100_0_0', 'c100_1_0', 'c100_2_0']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINE # column orderNEWLINE assert pheno_file.columns.tolist()[0] == 'IID'NEWLINENEWLINE assert pheno_file.loc[1, 'IID'] == '1'NEWLINE assert pheno_file.loc[2, 'IID'] == '2'NEWLINE assert pheno_file.loc[3, 'IID'] == '3'NEWLINE assert pheno_file.loc[4, 'IID'] == '4'NEWLINE assert pheno_file.loc[5, 'IID'] == '5'NEWLINENEWLINE assert pheno_file.loc[1, 'c21_0_0'] == 'Option number 1'NEWLINE assert pheno_file.loc[2, 'c21_0_0'] == 'Option number 2'NEWLINE assert pheno_file.loc[3, 'c21_0_0'] == 'Option number 3'NEWLINE assert pheno_file.loc[4, 'c21_0_0'] == 'Option number 4'NEWLINE assert pheno_file.loc[5, 'c21_0_0'] == 'Option number 5'NEWLINENEWLINE assert pheno_file.loc[1, 'c21_1_0'] == 'No response'NEWLINE assert pheno_file.loc[2, 'c21_1_0'] == 'NA'NEWLINE assert pheno_file.loc[3, 'c21_1_0'] == 'Of course'NEWLINE assert pheno_file.loc[4, 'c21_1_0'] == "I don't know"NEWLINE assert pheno_file.loc[5, 'c21_1_0'] == 'Maybe'NEWLINENEWLINE assert pheno_file.loc[1, 'c48_0_0'] == '2010-07-14'NEWLINE assert pheno_file.loc[2, 'c48_0_0'] == '2017-11-30'NEWLINE assert pheno_file.loc[3, 'c48_0_0'] == '2020-01-01'NEWLINE assert pheno_file.loc[4, 'c48_0_0'] == '1990-02-15'NEWLINE assert pheno_file.loc[5, 'c48_0_0'] == '1999-10-11'NEWLINENEWLINE assert pheno_file.loc[1, 'c100_0_0'] == '-9', pheno_file.loc[1, 'c100_0_0']NEWLINE assert pheno_file.loc[2, 'c100_0_0'] == '-2'NEWLINE assert pheno_file.loc[3, 'c100_0_0'] == 'NA'NEWLINE assert pheno_file.loc[4, 'c100_0_0'] == 'NA'NEWLINE assert pheno_file.loc[5, 
'c100_0_0'] == 'NA'

        assert pheno_file.loc[1, 'c100_1_0'] == '3', pheno_file.loc[1, 'c100_1_0']
        assert pheno_file.loc[2, 'c100_1_0'] == '3'
        assert pheno_file.loc[3, 'c100_1_0'] == '-4'
        assert pheno_file.loc[4, 'c100_1_0'] == 'NA'
        assert pheno_file.loc[5, 'c100_1_0'] == 'NA'

        assert pheno_file.loc[1, 'c100_2_0'] == 'NA', pheno_file.loc[1, 'c100_2_0']
        assert pheno_file.loc[2, 'c100_2_0'] == '1'
        assert pheno_file.loc[3, 'c100_2_0'] == '-10'
        assert pheno_file.loc[4, 'c100_2_0'] == 'NA'
        assert pheno_file.loc[5, 'c100_2_0'] == 'NA'

    def test_phenotype_query_http_basic_auth_is_null(self):
        # Prepare
        csv01 = get_repository_path('pheno2sql/example08_01.csv')
        csv02 = get_repository_path('pheno2sql/example08_02.csv')
        csvs = (csv01, csv02)

        # first load data
        self.setUp(csvs)

        # then create another instance without executing load_data method
        self.setUp(csvs, load_data=False, wipe_database=False)

        def configure_http_auth(theapp):
            theapp.config['auth'] = None

        self.configureApp(configure_http_auth)

        columns = ['c48_0_0', 'c120_0_0 as c120', 'c150_0_0 c150']
        reg_exp_columns = ['c21_[01]_0', 'c100_\d_0']

        parameters = {
            'columns': columns,
            'ecolumns': reg_exp_columns,
        }

        # Run
        response = self.app.get('/ukbrest/api/v1.0/phenotype', query_string=parameters)

        # Validate
        # auth is disabled, so the request is authorized
        assert response.status_code == 200, response.status_code

    def test_phenotype_query_http_basic_auth_no_user_pass(self):
        # Prepare
        csv01 = get_repository_path('pheno2sql/example08_01.csv')
        csv02 = get_repository_path('pheno2sql/example08_02.csv')
        csvs = (csv01, csv02)

        # first load data
        self.setUp(csvs)

        # then create another instance without executing load_data method
        self.setUp(csvs, load_data=False, wipe_database=False)

        self.configureAppWithAuth('user: thepassword2')

        columns = ['c48_0_0', 'c120_0_0 as c120', 'c150_0_0 c150']
        reg_exp_columns = ['c21_[01]_0', 'c100_\d_0']

        parameters = {
            'columns': columns,
            'ecolumns': reg_exp_columns,
        }

        # Run
        response = self.app.get('/ukbrest/api/v1.0/phenotype', query_string=parameters)

        # Validate
        # unauthorized
        assert response.status_code == 401, response.status_code

    def test_phenotype_query_http_basic_auth_with_user_pass(self):
        # Prepare
        csv01 = get_repository_path('pheno2sql/example08_01.csv')
        csv02 = get_repository_path('pheno2sql/example08_02.csv')
        csvs = (csv01, csv02)

        # first load data
        self.setUp(csvs)

        # then create another instance without executing load_data method
        self.setUp(csvs, load_data=False, wipe_database=False)

        self.configureAppWithAuth('user: thepassword2')

        columns = ['c48_0_0', 'c120_0_0 as c120', 'c150_0_0 c150']
        reg_exp_columns = ['c21_[01]_0', 'c100_\d_0']

        parameters = {
            'columns': columns,
            'ecolumns': reg_exp_columns,
        }

        # Run
        response = self.app.get(
            '/ukbrest/api/v1.0/phenotype',
            query_string=parameters,
            headers=self._get_http_basic_auth_header('user', 'thepassword2'),
        )

        # Validate
        # authorized: the correct credentials were provided
        assert response.status_code == 200, response.status_code

        pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', na_values='',
                                 keep_default_na=False, index_col='FID', dtype=str)

        assert pheno_file is not None
        assert not pheno_file.empty
        assert pheno_file.shape == (5, 8 + 1), pheno_file.shape # plus IID

    def test_phenotype_query_http_basic_auth_with_wrong_pass(self):
        # Prepare
        csv01 = get_repository_path('pheno2sql/example08_01.csv')
        csv02 = get_repository_path('pheno2sql/example08_02.csv')
        csvs = (csv01, csv02)

        # first load data
        self.setUp(csvs)

        # then create another instance without executing load_data method
        self.setUp(csvs, load_data=False, wipe_database=False)

        self.configureAppWithAuth('user: anotherpass')

        columns = ['c48_0_0', 'c120_0_0 as c120', 'c150_0_0 c150']
        reg_exp_columns = ['c21_[01]_0', 'c100_\d_0']

        parameters = {
            'columns': columns,
            'ecolumns': reg_exp_columns,
        }

        # Run
        response = self.app.get(
            '/ukbrest/api/v1.0/phenotype',
            query_string=parameters,
            headers=self._get_http_basic_auth_header('user', 'thepassword2')
        )

        # Validate
        # unauthorized
        assert response.status_code == 401, response.status_code

    def test_phenotype_query_http_basic_auth_with_wrong_user(self):
        # Prepare
        csv01 = get_repository_path('pheno2sql/example08_01.csv')
        csv02 = get_repository_path('pheno2sql/example08_02.csv')
        csvs = (csv01, csv02)

        # first load data
        self.setUp(csvs)

        # then create another instance without executing load_data method
        self.setUp(csvs, load_data=False, wipe_database=False)

        self.configureAppWithAuth('anotheruser: thepassword2')

        columns = ['c48_0_0', 'c120_0_0 as c120', 'c150_0_0 c150']
        reg_exp_columns = ['c21_[01]_0', 'c100_\d_0']

        parameters = {
            'columns': columns,
            'ecolumns': reg_exp_columns,
        }

        # Run
        response = self.app.get(
            '/ukbrest/api/v1.0/phenotype',
            query_string=parameters,
            headers=self._get_http_basic_auth_header('user', 'thepassword2'),
        )

        # Validate
        # unauthorized
        assert response.status_code == 401, response.status_code

    def test_phenotype_query_yaml_get_covariates(self):
        # Prepare
        self.setUp('pheno2sql/example10/example10_diseases.csv',
                   bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),
                   sql_chunksize=2, n_columns_per_table=2)

        yaml_data = b"""
        covariates:
            field_name_34: c34_0_0
            field_name_47: c47_0_0

        fields:
            instance0: c21_0_0
            instance1: c21_1_0
            instance2: c21_2_0
        """

        # Run
        response = self.app.post('/ukbrest/api/v1.0/query', data=
        {
            'file': (io.BytesIO(yaml_data), 'data.yaml'),
            'section': 'covariates',
        })

        # Validate
        assert response.status_code == 200, response.status_code

        pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str,
                                 na_values='', keep_default_na=False)
        assert pheno_file is not None
        assert not pheno_file.empty
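        # Expected layout for the 'covariates' section in plink2 format: a
        # tab-separated table indexed by FID, with IID first and then one column
        # per alias declared in the YAML ('field_name_34', 'field_name_47'),
        # which is why the shape asserted below is (5, 2 + 1).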
assert pheno_file.shape == (5, 2 + 1) # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert all(x in pheno_file.index for x in (1000010, 1000020, 1000030, 1000040, 1000050))NEWLINENEWLINE expected_columns = ['IID'] + ['field_name_34', 'field_name_47']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINE # column orderNEWLINE assert pheno_file.columns.tolist()[0] == 'IID'NEWLINENEWLINE assert pheno_file.loc[1000010, 'IID'] == '1000010'NEWLINE assert pheno_file.loc[1000010, 'field_name_34'] == '-33'NEWLINE assert pheno_file.loc[1000010, 'field_name_47'] == '41.55312'NEWLINENEWLINE assert pheno_file.loc[1000020, 'IID'] == '1000020'NEWLINE assert pheno_file.loc[1000020, 'field_name_34'] == '34'NEWLINE assert pheno_file.loc[1000020, 'field_name_47'] == '-10.51461'NEWLINENEWLINE assert pheno_file.loc[1000030, 'IID'] == '1000030'NEWLINE assert pheno_file.loc[1000030, 'field_name_34'] == '0'NEWLINE assert pheno_file.loc[1000030, 'field_name_47'] == '-35.31471'NEWLINENEWLINE assert pheno_file.loc[1000040, 'IID'] == '1000040'NEWLINE assert pheno_file.loc[1000040, 'field_name_34'] == '3'NEWLINE assert pheno_file.loc[1000040, 'field_name_47'] == '5.20832'NEWLINENEWLINE assert pheno_file.loc[1000050, 'IID'] == '1000050'NEWLINE assert pheno_file.loc[1000050, 'field_name_34'] == '-4'NEWLINE assert pheno_file.loc[1000050, 'field_name_47'] == 'NA'NEWLINENEWLINE def test_phenotype_query_yaml_get_covariates_http_auth_with_no_credentials(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example10/example10_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE self.configureAppWithAuth('user: thepassword2')NEWLINENEWLINE yaml_data = b"""NEWLINE covariates:NEWLINE field_name_34: c34_0_0NEWLINE field_name_47: c47_0_0NEWLINENEWLINE fields:NEWLINE instance0: c21_0_0NEWLINE instance1: c21_1_0NEWLINE instance2: c21_2_0NEWLINE """NEWLINENEWLINE # RunNEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'covariates',NEWLINE })NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 401, response.status_codeNEWLINENEWLINE def test_phenotype_query_yaml_get_covariates_http_auth_with_credentials(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example10/example10_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE self.configureAppWithAuth('user: thepassword2')NEWLINENEWLINE yaml_data = b"""NEWLINE covariates:NEWLINE field_name_34: c34_0_0NEWLINE field_name_47: c47_0_0NEWLINENEWLINE fields:NEWLINE instance0: c21_0_0NEWLINE instance1: c21_1_0NEWLINE instance2: c21_2_0NEWLINE """NEWLINENEWLINE # RunNEWLINE response = self.app.post(NEWLINE '/ukbrest/api/v1.0/query',NEWLINE data={NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'covariates',NEWLINE },NEWLINE headers=self._get_http_basic_auth_header('user', 'thepassword2'),NEWLINE )NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str,NEWLINE na_values='', keep_default_na=False)NEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (5, 
2 + 1) # plus IIDNEWLINENEWLINE def test_phenotype_query_yaml_get_fields(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example10/example10_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE yaml_data = b"""NEWLINE covariates:NEWLINE field_name_34: c34_0_0 NEWLINE field_name_47: c47_0_0NEWLINENEWLINE fields:NEWLINE instance0: c21_0_0NEWLINE instance1: c21_1_0 NEWLINE instance2: c21_2_0 NEWLINE """NEWLINENEWLINE # RunNEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'fields',NEWLINE })NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str,NEWLINE na_values='', keep_default_na=False)NEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (5, 3 + 1) # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert all(x in pheno_file.index for x in (1000010, 1000020, 1000030, 1000040, 1000050))NEWLINENEWLINE expected_columns = ['IID'] + ['instance0', 'instance1', 'instance2']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINE # column orderNEWLINE assert pheno_file.columns.tolist()[0] == 'IID'NEWLINENEWLINE assert pheno_file.loc[1000010, 'IID'] == '1000010'NEWLINE assert pheno_file.loc[1000010, 'instance0'] == 'Option number 1'NEWLINE assert pheno_file.loc[1000010, 'instance1'] == 'No response'NEWLINE assert pheno_file.loc[1000010, 'instance2'] == 'Yes'NEWLINENEWLINE assert pheno_file.loc[1000040, 'IID'] == '1000040'NEWLINE assert pheno_file.loc[1000040, 'instance0'] == 'Option number 4'NEWLINE assert pheno_file.loc[1000040, 'instance1'] == "I don't know"NEWLINE assert pheno_file.loc[1000040, 'instance2'] == 'NA'NEWLINENEWLINE def test_phenotype_query_yaml_filter_samples_with_include_only(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example10/example10_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - c47_0_0 > 0NEWLINE NEWLINE covariates:NEWLINE field_name_34: c34_0_0 NEWLINE field_name_47: c47_0_0NEWLINENEWLINE fields:NEWLINE instance0: c21_0_0NEWLINE instance1: c21_1_0 NEWLINE instance2: c21_2_0 NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 2NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'fields',NEWLINE })NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str,NEWLINE na_values='', keep_default_na=False)NEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 3 + 1), pheno_file.shape # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert all(x in pheno_file.index for x in (1000010, 1000040)), pheno_file.index.tolist()NEWLINENEWLINE expected_columns = ['IID'] + ['instance0', 'instance1', 'instance2']NEWLINE assert len(pheno_file.columns) == 
len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINE # column orderNEWLINE assert pheno_file.columns.tolist()[0] == 'IID'NEWLINENEWLINE assert pheno_file.loc[1000010, 'IID'] == '1000010'NEWLINE assert pheno_file.loc[1000010, 'instance0'] == 'Option number 1'NEWLINE assert pheno_file.loc[1000010, 'instance1'] == 'No response'NEWLINE assert pheno_file.loc[1000010, 'instance2'] == 'Yes'NEWLINENEWLINE assert pheno_file.loc[1000040, 'IID'] == '1000040'NEWLINE assert pheno_file.loc[1000040, 'instance0'] == 'Option number 4'NEWLINE assert pheno_file.loc[1000040, 'instance1'] == "I don't know"NEWLINE assert pheno_file.loc[1000040, 'instance2'] == 'NA'NEWLINENEWLINE #NEWLINE # Ask covariatesNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'covariates',NEWLINE })NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str,NEWLINE na_values='', keep_default_na=False)NEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 2 + 1) # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert all(x in pheno_file.index for x in (1000010, 1000040))NEWLINENEWLINE expected_columns = ['IID'] + ['field_name_34', 'field_name_47']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINE # column orderNEWLINE assert pheno_file.columns.tolist()[0] == 'IID'NEWLINENEWLINE assert pheno_file.loc[1000010, 'IID'] == '1000010'NEWLINE assert pheno_file.loc[1000010, 'field_name_34'] == '-33'NEWLINE assert pheno_file.loc[1000010, 'field_name_47'] == '41.55312'NEWLINENEWLINE assert pheno_file.loc[1000040, 'IID'] == '1000040'NEWLINE assert pheno_file.loc[1000040, 'field_name_34'] == '3'NEWLINE assert pheno_file.loc[1000040, 'field_name_47'] == '5.20832'NEWLINENEWLINE def test_phenotype_query_yaml_filter_samples_condition_breaking_for_fields_and_covariates(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example10/example10_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - c47_0_0 > 0NEWLINE - c46_0_0 < 0 or c46_0_0 = 4 or c46_0_0 = 1NEWLINENEWLINE covariates:NEWLINE field_name_34: c34_0_0 NEWLINE field_name_47: c47_0_0NEWLINENEWLINE fields:NEWLINE instance0: c21_0_0NEWLINE instance1: c21_1_0 NEWLINE instance2: c21_2_0 NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 2NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'fields',NEWLINE })NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str,NEWLINE na_values='', keep_default_na=False)NEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 3 + 1), pheno_file.shape # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert all(x in pheno_file.index for x in (1000010, 1000040)), 
pheno_file.index.tolist()NEWLINENEWLINE expected_columns = ['IID'] + ['instance0', 'instance1', 'instance2']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINE # column orderNEWLINE assert pheno_file.columns.tolist()[0] == 'IID'NEWLINENEWLINE assert pheno_file.loc[1000010, 'IID'] == '1000010'NEWLINE assert pheno_file.loc[1000010, 'instance0'] == 'Option number 1'NEWLINE assert pheno_file.loc[1000010, 'instance1'] == 'No response'NEWLINE assert pheno_file.loc[1000010, 'instance2'] == 'Yes'NEWLINENEWLINE assert pheno_file.loc[1000040, 'IID'] == '1000040'NEWLINE assert pheno_file.loc[1000040, 'instance0'] == 'Option number 4'NEWLINE assert pheno_file.loc[1000040, 'instance1'] == "I don't know"NEWLINE assert pheno_file.loc[1000040, 'instance2'] == 'NA'NEWLINENEWLINE #NEWLINE # Ask covariatesNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'covariates',NEWLINE })NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str,NEWLINE na_values='', keep_default_na=False)NEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 2 + 1) # plus IIDNEWLINENEWLINE assert pheno_file.index.name == 'FID'NEWLINE assert all(x in pheno_file.index for x in (1000010, 1000040))NEWLINENEWLINE expected_columns = ['IID'] + ['field_name_34', 'field_name_47']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINE # column orderNEWLINE assert pheno_file.columns.tolist()[0] == 'IID'NEWLINENEWLINE assert pheno_file.loc[1000010, 'IID'] == '1000010'NEWLINE assert pheno_file.loc[1000010, 'field_name_34'] == '-33'NEWLINE assert pheno_file.loc[1000010, 'field_name_47'] == '41.55312'NEWLINENEWLINE assert pheno_file.loc[1000040, 'IID'] == '1000040'NEWLINE assert pheno_file.loc[1000040, 'field_name_34'] == '3'NEWLINE assert pheno_file.loc[1000040, 'field_name_47'] == '5.20832'NEWLINENEWLINE def test_phenotype_query_yaml_specify_bgenie_format(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example10/example10_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - c47_0_0 > 0NEWLINENEWLINE covariates:NEWLINE field_name_34: c34_0_0 NEWLINE field_name_47: c47_0_0NEWLINENEWLINE fields:NEWLINE instance0: c21_0_0NEWLINE instance1: c21_1_0 NEWLINE instance2: c21_2_0 NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 5NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'fields',NEWLINE 'missing_code': '-999',NEWLINE }, headers={'accept': 'text/bgenie'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,NEWLINE dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 3), pheno_file.shapeNEWLINENEWLINE 
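        # bgenie output carries no FID/IID columns: it is a space-separated
        # table with one row per sample, in the order of the BGEN .sample file,
        # and samples excluded by samples_filters are written with the requested
        # missing_code ('-999'). The positional checks below rely on that order
        # (here only rows 2 and 3 hold real data, the two samples passing
        # c47_0_0 > 0).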
expected_columns = ['instance0', 'instance1', 'instance2']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[0, 'instance0'] == '-999', pheno_file.loc[0, 'instance0']NEWLINE assert pheno_file.loc[0, 'instance1'] == '-999'NEWLINE assert pheno_file.loc[0, 'instance2'] == '-999'NEWLINENEWLINE assert pheno_file.loc[1, 'instance0'] == '-999'NEWLINE assert pheno_file.loc[1, 'instance1'] == '-999'NEWLINE assert pheno_file.loc[1, 'instance2'] == '-999'NEWLINENEWLINE assert pheno_file.loc[2, 'instance0'] == 'Option number 4'NEWLINE assert pheno_file.loc[2, 'instance1'] == "I don't know"NEWLINE assert pheno_file.loc[2, 'instance2'] == '-999'NEWLINENEWLINE assert pheno_file.loc[3, 'instance0'] == 'Option number 1'NEWLINE assert pheno_file.loc[3, 'instance1'] == 'No response'NEWLINE assert pheno_file.loc[3, 'instance2'] == 'Yes'NEWLINENEWLINE assert pheno_file.loc[4, 'instance0'] == '-999'NEWLINE assert pheno_file.loc[4, 'instance1'] == '-999'NEWLINE assert pheno_file.loc[4, 'instance2'] == '-999'NEWLINENEWLINE #NEWLINE # Ask covariatesNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'covariates',NEWLINE 'missing_code': '-999',NEWLINE }, headers={'accept': 'text/bgenie'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,NEWLINE dtype=str, na_values='', keep_default_na=False)NEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 2)NEWLINENEWLINE expected_columns = ['field_name_34', 'field_name_47']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[0, 'field_name_34'] == '-999'NEWLINE assert pheno_file.loc[0, 'field_name_47'] == '-999'NEWLINENEWLINE assert pheno_file.loc[1, 'field_name_34'] == '-999'NEWLINE assert pheno_file.loc[1, 'field_name_47'] == '-999'NEWLINENEWLINE assert pheno_file.loc[2, 'field_name_34'] == '3'NEWLINE assert pheno_file.loc[2, 'field_name_47'] == '5.20832'NEWLINENEWLINE assert pheno_file.loc[3, 'field_name_34'] == '-33'NEWLINE assert pheno_file.loc[3, 'field_name_47'] == '41.55312'NEWLINENEWLINE assert pheno_file.loc[4, 'field_name_34'] == '-999'NEWLINE assert pheno_file.loc[4, 'field_name_47'] == '-999'NEWLINENEWLINE def test_phenotype_query_yaml_specify_csv_format(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example10/example10_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - c47_0_0 > 0NEWLINENEWLINE covariates:NEWLINE field_name_34: c34_0_0 NEWLINE field_name_47: c47_0_0NEWLINENEWLINE fields:NEWLINE instance0: c21_0_0NEWLINE instance1: c21_1_0 NEWLINE instance2: c21_2_0 NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 2NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'fields',NEWLINE }, headers={'accept': 'text/csv'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE 
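        # Unlike bgenie, csv output is indexed by eid and includes only the
        # samples that pass samples_filters (two here), so no missing_code
        # padding rows appear.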
pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,NEWLINE index_col='eid', dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 3), pheno_file.shapeNEWLINENEWLINE expected_columns = ['instance0', 'instance1', 'instance2']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[1000040, 'instance0'] == 'Option number 4'NEWLINE assert pheno_file.loc[1000040, 'instance1'] == "I don't know"NEWLINE assert pheno_file.loc[1000040, 'instance2'] == 'NA'NEWLINENEWLINE assert pheno_file.loc[1000010, 'instance0'] == 'Option number 1'NEWLINE assert pheno_file.loc[1000010, 'instance1'] == 'No response'NEWLINE assert pheno_file.loc[1000010, 'instance2'] == 'Yes'NEWLINENEWLINE #NEWLINE # Ask covariatesNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'covariates',NEWLINE }, headers={'accept': 'text/csv'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,NEWLINE index_col='eid', dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 2)NEWLINENEWLINE expected_columns = ['field_name_34', 'field_name_47']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[1000040, 'field_name_34'] == '3'NEWLINE assert pheno_file.loc[1000040, 'field_name_47'] == '5.20832'NEWLINENEWLINE assert pheno_file.loc[1000010, 'field_name_34'] == '-33'NEWLINE assert pheno_file.loc[1000010, 'field_name_47'] == '41.55312'NEWLINENEWLINE def test_phenotype_query_yaml_specify_bgenie_format_missing_code_default(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example10/example10_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - c47_0_0 > 0NEWLINENEWLINE covariates:NEWLINE field_name_34: c34_0_0 NEWLINE field_name_47: c47_0_0NEWLINENEWLINE fields:NEWLINE instance0: c21_0_0NEWLINE instance1: c21_1_0 NEWLINE instance2: c21_2_0 NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 5NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'fields',NEWLINE }, headers={'accept': 'text/bgenie'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,NEWLINE dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 3), pheno_file.shapeNEWLINENEWLINE expected_columns = ['instance0', 'instance1', 'instance2']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[0, 
'instance0'] == 'NA'NEWLINE assert pheno_file.loc[0, 'instance1'] == 'NA'NEWLINE assert pheno_file.loc[0, 'instance2'] == 'NA'NEWLINENEWLINE assert pheno_file.loc[1, 'instance0'] == 'NA'NEWLINE assert pheno_file.loc[1, 'instance1'] == 'NA'NEWLINE assert pheno_file.loc[1, 'instance2'] == 'NA'NEWLINENEWLINE assert pheno_file.loc[2, 'instance0'] == 'Option number 4'NEWLINE assert pheno_file.loc[2, 'instance1'] == "I don't know"NEWLINE assert pheno_file.loc[2, 'instance2'] == 'NA'NEWLINENEWLINE assert pheno_file.loc[3, 'instance0'] == 'Option number 1'NEWLINE assert pheno_file.loc[3, 'instance1'] == 'No response'NEWLINE assert pheno_file.loc[3, 'instance2'] == 'Yes'NEWLINENEWLINE assert pheno_file.loc[4, 'instance0'] == 'NA'NEWLINE assert pheno_file.loc[4, 'instance1'] == 'NA'NEWLINE assert pheno_file.loc[4, 'instance2'] == 'NA'NEWLINENEWLINE def test_phenotype_query_yaml_specify_csv_format_missing_code_changed(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example10/example10_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - c47_0_0 > 0NEWLINENEWLINE covariates:NEWLINE field_name_34: c34_0_0 NEWLINE field_name_47: c47_0_0NEWLINENEWLINE fields:NEWLINE instance0: c21_0_0NEWLINE instance1: c21_1_0 NEWLINE instance2: c21_2_0 NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 2NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'fields',NEWLINE 'missing_code': '-999',NEWLINE }, headers={'accept': 'text/csv'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,NEWLINE index_col='eid', dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 3), pheno_file.shapeNEWLINENEWLINE expected_columns = ['instance0', 'instance1', 'instance2']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[1000040, 'instance0'] == 'Option number 4'NEWLINE assert pheno_file.loc[1000040, 'instance1'] == "I don't know"NEWLINE assert pheno_file.loc[1000040, 'instance2'] == '-999'NEWLINENEWLINE assert pheno_file.loc[1000010, 'instance0'] == 'Option number 1'NEWLINE assert pheno_file.loc[1000010, 'instance1'] == 'No response'NEWLINE assert pheno_file.loc[1000010, 'instance2'] == 'Yes'NEWLINENEWLINE def test_phenotype_query_yaml_disease_by_coding_first_bgenie(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=10)NEWLINENEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - c34_0_0 >= -5NEWLINE NEWLINE data:NEWLINE disease0:NEWLINE case_control:NEWLINE 84:NEWLINE coding: [N308]NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 6NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/bgenie'})NEWLINENEWLINE # ValidateNEWLINE assert 
response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,NEWLINE dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE expected_columns = ['disease0']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[0, 'disease0'] == '0' # 1000050NEWLINE assert pheno_file.loc[1, 'disease0'] == 'NA' # 1000030NEWLINE assert pheno_file.loc[2, 'disease0'] == '1' # 1000040NEWLINE assert pheno_file.loc[3, 'disease0'] == 'NA' # 1000010NEWLINE assert pheno_file.loc[4, 'disease0'] == '1' # 1000020NEWLINE assert pheno_file.loc[5, 'disease0'] == '0' # 1000070NEWLINE # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)NEWLINENEWLINE def test_phenotype_query_yaml_disease_by_coding_second_bgenie(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=20)NEWLINENEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - c34_0_0 >= -5NEWLINENEWLINE data:NEWLINE disease0:NEWLINE case_control:NEWLINE 84:NEWLINE coding: [E103]NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 6NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/bgenie'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,NEWLINE dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE expected_columns = ['disease0']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[0, 'disease0'] == '1' # 1000050NEWLINE assert pheno_file.loc[1, 'disease0'] == 'NA' # 1000030NEWLINE assert pheno_file.loc[2, 'disease0'] == '1' # 1000040NEWLINE assert pheno_file.loc[3, 'disease0'] == 'NA' # 1000010NEWLINE assert pheno_file.loc[4, 'disease0'] == '1' # 1000020NEWLINE assert pheno_file.loc[5, 'disease0'] == '0' # 1000070NEWLINE # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)NEWLINENEWLINE def test_phenotype_query_yaml_disease_by_coding_different_filter_bgenie(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=20)NEWLINENEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - c31_0_0 > '2001-01-01'NEWLINENEWLINE data:NEWLINE disease0:NEWLINE case_control:NEWLINE 84:NEWLINE coding: [E103]NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 6NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 
'text/bgenie'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,NEWLINE dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE expected_columns = ['disease0']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[0, 'disease0'] == 'NA' # 1000050NEWLINE assert pheno_file.loc[1, 'disease0'] == 'NA' # 1000030NEWLINE assert pheno_file.loc[2, 'disease0'] == 'NA' # 1000040NEWLINE assert pheno_file.loc[3, 'disease0'] == '1' # 1000010NEWLINE assert pheno_file.loc[4, 'disease0'] == '1' # 1000020NEWLINE assert pheno_file.loc[5, 'disease0'] == '0' # 1000070NEWLINE # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)NEWLINENEWLINE def test_phenotype_query_yaml_disease_by_coding_filter_includes_nulls_bgenie(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=20)NEWLINENEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - c31_0_0 is null or c31_0_0 > '2001-01-01'NEWLINENEWLINE data:NEWLINE disease0:NEWLINE case_control:NEWLINE 84:NEWLINE coding: [E103]NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 6NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/bgenie'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,NEWLINE dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE expected_columns = ['disease0']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[0, 'disease0'] == '1' # 1000050NEWLINE assert pheno_file.loc[1, 'disease0'] == 'NA' # 1000030NEWLINE assert pheno_file.loc[2, 'disease0'] == 'NA' # 1000040NEWLINE assert pheno_file.loc[3, 'disease0'] == '1' # 1000010NEWLINE assert pheno_file.loc[4, 'disease0'] == '1' # 1000020NEWLINE assert pheno_file.loc[5, 'disease0'] == '0' # 1000070NEWLINE # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)NEWLINENEWLINE def test_phenotype_query_yaml_disease_by_coding_multiple_filters_using_like_bgenie(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=20)NEWLINENEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - c31_0_0 is null or c31_0_0 > '2001-01-01'NEWLINE - c21_2_0 not like '%%obab%%'NEWLINENEWLINE data:NEWLINE disease0:NEWLINE case_control:NEWLINE 84:NEWLINE coding: [E103]NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 6NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = 
self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/bgenie'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,NEWLINE dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE expected_columns = ['disease0']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[0, 'disease0'] == 'NA' # 1000050NEWLINE assert pheno_file.loc[1, 'disease0'] == 'NA' # 1000030NEWLINE assert pheno_file.loc[2, 'disease0'] == 'NA' # 1000040NEWLINE assert pheno_file.loc[3, 'disease0'] == '1' # 1000010NEWLINE assert pheno_file.loc[4, 'disease0'] == '1' # 1000020NEWLINE assert pheno_file.loc[5, 'disease0'] == '0' # 1000070NEWLINE # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)NEWLINENEWLINE def test_phenotype_query_yaml_disease_by_coding_fields_in_filters_are_in_different_tables_bgenie(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - c21_1_0 not like '%%respo%%'NEWLINE - c47_0_0 > 0NEWLINENEWLINE data:NEWLINE disease0:NEWLINE case_control:NEWLINE 84:NEWLINE coding: [Q750]NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 6NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/bgenie'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,NEWLINE dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE expected_columns = ['disease0']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[0, 'disease0'] == 'NA' # 1000050NEWLINE assert pheno_file.loc[1, 'disease0'] == 'NA' # 1000030NEWLINE assert pheno_file.loc[2, 'disease0'] == '1' # 1000040NEWLINE assert pheno_file.loc[3, 'disease0'] == 'NA' # 1000010NEWLINE assert pheno_file.loc[4, 'disease0'] == 'NA' # 1000020NEWLINE assert pheno_file.loc[5, 'disease0'] == '0' # 1000070NEWLINE # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)NEWLINENEWLINE def test_phenotype_query_yaml_disease_by_coding_different_data_field_bgenie(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')NEWLINE - eid not in (select eid from events where 
field_id = 84 and event in ('Q750'))NEWLINE NEWLINE data:NEWLINE disease0:NEWLINE case_control:NEWLINE 85:NEWLINE coding: [1114]NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 6NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/bgenie'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,NEWLINE dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE expected_columns = ['disease0']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[0, 'disease0'] == '1' # 1000050NEWLINE assert pheno_file.loc[1, 'disease0'] == 'NA' # 1000030NEWLINE assert pheno_file.loc[2, 'disease0'] == 'NA' # 1000040NEWLINE assert pheno_file.loc[3, 'disease0'] == 'NA' # 1000010NEWLINE assert pheno_file.loc[4, 'disease0'] == '1' # 1000020NEWLINE assert pheno_file.loc[5, 'disease0'] == '0' # 1000070NEWLINE # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)NEWLINENEWLINE def test_phenotype_query_yaml_disease_by_coding_different_disease_name_bgenie(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')NEWLINE - eid not in (select eid from events where field_id = 84 and event in ('Q750'))NEWLINENEWLINE data:NEWLINE another_disease_name:NEWLINE case_control:NEWLINE 85:NEWLINE coding: [1114]NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 6NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/bgenie'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,NEWLINE dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE expected_columns = ['another_disease_name']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[0, 'another_disease_name'] == '1' # 1000050NEWLINE assert pheno_file.loc[1, 'another_disease_name'] == 'NA' # 1000030NEWLINE assert pheno_file.loc[2, 'another_disease_name'] == 'NA' # 1000040NEWLINE assert pheno_file.loc[3, 'another_disease_name'] == 'NA' # 1000010NEWLINE assert pheno_file.loc[4, 'another_disease_name'] == '1' # 1000020NEWLINE assert pheno_file.loc[5, 'another_disease_name'] == '0' # 1000070NEWLINE # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)NEWLINENEWLINE def 
test_phenotype_query_yaml_disease_by_coding_coding_not_list_bgenie(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')NEWLINE - eid not in (select eid from events where field_id = 84 and event in ('Q750'))NEWLINENEWLINE data:NEWLINE another_disease_name:NEWLINE case_control:NEWLINE 85:NEWLINE coding: 1114NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 6NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/bgenie'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,NEWLINE dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE expected_columns = ['another_disease_name']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[0, 'another_disease_name'] == '1' # 1000050NEWLINE assert pheno_file.loc[1, 'another_disease_name'] == 'NA' # 1000030NEWLINE assert pheno_file.loc[2, 'another_disease_name'] == 'NA' # 1000040NEWLINE assert pheno_file.loc[3, 'another_disease_name'] == 'NA' # 1000010NEWLINE assert pheno_file.loc[4, 'another_disease_name'] == '1' # 1000020NEWLINE assert pheno_file.loc[5, 'another_disease_name'] == '0' # 1000070NEWLINE # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)NEWLINENEWLINE def test_phenotype_query_yaml_disease_by_coding_coding_not_list_csv(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')NEWLINE - eid not in (select eid from events where field_id = 84 and event in ('Q750'))NEWLINENEWLINE data:NEWLINE another_disease_name:NEWLINE case_control:NEWLINE 85:NEWLINE coding: 1114NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 4NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/csv'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,NEWLINE index_col='eid', dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE expected_columns = ['another_disease_name']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[1000050, 
'another_disease_name'] == '1' # 1000050NEWLINE # assert pheno_file.loc[1000030, 'another_disease_name'] == '0' # 1000030NEWLINE # assert pheno_file.loc['1000040', 'another_disease_name'] == 'NA' # 1000040NEWLINE # assert pheno_file.loc[1000010, 'another_disease_name'] == '1' # 1000010NEWLINE assert pheno_file.loc[1000020, 'another_disease_name'] == '1' # 1000020NEWLINE assert pheno_file.loc[1000070, 'another_disease_name'] == '0' # 1000070NEWLINE assert pheno_file.loc[1000060, 'another_disease_name'] == '1' # 1000060NEWLINENEWLINE def test_phenotype_query_yaml_disease_by_coding_many_codings_bgenie(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - lower(c21_2_0) in ('yes', 'no', 'maybe')NEWLINENEWLINE data:NEWLINE another_disease_name:NEWLINE case_control:NEWLINE 85:NEWLINE coding: [1114, 1701]NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 6NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/bgenie'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,NEWLINE dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE expected_columns = ['another_disease_name']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[0, 'another_disease_name'] == 'NA' # 1000050NEWLINE assert pheno_file.loc[1, 'another_disease_name'] == '0' # 1000030NEWLINE assert pheno_file.loc[2, 'another_disease_name'] == 'NA' # 1000040NEWLINE assert pheno_file.loc[3, 'another_disease_name'] == '1' # 1000010NEWLINE assert pheno_file.loc[4, 'another_disease_name'] == '1' # 1000020NEWLINE assert pheno_file.loc[5, 'another_disease_name'] == '0' # 1000070NEWLINE # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)NEWLINENEWLINE def test_phenotype_query_yaml_disease_by_coding_many_codings_csv(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - lower(c21_2_0) in ('yes', 'no', 'maybe')NEWLINENEWLINE data:NEWLINE another_disease_name:NEWLINE case_control:NEWLINE 85:NEWLINE coding: [1114, 1701]NEWLINE """NEWLINENEWLINE # text/csv does not fetch all samples in 'samples' table by defaultNEWLINE N_EXPECTED_SAMPLES = 5NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/csv'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,NEWLINE index_col='eid', dtype=str, na_values='', 
keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE expected_columns = ['another_disease_name']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE # assert pheno_file.loc['1000050', 'another_disease_name'] == 'NA' # 1000050NEWLINE assert pheno_file.loc[1000030, 'another_disease_name'] == '0' # 1000030NEWLINE # assert pheno_file.loc['1000040', 'another_disease_name'] == 'NA' # 1000040NEWLINE assert pheno_file.loc[1000010, 'another_disease_name'] == '1' # 1000010NEWLINE assert pheno_file.loc[1000020, 'another_disease_name'] == '1' # 1000020NEWLINE assert pheno_file.loc[1000070, 'another_disease_name'] == '0' # 1000070NEWLINE assert pheno_file.loc[1000060, 'another_disease_name'] == '1' # 1000060NEWLINENEWLINE def test_phenotype_query_yaml_disease_by_coding_many_data_fields_bgenie(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE # in this case the filters are not necessary, but it is forced to avoid a problem with joining that willNEWLINE # be tested in another unit testNEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - c21_2_0 is null or lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')NEWLINENEWLINE data:NEWLINE another_disease_name:NEWLINE case_control:NEWLINE 85:NEWLINE coding: [978, 1701]NEWLINE 84:NEWLINE coding: [Z876, Z678]NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 6NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/bgenie'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,NEWLINE dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE expected_columns = ['another_disease_name']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[0, 'another_disease_name'] == '1' # 1000050NEWLINE assert pheno_file.loc[1, 'another_disease_name'] == '1' # 1000030NEWLINE assert pheno_file.loc[2, 'another_disease_name'] == '0' # 1000040NEWLINE assert pheno_file.loc[3, 'another_disease_name'] == '1' # 1000010NEWLINE assert pheno_file.loc[4, 'another_disease_name'] == '0' # 1000020NEWLINE assert pheno_file.loc[5, 'another_disease_name'] == '1' # 1000070NEWLINE # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)NEWLINENEWLINE def test_phenotype_query_yaml_disease_by_coding_many_data_fields_csv(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE # in this case the filters are not necessary, but it is forced to avoid a problem with joining that willNEWLINE # be tested in another unit testNEWLINE 
yaml_data = b"""NEWLINE samples_filters:NEWLINE - c21_2_0 is null or lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')NEWLINENEWLINE data:NEWLINE another_disease_name:NEWLINE case_control:NEWLINE 85:NEWLINE coding: [978, 1701]NEWLINE 84:NEWLINE coding: [Z876, Z678]NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 7NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/csv'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,NEWLINE index_col='eid', dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE expected_columns = ['another_disease_name']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[1000050, 'another_disease_name'] == '1' # 1000050NEWLINE assert pheno_file.loc[1000030, 'another_disease_name'] == '1' # 1000030NEWLINE assert pheno_file.loc[1000040, 'another_disease_name'] == '0' # 1000040NEWLINE assert pheno_file.loc[1000010, 'another_disease_name'] == '1' # 1000010NEWLINE assert pheno_file.loc[1000020, 'another_disease_name'] == '0' # 1000020NEWLINE assert pheno_file.loc[1000070, 'another_disease_name'] == '1' # 1000070NEWLINE assert pheno_file.loc[1000060, 'another_disease_name'] == '1' # 1000060NEWLINENEWLINE def test_phenotype_query_yaml_disease_filters_not_referencing_table_bgenie(self):NEWLINE """This test forces a global table to obtain eid from for controls"""NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE # in this case the filters are not necessary, but it is forced to avoid a problem with joining that willNEWLINE # be tested in another unit testNEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - 1 = 1NEWLINENEWLINE data:NEWLINE another_disease_name:NEWLINE case_control:NEWLINE 85:NEWLINE coding: [978, 1701]NEWLINE 84:NEWLINE coding: [Z876, Z678]NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 6NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/bgenie'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,NEWLINE dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE expected_columns = ['another_disease_name']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[0, 'another_disease_name'] == '1' # 1000050NEWLINE assert pheno_file.loc[1, 'another_disease_name'] == '1' # 1000030NEWLINE assert pheno_file.loc[2, 
'another_disease_name'] == '0' # 1000040NEWLINE assert pheno_file.loc[3, 'another_disease_name'] == '1' # 1000010NEWLINE assert pheno_file.loc[4, 'another_disease_name'] == '0' # 1000020NEWLINE assert pheno_file.loc[5, 'another_disease_name'] == '1' # 1000070NEWLINE # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)NEWLINENEWLINE def test_phenotype_query_yaml_disease_filters_not_referencing_table_csv(self):NEWLINE """This test forces a global table to obtain eid from for controls"""NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE # in this case the filters are not necessary, but it is forced to avoid a problem with joining that willNEWLINE # be tested in another unit testNEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - 1 = 1NEWLINENEWLINE data:NEWLINE another_disease_name:NEWLINE case_control:NEWLINE 85:NEWLINE coding: [978, 1701]NEWLINE 84:NEWLINE coding: [Z876, Z678]NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 7NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/csv'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,NEWLINE index_col='eid', dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE expected_columns = ['another_disease_name']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[1000050, 'another_disease_name'] == '1' # 1000050NEWLINE assert pheno_file.loc[1000030, 'another_disease_name'] == '1' # 1000030NEWLINE assert pheno_file.loc[1000040, 'another_disease_name'] == '0' # 1000040NEWLINE assert pheno_file.loc[1000010, 'another_disease_name'] == '1' # 1000010NEWLINE assert pheno_file.loc[1000020, 'another_disease_name'] == '0' # 1000020NEWLINE assert pheno_file.loc[1000070, 'another_disease_name'] == '1' # 1000070NEWLINE assert pheno_file.loc[1000060, 'another_disease_name'] == '1' # 1000060NEWLINENEWLINE def test_phenotype_query_yaml_disease_no_filters_csv(self):NEWLINE """This test forces a global table to obtain eid from for controls"""NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE # in this case the filters are not necessary, but it is forced to avoid a problem with joining that willNEWLINE # be tested in another unit testNEWLINE yaml_data = b"""NEWLINE data:NEWLINE another_disease_name:NEWLINE case_control:NEWLINE 85:NEWLINE coding: [978, 1701]NEWLINE 84:NEWLINE coding: [Z876, Z678]NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 7NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/csv'})NEWLINENEWLINE # ValidateNEWLINE assert 
response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,NEWLINE index_col='eid', dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE expected_columns = ['another_disease_name']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[1000050, 'another_disease_name'] == '1' # 1000050NEWLINE assert pheno_file.loc[1000030, 'another_disease_name'] == '1' # 1000030NEWLINE assert pheno_file.loc[1000040, 'another_disease_name'] == '0' # 1000040NEWLINE assert pheno_file.loc[1000010, 'another_disease_name'] == '1' # 1000010NEWLINE assert pheno_file.loc[1000020, 'another_disease_name'] == '0' # 1000020NEWLINE assert pheno_file.loc[1000070, 'another_disease_name'] == '1' # 1000070NEWLINE assert pheno_file.loc[1000060, 'another_disease_name'] == '1' # 1000060NEWLINENEWLINE def test_phenotype_query_yaml_disease_many_columns_bgenie(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE # in this case the filters are not necessary, but it is forced to avoid a problem with joining that willNEWLINE # be tested in another unit testNEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')NEWLINE - c34_0_0 > -10NEWLINENEWLINE data:NEWLINE another_disease_name:NEWLINE case_control:NEWLINE 85:NEWLINE coding: [978, 1701]NEWLINE 84:NEWLINE coding: [Z876, Z678]NEWLINE second_column:NEWLINE case_control:NEWLINE 85:NEWLINE coding: 1114NEWLINE third_column:NEWLINE case_control:NEWLINE 84:NEWLINE coding: [E103, Z678]NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 6NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/bgenie'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,NEWLINE dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 3), pheno_file.shapeNEWLINENEWLINE expected_columns = ['another_disease_name', 'second_column', 'third_column']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[0, 'another_disease_name'] == '1' # 1000050NEWLINE assert pheno_file.loc[1, 'another_disease_name'] == '1' # 1000030NEWLINE assert pheno_file.loc[2, 'another_disease_name'] == 'NA' # 1000040NEWLINE assert pheno_file.loc[3, 'another_disease_name'] == 'NA' # 1000010NEWLINE assert pheno_file.loc[4, 'another_disease_name'] == '0' # 1000020NEWLINE assert pheno_file.loc[5, 'another_disease_name'] == '1' # 1000070NEWLINE # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)NEWLINENEWLINE assert pheno_file.loc[0, 'second_column'] == '1' # 1000050NEWLINE assert pheno_file.loc[1, 
'second_column'] == '0' # 1000030NEWLINE assert pheno_file.loc[2, 'second_column'] == 'NA' # 1000040NEWLINE assert pheno_file.loc[3, 'second_column'] == 'NA' # 1000010NEWLINE assert pheno_file.loc[4, 'second_column'] == '1' # 1000020NEWLINE assert pheno_file.loc[5, 'second_column'] == '0' # 1000070NEWLINE # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)NEWLINENEWLINE assert pheno_file.loc[0, 'third_column'] == '1' # 1000050NEWLINE assert pheno_file.loc[1, 'third_column'] == '0' # 1000030NEWLINE assert pheno_file.loc[2, 'third_column'] == 'NA' # 1000040NEWLINE assert pheno_file.loc[3, 'third_column'] == 'NA' # 1000010NEWLINE assert pheno_file.loc[4, 'third_column'] == '1' # 1000020NEWLINE assert pheno_file.loc[5, 'third_column'] == '1' # 1000070NEWLINE # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)NEWLINENEWLINE def test_phenotype_query_yaml_disease_many_columns_csv(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE # in this case the filters are not necessary, but it is forced to avoid a problem with joining that willNEWLINE # be tested in another unit testNEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')NEWLINE - c34_0_0 > -10NEWLINENEWLINE data:NEWLINE another_disease_name:NEWLINE case_control:NEWLINE 85:NEWLINE coding: [978, 1701]NEWLINE 84:NEWLINE coding: [Z876, Z678]NEWLINE second_column:NEWLINE case_control:NEWLINE 85:NEWLINE coding: 1114NEWLINE third_column:NEWLINE case_control:NEWLINE 84:NEWLINE coding: [E103, Z678]NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 4NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/csv'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,NEWLINE index_col='eid', dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 3), pheno_file.shapeNEWLINENEWLINE expected_columns = ['another_disease_name', 'second_column', 'third_column']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[1000050, 'another_disease_name'] == '1' # 1000050NEWLINE assert pheno_file.loc[1000030, 'another_disease_name'] == '1' # 1000030NEWLINE assert pheno_file.loc[1000020, 'another_disease_name'] == '0' # 1000020NEWLINE assert pheno_file.loc[1000070, 'another_disease_name'] == '1' # 1000070NEWLINENEWLINE assert pheno_file.loc[1000050, 'second_column'] == '1' # 1000050NEWLINE assert pheno_file.loc[1000030, 'second_column'] == '0' # 1000030NEWLINE assert pheno_file.loc[1000020, 'second_column'] == '1' # 1000020NEWLINE assert pheno_file.loc[1000070, 'second_column'] == '0' # 1000070NEWLINENEWLINE assert pheno_file.loc[1000050, 'third_column'] == '1' # 1000050NEWLINE assert pheno_file.loc[1000030, 'third_column'] == '0' # 1000030NEWLINE assert pheno_file.loc[1000020, 'third_column'] == '1' # 1000020NEWLINE assert pheno_file.loc[1000070, 'third_column'] == '1' # 
1000070NEWLINENEWLINE def test_phenotype_query_yaml_disease_sql_alone_csv(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE # in this case the filters are not necessary, but it is forced to avoid a problem with joining that willNEWLINE # be tested in another unit testNEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')NEWLINE - c34_0_0 is null or c34_0_0 > -10NEWLINENEWLINE data:NEWLINE mydisease:NEWLINE sql:NEWLINE 1: c46_0_0 > 0NEWLINE 0: c46_0_0 < 0NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 4NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/csv'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,NEWLINE index_col='eid', dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE expected_columns = ['mydisease']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[1000050, 'mydisease'] == '1' # 1000050NEWLINE assert pheno_file.loc[1000030, 'mydisease'] == '0' # 1000030NEWLINE assert pheno_file.loc[1000020, 'mydisease'] == '0' # 1000020NEWLINE assert pheno_file.loc[1000070, 'mydisease'] == '1' # 1000070NEWLINENEWLINE @unittest.skip("We should check if there are repeated eid values, like in this case, due to bad specification of conditions for categories")NEWLINE def test_phenotype_query_yaml_disease_sql_conflicting_duplicated_samples_csv(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE # in this case the filters are not necessary, but it is forced to avoid a problem with joining that willNEWLINE # be tested in another unit testNEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')NEWLINE - c34_0_0 is null or c34_0_0 > -10NEWLINENEWLINE data:NEWLINE mydisease:NEWLINE sql:NEWLINE 1: c46_0_0 >= 1NEWLINE 0: c46_0_0 <= 1NEWLINE """NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/csv'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 400, response.status_codeNEWLINENEWLINE def test_phenotype_query_yaml_disease_sql_with_many_columns_csv(self):NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE # Here I emulate case_control with sqlNEWLINE yaml_data = b"""NEWLINE samples_filters:NEWLINE - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')NEWLINE - c34_0_0 > -10NEWLINENEWLINE 
data:NEWLINE another_disease_name:NEWLINE sql:NEWLINE 1: >NEWLINE eid in (select eid from events where field_id = 85 and event in ('978', '1701'))NEWLINE ORNEWLINE eid in (select eid from events where field_id = 84 and event in ('Z876', 'Z678'))NEWLINE 0: >NEWLINE eid not in (NEWLINE (select eid from events where field_id = 85 and event in ('978', '1701'))NEWLINE unionNEWLINE (select eid from events where field_id = 84 and event in ('Z876', 'Z678'))NEWLINE )NEWLINE second_column:NEWLINE case_control:NEWLINE 85:NEWLINE coding: 1114NEWLINE third_column:NEWLINE case_control:NEWLINE 84:NEWLINE coding: [E103, Z678]NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 4NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE #NEWLINE response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE {NEWLINE 'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE 'section': 'data',NEWLINE }, headers={'accept': 'text/csv'})NEWLINENEWLINE # ValidateNEWLINE assert response.status_code == 200, response.status_codeNEWLINENEWLINE pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,NEWLINE index_col='eid', dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE assert pheno_file is not NoneNEWLINE assert not pheno_file.emptyNEWLINE assert pheno_file.shape == (N_EXPECTED_SAMPLES, 3), pheno_file.shapeNEWLINENEWLINE expected_columns = ['another_disease_name', 'second_column', 'third_column']NEWLINE assert len(pheno_file.columns) == len(expected_columns)NEWLINE assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE assert pheno_file.loc[1000050, 'another_disease_name'] == '1' # 1000050NEWLINE assert pheno_file.loc[1000030, 'another_disease_name'] == '1' # 1000030NEWLINE assert pheno_file.loc[1000020, 'another_disease_name'] == '0' # 1000020NEWLINE assert pheno_file.loc[1000070, 'another_disease_name'] == '1' # 1000070NEWLINENEWLINE assert pheno_file.loc[1000050, 'second_column'] == '1' # 1000050NEWLINE assert pheno_file.loc[1000030, 'second_column'] == '0' # 1000030NEWLINE assert pheno_file.loc[1000020, 'second_column'] == '1' # 1000020NEWLINE assert pheno_file.loc[1000070, 'second_column'] == '0' # 1000070NEWLINENEWLINE assert pheno_file.loc[1000050, 'third_column'] == '1' # 1000050NEWLINE assert pheno_file.loc[1000030, 'third_column'] == '0' # 1000030NEWLINE assert pheno_file.loc[1000020, 'third_column'] == '1' # 1000020NEWLINE assert pheno_file.loc[1000070, 'third_column'] == '1' # 1000070NEWLINENEWLINE def test_phenotype_query_yaml_disease_sql_no_filters_csv(self):NEWLINE """This test forces a global table to obtain eid from for controls"""NEWLINE # PrepareNEWLINE self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE # in this case the filters are not necessary, but it is forced to avoid a problem with joining that willNEWLINE # be tested in another unit testNEWLINE yaml_data = b"""NEWLINE data:NEWLINE another_disease_name:NEWLINE sql:NEWLINE 1: >NEWLINE eid in (select eid from events where field_id = 85 and event in ('978', '1701'))NEWLINE ORNEWLINE eid in (select eid from events where field_id = 84 and event in ('Z876', 'Z678'))NEWLINE 0: >NEWLINE eid not in (NEWLINE (select eid from events where field_id = 85 and event in ('978', '1701'))NEWLINE unionNEWLINE (select eid from events where field_id = 84 and event in ('Z876', 'Z678'))NEWLINE )NEWLINE """NEWLINENEWLINE N_EXPECTED_SAMPLES = 7NEWLINENEWLINE #NEWLINE # Ask fieldsNEWLINE 
#NEWLINE        response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE        {NEWLINE            'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE            'section': 'data',NEWLINE        }, headers={'accept': 'text/csv'})NEWLINENEWLINE        # ValidateNEWLINE        assert response.status_code == 200, response.status_codeNEWLINENEWLINE        pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,NEWLINE                                 index_col='eid', dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE        assert pheno_file is not NoneNEWLINE        assert not pheno_file.emptyNEWLINE        assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE        expected_columns = ['another_disease_name']NEWLINE        assert len(pheno_file.columns) == len(expected_columns)NEWLINE        assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE        assert pheno_file.loc[1000050, 'another_disease_name'] == '1' # 1000050NEWLINE        assert pheno_file.loc[1000030, 'another_disease_name'] == '1' # 1000030NEWLINE        assert pheno_file.loc[1000040, 'another_disease_name'] == '0' # 1000040NEWLINE        assert pheno_file.loc[1000010, 'another_disease_name'] == '1' # 1000010NEWLINE        assert pheno_file.loc[1000020, 'another_disease_name'] == '0' # 1000020NEWLINE        assert pheno_file.loc[1000070, 'another_disease_name'] == '1' # 1000070NEWLINE        assert pheno_file.loc[1000060, 'another_disease_name'] == '1' # 1000060NEWLINENEWLINE    def test_phenotype_query_yaml_samples_filters_condition_breaking_for_data(self):NEWLINE        # PrepareNEWLINE        self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE                   bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE                   sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE        # in this case there is an OR condition that could break everything if it is not surrounded by ()NEWLINE        yaml_data = b"""NEWLINE        samples_filters:NEWLINE            - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')NEWLINE            - c34_0_0 is null or c34_0_0 > -10 or c34_0_0 > -11NEWLINENEWLINE        data:NEWLINE            mydisease:NEWLINE                sql:NEWLINE                    1: c46_0_0 > 0NEWLINE                    0: c46_0_0 < 0NEWLINE        """NEWLINENEWLINE        N_EXPECTED_SAMPLES = 4NEWLINENEWLINE        #NEWLINE        # Ask fieldsNEWLINE        #NEWLINE        response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE        {NEWLINE            'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE            'section': 'data',NEWLINE        }, headers={'accept': 'text/csv'})NEWLINENEWLINE        # ValidateNEWLINE        assert response.status_code == 200, response.status_codeNEWLINENEWLINE        pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,NEWLINE                                 index_col='eid', dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE        assert pheno_file is not NoneNEWLINE        assert not pheno_file.emptyNEWLINE        assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shapeNEWLINENEWLINE        expected_columns = ['mydisease']NEWLINE        assert len(pheno_file.columns) == len(expected_columns)NEWLINE        assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE        assert pheno_file.loc[1000050, 'mydisease'] == '1' # 1000050NEWLINE        assert pheno_file.loc[1000030, 'mydisease'] == '0' # 1000030NEWLINE        assert pheno_file.loc[1000020, 'mydisease'] == '0' # 1000020NEWLINE        assert pheno_file.loc[1000070, 'mydisease'] == '1' # 1000070NEWLINENEWLINE    def test_phenotype_query_yaml_samples_including_numerical(self):NEWLINE        # PrepareNEWLINE        self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE                   bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE                   sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE        # in this case there is an OR condition that could break everything if it 
is not surrounded by ()NEWLINE        yaml_data = b"""NEWLINE        samples_filters:NEWLINE            - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')NEWLINE            - c34_0_0 is null or c34_0_0 > -10 or c34_0_0 > -11NEWLINENEWLINE        data:NEWLINE            continuous_data: c47_0_0NEWLINE        """NEWLINENEWLINE        N_EXPECTED_SAMPLES = 5NEWLINE        expected_columns = ['continuous_data']NEWLINENEWLINE        #NEWLINE        # Ask fieldsNEWLINE        #NEWLINE        response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE        {NEWLINE            'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE            'section': 'data',NEWLINE        }, headers={'accept': 'text/csv'})NEWLINENEWLINE        # ValidateNEWLINE        assert response.status_code == 200, response.status_codeNEWLINENEWLINE        pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,NEWLINE                                 index_col='eid', dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE        assert pheno_file is not NoneNEWLINE        assert not pheno_file.emptyNEWLINE        assert pheno_file.shape == (N_EXPECTED_SAMPLES, len(expected_columns)), pheno_file.shapeNEWLINENEWLINE        assert len(pheno_file.columns) == len(expected_columns)NEWLINE        assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE        assert pheno_file.loc[1000050, 'continuous_data'] == 'NA'NEWLINE        assert pheno_file.loc[1000030, 'continuous_data'] == '-35.31471'NEWLINE        assert pheno_file.loc[1000020, 'continuous_data'] == '-10.51461'NEWLINE        assert pheno_file.loc[1000060, 'continuous_data'] == '-0.5864'NEWLINE        assert pheno_file.loc[1000070, 'continuous_data'] == '3.5584'NEWLINENEWLINE    def test_phenotype_query_yaml_samples_including_numerical_integer(self):NEWLINE        # PrepareNEWLINE        self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE                   bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE                   sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE        # in this case there is an OR condition that could break everything if it is not surrounded by ()NEWLINE        yaml_data = b"""NEWLINE        samples_filters:NEWLINE            - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')NEWLINE            - c34_0_0 is null or c34_0_0 > -10 or c34_0_0 > -11NEWLINENEWLINE        data:NEWLINE            integer_data:NEWLINE                (case when c46_0_0 < -5 then NULL else c46_0_0 end)NEWLINE        """NEWLINENEWLINE        N_EXPECTED_SAMPLES = 5NEWLINE        expected_columns = ['integer_data']NEWLINENEWLINE        #NEWLINE        # Ask fieldsNEWLINE        #NEWLINE        response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE        {NEWLINE            'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE            'section': 'data',NEWLINE        }, headers={'accept': 'text/csv'})NEWLINENEWLINE        # ValidateNEWLINE        assert response.status_code == 200, response.status_codeNEWLINENEWLINE        pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,NEWLINE                                 index_col='eid', dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE        assert pheno_file is not NoneNEWLINE        assert not pheno_file.emptyNEWLINE        assert pheno_file.shape == (N_EXPECTED_SAMPLES, len(expected_columns)), pheno_file.shapeNEWLINENEWLINE        assert len(pheno_file.columns) == len(expected_columns)NEWLINE        assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE        assert pheno_file.loc[1000050, 'integer_data'] == '1'NEWLINE        assert pheno_file.loc[1000030, 'integer_data'] == 'NA'NEWLINE        assert pheno_file.loc[1000020, 'integer_data'] == '-2'NEWLINE        assert pheno_file.loc[1000060, 'integer_data'] == 'NA'NEWLINE        assert pheno_file.loc[1000070, 'integer_data'] == '2'NEWLINENEWLINE    def test_phenotype_query_yaml_samples_including_categorical_and_numerical(self):NEWLINE        # PrepareNEWLINE        
self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE                   bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE                   sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE        # in this case there is an OR condition that could break everything if it is not surrounded by ()NEWLINE        yaml_data = b"""NEWLINE        samples_filters:NEWLINE            - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')NEWLINE            - c34_0_0 is null or c34_0_0 > -10 or c34_0_0 > -11NEWLINENEWLINE        data:NEWLINE            mydisease:NEWLINE                sql:NEWLINE                    1: c46_0_0 > 0NEWLINE                    0: c46_0_0 < 0NEWLINE            NEWLINE            third_column:NEWLINE                case_control:NEWLINE                    84:NEWLINE                        coding: [E103, Z678]NEWLINE            NEWLINE            continuous_data:NEWLINE                c47_0_0NEWLINE            NEWLINE            integer_data: (case when c46_0_0 < 0 then NULL else c46_0_0 end)NEWLINE        """NEWLINENEWLINE        N_EXPECTED_SAMPLES = 5NEWLINE        expected_columns = ['mydisease', 'third_column', 'continuous_data', 'integer_data']NEWLINENEWLINE        #NEWLINE        # Ask fieldsNEWLINE        #NEWLINE        response = self.app.post('/ukbrest/api/v1.0/query', data=NEWLINE        {NEWLINE            'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE            'section': 'data',NEWLINE        }, headers={'accept': 'text/csv'})NEWLINENEWLINE        # ValidateNEWLINE        assert response.status_code == 200, response.status_codeNEWLINENEWLINE        pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,NEWLINE                                 index_col='eid', dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE        assert pheno_file is not NoneNEWLINE        assert not pheno_file.emptyNEWLINE        assert pheno_file.shape == (N_EXPECTED_SAMPLES, len(expected_columns)), pheno_file.shapeNEWLINENEWLINE        assert len(pheno_file.columns) == len(expected_columns)NEWLINE        assert all(x in expected_columns for x in pheno_file.columns)NEWLINENEWLINE        assert pheno_file.loc[1000050, 'mydisease'] == '1'NEWLINE        assert pheno_file.loc[1000030, 'mydisease'] == '0'NEWLINE        assert pheno_file.loc[1000020, 'mydisease'] == '0'NEWLINE        assert pheno_file.loc[1000060, 'mydisease'] == 'NA'NEWLINE        assert pheno_file.loc[1000070, 'mydisease'] == '1'NEWLINENEWLINE        assert pheno_file.loc[1000050, 'third_column'] == '1'NEWLINE        assert pheno_file.loc[1000030, 'third_column'] == '0'NEWLINE        assert pheno_file.loc[1000020, 'third_column'] == '1'NEWLINE        assert pheno_file.loc[1000060, 'third_column'] == '0'NEWLINE        assert pheno_file.loc[1000070, 'third_column'] == '1'NEWLINENEWLINE        assert pheno_file.loc[1000050, 'continuous_data'] == 'NA'NEWLINE        assert pheno_file.loc[1000030, 'continuous_data'] == '-35.31471'NEWLINE        assert pheno_file.loc[1000020, 'continuous_data'] == '-10.51461'NEWLINE        assert pheno_file.loc[1000060, 'continuous_data'] == '-0.5864'NEWLINE        assert pheno_file.loc[1000070, 'continuous_data'] == '3.5584'NEWLINENEWLINE        assert pheno_file.loc[1000050, 'integer_data'] == '1'NEWLINE        assert pheno_file.loc[1000030, 'integer_data'] == 'NA'NEWLINE        assert pheno_file.loc[1000020, 'integer_data'] == 'NA'NEWLINE        assert pheno_file.loc[1000060, 'integer_data'] == 'NA'NEWLINE        assert pheno_file.loc[1000070, 'integer_data'] == '2'NEWLINENEWLINE    def test_phenotype_query_yaml_multiple_files_in_one_yaml(self):NEWLINE        # PrepareNEWLINE        self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE                   bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE                   sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE        # in this case there is an OR condition that could break everything if it is not surrounded by ()NEWLINE        yaml_data = b"""NEWLINE        samples_filters:NEWLINE            - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')NEWLINE            - c34_0_0 is null or c34_0_0 > -10 or c34_0_0 > 
-11NEWLINENEWLINE        covariates:NEWLINE            field_name_34: c34_0_0 NEWLINE            field_name_47: c47_0_0NEWLINENEWLINE        my_first_dataset:NEWLINE            mydisease:NEWLINE                sql:NEWLINE                    1: c46_0_0 > 0NEWLINE                    0: c46_0_0 < 0NEWLINENEWLINE            continuous_data:NEWLINE                c47_0_0NEWLINENEWLINE        my_second_dataset:NEWLINE            third_column:NEWLINE                case_control:NEWLINE                    84:NEWLINE                        coding: [E103, Z678]NEWLINENEWLINE            integer_data: (case when c46_0_0 < 0 then NULL else c46_0_0 end)NEWLINE        """NEWLINENEWLINE        # covariatesNEWLINE        data_fetched =\NEWLINE            self._make_yaml_request(NEWLINE                yaml_data, 'covariates', 5,NEWLINE                ['field_name_34', 'field_name_47']NEWLINE            )NEWLINENEWLINE        assert data_fetched.loc[1000020, 'field_name_34'] == '34'NEWLINE        assert data_fetched.loc[1000030, 'field_name_34'] == '-6'NEWLINE        assert data_fetched.loc[1000050, 'field_name_34'] == '-4'NEWLINE        assert data_fetched.loc[1000060, 'field_name_34'] == 'NA'NEWLINE        assert data_fetched.loc[1000070, 'field_name_34'] == '-5'NEWLINENEWLINE        # my_first_datasetNEWLINE        data_fetched =\NEWLINE            self._make_yaml_request(NEWLINE                yaml_data, 'my_first_dataset', 5,NEWLINE                ['mydisease', 'continuous_data']NEWLINE            )NEWLINENEWLINE        assert data_fetched.loc[1000050, 'mydisease'] == '1'NEWLINE        assert data_fetched.loc[1000030, 'mydisease'] == '0'NEWLINE        assert data_fetched.loc[1000020, 'mydisease'] == '0'NEWLINE        assert data_fetched.loc[1000060, 'mydisease'] == 'NA'NEWLINE        assert data_fetched.loc[1000070, 'mydisease'] == '1'NEWLINENEWLINE        assert data_fetched.loc[1000050, 'continuous_data'] == 'NA'NEWLINE        assert data_fetched.loc[1000030, 'continuous_data'] == '-35.31471'NEWLINE        assert data_fetched.loc[1000020, 'continuous_data'] == '-10.51461'NEWLINE        assert data_fetched.loc[1000060, 'continuous_data'] == '-0.5864'NEWLINE        assert data_fetched.loc[1000070, 'continuous_data'] == '3.5584'NEWLINENEWLINE        # my_second_datasetNEWLINE        data_fetched =\NEWLINE            self._make_yaml_request(NEWLINE                yaml_data, 'my_second_dataset', 5,NEWLINE                ['third_column', 'integer_data']NEWLINE            )NEWLINENEWLINE        assert data_fetched.loc[1000050, 'third_column'] == '1'NEWLINE        assert data_fetched.loc[1000030, 'third_column'] == '0'NEWLINE        assert data_fetched.loc[1000020, 'third_column'] == '1'NEWLINE        assert data_fetched.loc[1000060, 'third_column'] == '0'NEWLINE        assert data_fetched.loc[1000070, 'third_column'] == '1'NEWLINENEWLINE        assert data_fetched.loc[1000050, 'integer_data'] == '1'NEWLINE        assert data_fetched.loc[1000030, 'integer_data'] == 'NA'NEWLINE        assert data_fetched.loc[1000020, 'integer_data'] == 'NA'NEWLINE        assert data_fetched.loc[1000060, 'integer_data'] == 'NA'NEWLINE        assert data_fetched.loc[1000070, 'integer_data'] == '2'NEWLINENEWLINE    def test_phenotype_query_yaml_simple_query(self):NEWLINE        # PrepareNEWLINE        self.setUp('pheno2sql/example13/example13_diseases.csv',NEWLINE                   bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),NEWLINE                   sql_chunksize=2, n_columns_per_table=2)NEWLINENEWLINE        # this type of query, with 'simple_' at the beginning of the data section, makes direct queries to theNEWLINE        # databaseNEWLINE        yaml_data = b"""NEWLINE        samples_filters:NEWLINE            - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')NEWLINE            - c34_0_0 is null or c34_0_0 > -10 or c34_0_0 > -11NEWLINENEWLINE        simple_covariates:NEWLINE            field_name_34: c34_0_0 NEWLINE            field_name_47: c47_0_0NEWLINE        """NEWLINENEWLINE        # simple_covariatesNEWLINE        data_fetched =\NEWLINE            self._make_yaml_request(NEWLINE                yaml_data, 'simple_covariates', 5,NEWLINE                ['field_name_34', 'field_name_47']NEWLINE            )NEWLINENEWLINE        assert data_fetched.loc[1000020, 'field_name_34'] == 
'34'NEWLINE assert data_fetched.loc[1000030, 'field_name_34'] == '-6'NEWLINE assert data_fetched.loc[1000050, 'field_name_34'] == '-4'NEWLINE assert data_fetched.loc[1000060, 'field_name_34'] == 'NA'NEWLINE assert data_fetched.loc[1000070, 'field_name_34'] == '-5'NEWLINE
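# --- Editor's note (not part of the original test file) ---NEWLINE# The tests above call self._make_yaml_request(...), which is defined elsewhere in this test class.NEWLINE# The sketch below is a plausible reconstruction inferred only from the call sites above; the nameNEWLINE# and exact behavior are assumptions, not the project's actual helper. It posts one YAML sectionNEWLINE# with 'accept: text/csv', validates the response, and returns the parsed phenotype frame.NEWLINEimport ioNEWLINENEWLINEimport pandas as pdNEWLINENEWLINENEWLINEdef _make_yaml_request(app, yaml_data, section, n_expected_samples, expected_columns):NEWLINE    # 'app' is assumed to be a Flask test client, like self.app in the tests above.NEWLINE    response = app.post('/ukbrest/api/v1.0/query', data=NEWLINE    {NEWLINE        'file': (io.BytesIO(yaml_data), 'data.yaml'),NEWLINE        'section': section,NEWLINE    }, headers={'accept': 'text/csv'})NEWLINE    assert response.status_code == 200, response.status_codeNEWLINENEWLINE    # Keep every value as a string and preserve literal 'NA', as the assertions above expect.NEWLINE    data_fetched = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,NEWLINE                               index_col='eid', dtype=str, na_values='', keep_default_na=False)NEWLINENEWLINE    assert data_fetched.shape == (n_expected_samples, len(expected_columns)), data_fetched.shapeNEWLINE    assert all(x in expected_columns for x in data_fetched.columns)NEWLINE    return data_fetchedNEWLINE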
# @copyright@NEWLINE# Copyright (c) 2006 - 2018 TeradataNEWLINE# All rights reserved. Stacki(r) v5.x stacki.comNEWLINE# https://github.com/Teradata/stacki/blob/master/LICENSE.txtNEWLINE# @copyright@NEWLINENEWLINEimport stack.commandsNEWLINEfrom stack.exception import ArgRequired, ArgUnique, CommandErrorNEWLINENEWLINENEWLINEclass command(stack.commands.EnvironmentArgumentProcessor,NEWLINE	      stack.commands.add.command):NEWLINE	passNEWLINENEWLINENEWLINEclass Command(command):NEWLINE	"""NEWLINE	Add an environment to the database.NEWLINENEWLINE	<arg type='string' name='environment'>NEWLINE	The environment name.NEWLINE	</arg>NEWLINE	"""NEWLINENEWLINE	def run(self, params, args):NEWLINE		if len(args) == 0:NEWLINE			raise ArgRequired(self, 'environment')NEWLINE		if len(args) != 1:NEWLINE			raise ArgUnique(self, 'environment')NEWLINE		environment = args[0]NEWLINENEWLINE		# Use a parameterized query so the database driver escapes the name;NEWLINE		# the previous "... name='%s'" % environment form interpolated userNEWLINE		# input straight into the SQL string. A non-empty result means theNEWLINE		# environment already exists.NEWLINE		if self.db.select('* from environments where name=%s', (environment,)):NEWLINE			raise CommandError(self, 'environment "%s" already exists' % environment)NEWLINENEWLINE		self.db.execute('insert into environments (name) values (%s)', (environment,))NEWLINE
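# --- Editor's note (not part of the original command file) ---NEWLINE# A hypothetical session showing how the Command above behaves once wired into theNEWLINE# Stacki CLI as 'stack add environment <name>' (the exact output format is an assumption):NEWLINE#NEWLINE#   $ stack add environment productionNEWLINE#   $ stack add environment productionNEWLINE#   error - environment "production" already existsNEWLINE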
# -*- coding: utf-8 -*-NEWLINENEWLINE# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:NEWLINE# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-codeNEWLINENEWLINEfrom ccxt.kucoin import kucoinNEWLINEfrom ccxt.base.errors import AuthenticationErrorNEWLINEfrom ccxt.base.errors import PermissionDeniedNEWLINEfrom ccxt.base.errors import AccountSuspendedNEWLINEfrom ccxt.base.errors import ArgumentsRequiredNEWLINEfrom ccxt.base.errors import BadRequestNEWLINEfrom ccxt.base.errors import InsufficientFundsNEWLINEfrom ccxt.base.errors import InvalidOrderNEWLINEfrom ccxt.base.errors import NotSupportedNEWLINEfrom ccxt.base.errors import RateLimitExceededNEWLINEfrom ccxt.base.errors import ExchangeNotAvailableNEWLINEfrom ccxt.base.errors import InvalidNonceNEWLINEfrom ccxt.base.precise import PreciseNEWLINENEWLINENEWLINEclass kucoinfutures(kucoin):NEWLINENEWLINE def describe(self):NEWLINE return self.deep_extend(super(kucoinfutures, self).describe(), {NEWLINE 'id': 'kucoinfutures',NEWLINE 'name': 'Kucoin Futures',NEWLINE 'countries': ['SC'],NEWLINE 'rateLimit': 75,NEWLINE 'version': 'v1',NEWLINE 'certified': False,NEWLINE 'pro': False,NEWLINE 'comment': 'Platform 2.0',NEWLINE 'quoteJsonNumbers': False,NEWLINE 'has': {NEWLINE 'cancelAllOrders': True,NEWLINE 'cancelOrder': True,NEWLINE 'CORS': None,NEWLINE 'createDepositAddress': False,NEWLINE 'createOrder': True,NEWLINE 'fetchAccounts': False,NEWLINE 'fetchBalance': True,NEWLINE 'fetchBorrowRate': False,NEWLINE 'fetchBorrowRates': False,NEWLINE 'fetchBorrowRatesPerSymbol': False,NEWLINE 'fetchClosedOrders': True,NEWLINE 'fetchCurrencies': False,NEWLINE 'fetchDepositAddress': True,NEWLINE 'fetchDeposits': None,NEWLINE 'fetchFundingFee': False,NEWLINE 'fetchFundingHistory': True,NEWLINE 'fetchFundingRate': True,NEWLINE 'fetchFundingRateHistory': False,NEWLINE 'fetchIndexOHLCV': False,NEWLINE 'fetchL3OrderBook': False,NEWLINE 'fetchLedger': False,NEWLINE 'fetchMarkets': True,NEWLINE 'fetchMarkOHLCV': False,NEWLINE 'fetchMyTrades': True,NEWLINE 'fetchOHLCV': True,NEWLINE 'fetchOpenOrders': True,NEWLINE 'fetchOrder': True,NEWLINE 'fetchOrderBook': True,NEWLINE 'fetchOrdersByStatus': True,NEWLINE 'fetchPositions': True,NEWLINE 'fetchPremiumIndexOHLCV': False,NEWLINE 'fetchStatus': True,NEWLINE 'fetchTicker': True,NEWLINE 'fetchTickers': False,NEWLINE 'fetchTime': True,NEWLINE 'fetchTrades': True,NEWLINE 'fetchWithdrawals': None,NEWLINE 'loadTimeDifference': True,NEWLINE 'setMarginMode': False,NEWLINE 'transfer': True,NEWLINE 'transferOut': True,NEWLINE 'withdraw': None,NEWLINE },NEWLINE 'urls': {NEWLINE 'logo': 'https://user-images.githubusercontent.com/1294454/147508995-9e35030a-d046-43a1-a006-6fabd981b554.jpg',NEWLINE 'doc': [NEWLINE 'https://docs.kucoin.com/futures',NEWLINE 'https://docs.kucoin.com',NEWLINE ],NEWLINE 'www': 'https://futures.kucoin.com/',NEWLINE 'referral': 'https://futures.kucoin.com/?rcode=E5wkqe',NEWLINE 'api': {NEWLINE 'public': 'https://openapi-v2.kucoin.com',NEWLINE 'private': 'https://openapi-v2.kucoin.com',NEWLINE 'futuresPrivate': 'https://api-futures.kucoin.com',NEWLINE 'futuresPublic': 'https://api-futures.kucoin.com',NEWLINE },NEWLINE 'test': {NEWLINE 'public': 'https://openapi-sandbox.kucoin.com',NEWLINE 'private': 'https://openapi-sandbox.kucoin.com',NEWLINE 'futuresPrivate': 'https://api-sandbox-futures.kucoin.com',NEWLINE 'futuresPublic': 'https://api-sandbox-futures.kucoin.com',NEWLINE },NEWLINE },NEWLINE 'requiredCredentials': {NEWLINE 'apiKey': True,NEWLINE 'secret': 
True,NEWLINE 'password': True,NEWLINE },NEWLINE 'api': {NEWLINE 'futuresPublic': {NEWLINE 'get': {NEWLINE 'contracts/active': 1,NEWLINE 'contracts/{symbol}': 1,NEWLINE 'ticker': 1,NEWLINE 'level2/snapshot': 1.33,NEWLINE 'level2/depth{limit}': 1,NEWLINE 'level2/message/query': 1,NEWLINE 'level3/message/query': 1, # deprecated,level3/snapshot is suggestedNEWLINE 'level3/snapshot': 1, # v2NEWLINE 'trade/history': 1,NEWLINE 'interest/query': 1,NEWLINE 'index/query': 1,NEWLINE 'mark-price/{symbol}/current': 1,NEWLINE 'premium/query': 1,NEWLINE 'funding-rate/{symbol}/current': 1,NEWLINE 'timestamp': 1,NEWLINE 'status': 1,NEWLINE 'kline/query': 1,NEWLINE },NEWLINE 'post': {NEWLINE 'bullet-public': 1,NEWLINE },NEWLINE },NEWLINE 'futuresPrivate': {NEWLINE 'get': {NEWLINE 'account-overview': 1.33,NEWLINE 'transaction-history': 4.44,NEWLINE 'deposit-address': 1,NEWLINE 'deposit-list': 1,NEWLINE 'withdrawals/quotas': 1,NEWLINE 'withdrawal-list': 1,NEWLINE 'transfer-list': 1,NEWLINE 'orders': 1.33,NEWLINE 'stopOrders': 1,NEWLINE 'recentDoneOrders': 1,NEWLINE 'orders/{orderId}': 1, # ?clientOid={client-order-id} # get order by orderIdNEWLINE 'orders/byClientOid': 1, # ?clientOid=eresc138b21023a909e5ad59 # get order by clientOidNEWLINE 'fills': 4.44,NEWLINE 'recentFills': 4.44,NEWLINE 'openOrderStatistics': 1,NEWLINE 'position': 1,NEWLINE 'positions': 4.44,NEWLINE 'funding-history': 4.44,NEWLINE },NEWLINE 'post': {NEWLINE 'withdrawals': 1,NEWLINE 'transfer-out': 1, # v2NEWLINE 'orders': 1.33,NEWLINE 'position/margin/auto-deposit-status': 1,NEWLINE 'position/margin/deposit-margin': 1,NEWLINE 'bullet-private': 1,NEWLINE },NEWLINE 'delete': {NEWLINE 'withdrawals/{withdrawalId}': 1,NEWLINE 'cancel/transfer-out': 1,NEWLINE 'orders/{orderId}': 1,NEWLINE 'orders': 4.44,NEWLINE 'stopOrders': 1,NEWLINE },NEWLINE },NEWLINE },NEWLINE 'exceptions': {NEWLINE 'exact': {NEWLINE '400': BadRequest, # Bad Request -- Invalid request formatNEWLINE '401': AuthenticationError, # Unauthorized -- Invalid API KeyNEWLINE '403': NotSupported, # Forbidden -- The request is forbiddenNEWLINE '404': NotSupported, # Not Found -- The specified resource could not be foundNEWLINE '405': NotSupported, # Method Not Allowed -- You tried to access the resource with an invalid method.NEWLINE '415': BadRequest, # Content-Type -- application/jsonNEWLINE '429': RateLimitExceeded, # Too Many Requests -- Access limit breachedNEWLINE '500': ExchangeNotAvailable, # Internal Server Error -- We had a problem with our server. Try again later.NEWLINE '503': ExchangeNotAvailable, # Service Unavailable -- We're temporarily offline for maintenance. 
Please try again later.NEWLINE '100001': InvalidOrder, # {"code":"100001","msg":"Unavailable to enable both \"postOnly\" and \"hidden\""}NEWLINE '100004': BadRequest, # {"code":"100004","msg":"Order is in not cancelable state"}NEWLINE '101030': PermissionDenied, # {"code":"101030","msg":"You haven't yet enabled the margin trading"}NEWLINE '200004': InsufficientFunds,NEWLINE '230003': InsufficientFunds, # {"code":"230003","msg":"Balance insufficient!"}NEWLINE '260100': InsufficientFunds, # {"code":"260100","msg":"account.noBalance"}NEWLINE '300003': InsufficientFunds,NEWLINE '300012': InvalidOrder,NEWLINE '400001': AuthenticationError, # Any of KC-API-KEY, KC-API-SIGN, KC-API-TIMESTAMP, KC-API-PASSPHRASE is missing in your request header.NEWLINE '400002': InvalidNonce, # KC-API-TIMESTAMP Invalid -- Time differs from server time by more than 5 secondsNEWLINE '400003': AuthenticationError, # KC-API-KEY not existsNEWLINE '400004': AuthenticationError, # KC-API-PASSPHRASE errorNEWLINE '400005': AuthenticationError, # Signature error -- Please check your signatureNEWLINE '400006': AuthenticationError, # The IP address is not in the API whitelistNEWLINE '400007': AuthenticationError, # Access Denied -- Your API key does not have sufficient permissions to access the URINEWLINE '404000': NotSupported, # URL Not Found -- The requested resource could not be foundNEWLINE '400100': BadRequest, # Parameter Error -- You tried to access the resource with invalid parametersNEWLINE '411100': AccountSuspended, # User is frozen -- Please contact us via support centerNEWLINE '500000': ExchangeNotAvailable, # Internal Server Error -- We had a problem with our server. Try again later.NEWLINE },NEWLINE },NEWLINE 'fees': {NEWLINE 'trading': {NEWLINE 'tierBased': True,NEWLINE 'percentage': True,NEWLINE 'taker': self.parse_number('0.0006'),NEWLINE 'maker': self.parse_number('0.0002'),NEWLINE 'tiers': {NEWLINE 'taker': [NEWLINE [self.parse_number('0'), self.parse_number('0.0006')],NEWLINE [self.parse_number('50'), self.parse_number('0.0006')],NEWLINE [self.parse_number('200'), self.parse_number('0.0006')],NEWLINE [self.parse_number('500'), self.parse_number('0.0005')],NEWLINE [self.parse_number('1000'), self.parse_number('0.0004')],NEWLINE [self.parse_number('2000'), self.parse_number('0.0004')],NEWLINE [self.parse_number('4000'), self.parse_number('0.00038')],NEWLINE [self.parse_number('8000'), self.parse_number('0.00035')],NEWLINE [self.parse_number('15000'), self.parse_number('0.00032')],NEWLINE [self.parse_number('25000'), self.parse_number('0.0003')],NEWLINE [self.parse_number('40000'), self.parse_number('0.0003')],NEWLINE [self.parse_number('60000'), self.parse_number('0.0003')],NEWLINE [self.parse_number('80000'), self.parse_number('0.0003')],NEWLINE ],NEWLINE 'maker': [NEWLINE [self.parse_number('0'), self.parse_number('0.02')],NEWLINE [self.parse_number('50'), self.parse_number('0.015')],NEWLINE [self.parse_number('200'), self.parse_number('0.01')],NEWLINE [self.parse_number('500'), self.parse_number('0.01')],NEWLINE [self.parse_number('1000'), self.parse_number('0.01')],NEWLINE [self.parse_number('2000'), self.parse_number('0')],NEWLINE [self.parse_number('4000'), self.parse_number('0')],NEWLINE [self.parse_number('8000'), self.parse_number('0')],NEWLINE [self.parse_number('15000'), self.parse_number('-0.003')],NEWLINE [self.parse_number('25000'), self.parse_number('-0.006')],NEWLINE [self.parse_number('40000'), self.parse_number('-0.009')],NEWLINE [self.parse_number('60000'), 
self.parse_number('-0.012')],NEWLINE [self.parse_number('80000'), self.parse_number('-0.015')],NEWLINE ],NEWLINE },NEWLINE },NEWLINE 'funding': {NEWLINE 'tierBased': False,NEWLINE 'percentage': False,NEWLINE 'withdraw': {},NEWLINE 'deposit': {},NEWLINE },NEWLINE },NEWLINE 'commonCurrencies': {NEWLINE 'HOT': 'HOTNOW',NEWLINE 'EDGE': 'DADI', # https://github.com/ccxt/ccxt/issues/5756NEWLINE 'WAX': 'WAXP',NEWLINE 'TRY': 'Trias',NEWLINE 'VAI': 'VAIOT',NEWLINE 'XBT': 'BTC',NEWLINE },NEWLINE 'timeframes': {NEWLINE '1m': 1,NEWLINE '3m': None,NEWLINE '5m': 5,NEWLINE '15m': 15,NEWLINE '30m': 30,NEWLINE '1h': 60,NEWLINE '2h': 120,NEWLINE '4h': 240,NEWLINE '6h': None,NEWLINE '8h': 480,NEWLINE '12h': 720,NEWLINE '1d': 1440,NEWLINE '1w': 10080,NEWLINE },NEWLINE 'options': {NEWLINE 'version': 'v1',NEWLINE 'symbolSeparator': '-',NEWLINE 'defaultType': 'swap',NEWLINE 'marginTypes': {},NEWLINE # endpoint versionsNEWLINE 'versions': {NEWLINE 'futuresPrivate': {NEWLINE 'POST': {NEWLINE 'transfer-out': 'v2',NEWLINE },NEWLINE },NEWLINE 'futuresPublic': {NEWLINE 'GET': {NEWLINE 'level3/snapshot': 'v2',NEWLINE },NEWLINE },NEWLINE },NEWLINE 'networks': {NEWLINE 'OMNI': 'omni',NEWLINE 'ERC20': 'eth',NEWLINE 'TRC20': 'trx',NEWLINE },NEWLINE },NEWLINE })NEWLINENEWLINE def fetch_accounts(self, params={}):NEWLINE raise BadRequest(self.id + ' has no method fetchAccounts')NEWLINENEWLINE def load_time_difference(self, params={}):NEWLINE response = self.futuresPublicGetTimestamp(params)NEWLINE after = self.milliseconds()NEWLINE kucoinTime = self.safe_integer(response, 'data')NEWLINE self.options['timeDifference'] = int(after - kucoinTime)NEWLINE return self.options['timeDifference']NEWLINENEWLINE def fetch_status(self, params={}):NEWLINE response = self.futuresPublicGetStatus(params)NEWLINE #NEWLINE # {NEWLINE # "code":"200000",NEWLINE # "data":{NEWLINE # "msg":"",NEWLINE # "status":"open"NEWLINE # }NEWLINE # }NEWLINE #NEWLINE data = self.safe_value(response, 'data', {})NEWLINE status = self.safe_value(data, 'status')NEWLINE if status is not None:NEWLINE status = 'ok' if (status == 'open') else 'maintenance'NEWLINE self.status = self.extend(self.status, {NEWLINE 'status': status,NEWLINE 'updated': self.milliseconds(),NEWLINE })NEWLINE return self.statusNEWLINENEWLINE def fetch_markets(self, params={}):NEWLINE response = self.futuresPublicGetContractsActive(params)NEWLINE #NEWLINE # {NEWLINE # "code": "200000",NEWLINE # "data": {NEWLINE # "symbol": "ETHUSDTM",NEWLINE # "rootSymbol": "USDT",NEWLINE # "type": "FFWCSX",NEWLINE # "firstOpenDate": 1591086000000,NEWLINE # "expireDate": null,NEWLINE # "settleDate": null,NEWLINE # "baseCurrency": "ETH",NEWLINE # "quoteCurrency": "USDT",NEWLINE # "settleCurrency": "USDT",NEWLINE # "maxOrderQty": 1000000,NEWLINE # "maxPrice": 1000000.0000000000,NEWLINE # "lotSize": 1,NEWLINE # "tickSize": 0.05,NEWLINE # "indexPriceTickSize": 0.01,NEWLINE # "multiplier": 0.01,NEWLINE # "initialMargin": 0.01,NEWLINE # "maintainMargin": 0.005,NEWLINE # "maxRiskLimit": 1000000,NEWLINE # "minRiskLimit": 1000000,NEWLINE # "riskStep": 500000,NEWLINE # "makerFeeRate": 0.00020,NEWLINE # "takerFeeRate": 0.00060,NEWLINE # "takerFixFee": 0.0000000000,NEWLINE # "makerFixFee": 0.0000000000,NEWLINE # "settlementFee": null,NEWLINE # "isDeleverage": True,NEWLINE # "isQuanto": True,NEWLINE # "isInverse": False,NEWLINE # "markMethod": "FairPrice",NEWLINE # "fairMethod": "FundingRate",NEWLINE # "fundingBaseSymbol": ".ETHINT8H",NEWLINE # "fundingQuoteSymbol": ".USDTINT8H",NEWLINE # "fundingRateSymbol": 
".ETHUSDTMFPI8H",NEWLINE # "indexSymbol": ".KETHUSDT",NEWLINE # "settlementSymbol": "",NEWLINE # "status": "Open",NEWLINE # "fundingFeeRate": 0.000535,NEWLINE # "predictedFundingFeeRate": 0.002197,NEWLINE # "openInterest": "8724443",NEWLINE # "turnoverOf24h": 341156641.03354263,NEWLINE # "volumeOf24h": 74833.54000000,NEWLINE # "markPrice": 4534.07,NEWLINE # "indexPrice":4531.92,NEWLINE # "lastTradePrice": 4545.4500000000,NEWLINE # "nextFundingRateTime": 25481884,NEWLINE # "maxLeverage": 100,NEWLINE # "sourceExchanges": [NEWLINE # "huobi",NEWLINE # "Okex",NEWLINE # "Binance",NEWLINE # "Kucoin",NEWLINE # "Poloniex",NEWLINE # "Hitbtc"NEWLINE # ],NEWLINE # "premiumsSymbol1M": ".ETHUSDTMPI",NEWLINE # "premiumsSymbol8H": ".ETHUSDTMPI8H",NEWLINE # "fundingBaseSymbol1M": ".ETHINT",NEWLINE # "fundingQuoteSymbol1M": ".USDTINT",NEWLINE # "lowPrice": 4456.90,NEWLINE # "highPrice": 4674.25,NEWLINE # "priceChgPct": 0.0046,NEWLINE # "priceChg": 21.15NEWLINE # }NEWLINE # }NEWLINE #NEWLINE result = []NEWLINE data = self.safe_value(response, 'data')NEWLINE for i in range(0, len(data)):NEWLINE market = data[i]NEWLINE id = self.safe_string(market, 'symbol')NEWLINE expiry = self.safe_integer(market, 'expireDate')NEWLINE futures = True if expiry else FalseNEWLINE swap = not futuresNEWLINE baseId = self.safe_string(market, 'baseCurrency')NEWLINE quoteId = self.safe_string(market, 'quoteCurrency')NEWLINE settleId = self.safe_string(market, 'settleCurrency')NEWLINE base = self.safe_currency_code(baseId)NEWLINE quote = self.safe_currency_code(quoteId)NEWLINE settle = self.safe_currency_code(settleId)NEWLINE symbol = base + '/' + quote + ':' + settleNEWLINE type = 'swap'NEWLINE if futures:NEWLINE symbol = symbol + '-' + self.yymmdd(expiry, '')NEWLINE type = 'futures'NEWLINE baseMaxSize = self.safe_number(market, 'baseMaxSize')NEWLINE baseMinSizeString = self.safe_string(market, 'baseMinSize')NEWLINE quoteMaxSizeString = self.safe_string(market, 'quoteMaxSize')NEWLINE baseMinSize = self.parse_number(baseMinSizeString)NEWLINE quoteMaxSize = self.parse_number(quoteMaxSizeString)NEWLINE quoteMinSize = self.safe_number(market, 'quoteMinSize')NEWLINE inverse = self.safe_value(market, 'isInverse')NEWLINE # quoteIncrement = self.safe_number(market, 'quoteIncrement')NEWLINE amount = self.safe_string(market, 'baseIncrement')NEWLINE price = self.safe_string(market, 'priceIncrement')NEWLINE result.append({NEWLINE 'id': id,NEWLINE 'symbol': symbol,NEWLINE 'baseId': baseId,NEWLINE 'quoteId': quoteId,NEWLINE 'settleId': settleId,NEWLINE 'base': base,NEWLINE 'quote': quote,NEWLINE 'settle': settle,NEWLINE 'type': type,NEWLINE 'spot': False,NEWLINE 'margin': False,NEWLINE 'swap': swap,NEWLINE 'futures': futures,NEWLINE 'option': False,NEWLINE 'active': True,NEWLINE 'derivative': True,NEWLINE 'contract': True,NEWLINE 'linear': inverse is not True,NEWLINE 'inverse': inverse,NEWLINE 'taker': self.safe_number(market, 'takerFeeRate'),NEWLINE 'maker': self.safe_number(market, 'makerFeeRate'),NEWLINE 'contractSize': self.parse_number(Precise.string_abs(self.safe_string(market, 'multiplier'))),NEWLINE 'expiry': self.parse_number(expiry),NEWLINE 'expiryDatetime': self.iso8601(expiry),NEWLINE 'strike': None,NEWLINE 'optionType': None,NEWLINE 'precision': {NEWLINE 'amount': self.precision_from_string(amount) if amount else None,NEWLINE 'price': self.precision_from_string(price) if price else None,NEWLINE },NEWLINE 'limits': {NEWLINE 'leverage': {NEWLINE 'min': self.parse_number('1'),NEWLINE 'max': self.safe_number(market, 'maxLeverage', 
1),NEWLINE                    },NEWLINE                    'amount': {NEWLINE                        'min': baseMinSize,NEWLINE                        'max': baseMaxSize,NEWLINE                    },NEWLINE                    'price': {NEWLINE                        'min': price,NEWLINE                        'max': self.parse_number(Precise.string_div(quoteMaxSizeString, baseMinSizeString)),NEWLINE                    },NEWLINE                    'cost': {NEWLINE                        'min': quoteMinSize,NEWLINE                        'max': quoteMaxSize,NEWLINE                    },NEWLINE                },NEWLINE                'info': market,NEWLINE            })NEWLINE        return resultNEWLINENEWLINE    def fetch_time(self, params={}):NEWLINE        response = self.futuresPublicGetTimestamp(params)NEWLINE        #NEWLINE        # {NEWLINE        #     code: "200000",NEWLINE        #     data: 1637385119302,NEWLINE        # }NEWLINE        #NEWLINE        return self.safe_number(response, 'data')NEWLINENEWLINE    def fetch_ohlcv(self, symbol, timeframe='15m', since=None, limit=None, params={}):NEWLINE        self.load_markets()NEWLINE        market = self.market(symbol)NEWLINE        marketId = market['id']NEWLINE        request = {NEWLINE            'symbol': marketId,NEWLINE            'granularity': self.timeframes[timeframe],NEWLINE        }NEWLINE        duration = self.parse_timeframe(timeframe) * 1000NEWLINE        endAt = self.milliseconds()NEWLINE        if since is not None:NEWLINE            request['from'] = sinceNEWLINE            if limit is None:NEWLINE                limit = self.safe_integer(self.options, 'fetchOHLCVLimit', 200)NEWLINE            endAt = self.sum(since, limit * duration)NEWLINE        elif limit is not None:NEWLINE            since = endAt - limit * durationNEWLINE            request['from'] = sinceNEWLINE        request['to'] = endAtNEWLINE        response = self.futuresPublicGetKlineQuery(self.extend(request, params))NEWLINE        #NEWLINE        # {NEWLINE        #     "code": "200000",NEWLINE        #     "data": [NEWLINE        #         [1636459200000, 4779.3, 4792.1, 4768.7, 4770.3, 78051],NEWLINE        #         [1636460100000, 4770.25, 4778.55, 4757.55, 4777.25, 80164],NEWLINE        #         [1636461000000, 4777.25, 4791.45, 4774.5, 4791.3, 51555]NEWLINE        #     ]NEWLINE        # }NEWLINE        #NEWLINE        data = self.safe_value(response, 'data', [])NEWLINE        return self.parse_ohlcvs(data, market, timeframe, since, limit)NEWLINENEWLINE    def parse_ohlcv(self, ohlcv, market=None):NEWLINE        #NEWLINE        # the kline/query endpoint returns [time, open, high, low, close, volume], matching the index mapping below, e.g.NEWLINE        # [NEWLINE        #     1636459200000, # Start time of the candle cycleNEWLINE        #     4779.3,        # opening priceNEWLINE        #     4792.1,        # highest priceNEWLINE        #     4768.7,        # lowest priceNEWLINE        #     4770.3,        # closing priceNEWLINE        #     78051,         # trading volumeNEWLINE        # ]NEWLINE        #NEWLINE        return [NEWLINE            self.safe_integer(ohlcv, 0),NEWLINE            self.safe_number(ohlcv, 1),NEWLINE            self.safe_number(ohlcv, 2),NEWLINE            self.safe_number(ohlcv, 3),NEWLINE            self.safe_number(ohlcv, 4),NEWLINE            self.safe_number(ohlcv, 5),NEWLINE        ]NEWLINENEWLINE    def create_deposit_address(self, code, params={}):NEWLINE        raise BadRequest(self.id + ' has no method createDepositAddress')NEWLINENEWLINE    def fetch_deposit_address(self, code, params={}):NEWLINE        self.load_markets()NEWLINE        currency = self.currency(code)NEWLINE        currencyId = currency['id']NEWLINE        request = {NEWLINE            'currency': currencyId, # Currency,including XBT,USDTNEWLINE        }NEWLINE        response = self.futuresPrivateGetDepositAddress(self.extend(request, params))NEWLINE        #NEWLINE        # {NEWLINE        #     "code": "200000",NEWLINE        #     "data": {NEWLINE        #         "address": "0x78d3ad1c0aa1bf068e19c94a2d7b16c9c0fcd8b1",//Deposit addressNEWLINE        #         "memo": null//Address tag. If the returned value is null, it means that the requested token has no memo. If you are to transfer funds from another platform to KuCoin Futures and if the token to be transferred has memo(tag), you need to fill in the memo to ensure the transferred funds will be sent to the address you specified.NEWLINE        #     }NEWLINE        # }NEWLINE        #NEWLINE        data = self.safe_value(response, 'data', {})NEWLINE        address = self.safe_string(data, 'address')NEWLINE        if currencyId != 'NIM':NEWLINE            # contains spacesNEWLINE            self.check_address(address)NEWLINE        return {NEWLINE            'info': response,NEWLINE            'currency': currencyId,NEWLINE            'address': address,NEWLINE            'tag': self.safe_string(data, 'memo'),NEWLINE            'network': self.safe_string(data, 'chain'),NEWLINE        }NEWLINENEWLINE    def fetch_order_book(self, symbol, limit=None, params={}):NEWLINE        self.load_markets()NEWLINE        level = self.safe_number(params, 'level')NEWLINE        if level != 2 and level is not None:NEWLINE            raise BadRequest(self.id + ' fetchOrderBook can only return level 2')NEWLINE        market = self.market(symbol)NEWLINE        request = {NEWLINE            'symbol': market['id'],NEWLINE        }NEWLINE        if limit is not None:NEWLINE            if (limit == 20) or (limit == 100):NEWLINE                request['limit'] = limitNEWLINE            else:NEWLINE                raise BadRequest(self.id + ' fetchOrderBook limit argument must be 20 or 100')NEWLINE        else:NEWLINE            request['limit'] = 20NEWLINE        response = self.futuresPublicGetLevel2DepthLimit(self.extend(request, params))NEWLINE        #NEWLINE        # {NEWLINE        #     "code": "200000",NEWLINE        #     "data": {NEWLINE        #         "symbol": "XBTUSDM", #SymbolNEWLINE        #         "sequence": 100, #Ticker sequence numberNEWLINE        #         "asks": [NEWLINE        #             ["5000.0", 1000], #Price, quantityNEWLINE        #             ["6000.0", 1983] #Price, quantityNEWLINE        #         ],NEWLINE        #         "bids": [NEWLINE        #             ["3200.0", 800], #Price, quantityNEWLINE        #             ["3100.0", 100] #Price, quantityNEWLINE        #         ],NEWLINE        #         "ts": 1604643655040584408 # timestampNEWLINE        #     }NEWLINE        # }NEWLINE        #NEWLINE        data = self.safe_value(response, 'data', {})NEWLINE        timestamp = int(self.safe_integer(data, 'ts') / 1000000)NEWLINE        orderbook = self.parse_order_book(data, symbol, timestamp, 'bids', 'asks', 0, 1)NEWLINE        orderbook['nonce'] = self.safe_integer(data, 'sequence')NEWLINE        return orderbookNEWLINENEWLINE    def fetch_l3_order_book(self, symbol, limit=None, params={}):NEWLINE        raise BadRequest(self.id + ' can only fetch the L2 order book')NEWLINENEWLINE    def fetch_ticker(self, symbol, params={}):NEWLINE        self.load_markets()NEWLINE        market = self.market(symbol)NEWLINE        request = {NEWLINE            'symbol': market['id'],NEWLINE        }NEWLINE        response = self.futuresPublicGetTicker(self.extend(request, params))NEWLINE        #NEWLINE        # {NEWLINE        #     "code": "200000",NEWLINE        #     "data": {NEWLINE        #         "sequence": 1638444978558,NEWLINE        #         "symbol": "ETHUSDTM",NEWLINE        #         "side": "sell",NEWLINE        #         "size": 4,NEWLINE        #         "price": "4229.35",NEWLINE        #         "bestBidSize": 2160,NEWLINE        #         "bestBidPrice": "4229.0",NEWLINE        #         "bestAskPrice": "4229.05",NEWLINE        #         "tradeId": "61aaa8b777a0c43055fe4851",NEWLINE        #         "ts": 1638574296209786785,NEWLINE        #         "bestAskSize": 36,NEWLINE        #     }NEWLINE        # }NEWLINE        #NEWLINE        return self.parse_ticker(response['data'], market)NEWLINENEWLINE    def parse_ticker(self, ticker, market=None):NEWLINE        #NEWLINE        # {NEWLINE        #     "code": "200000",NEWLINE        #     "data": {NEWLINE        #         "sequence": 1629930362547,NEWLINE        #         "symbol": "ETHUSDTM",NEWLINE        #         "side": "buy",NEWLINE        #         "size": 130,NEWLINE        #         "price": "4724.7",NEWLINE        #         "bestBidSize": 5,NEWLINE        #         "bestBidPrice": "4724.6",NEWLINE        #         "bestAskPrice": "4724.65",NEWLINE        #         "tradeId": "618d2a5a77a0c4431d2335f4",NEWLINE        #         "ts": 1636641371963227600,NEWLINE        #         "bestAskSize": 1789NEWLINE        #     }NEWLINE        # }NEWLINE        #NEWLINE        last = 
self.safe_number(ticker, 'price')NEWLINE marketId = self.safe_string(ticker, 'symbol')NEWLINE market = self.safe_market(marketId, market, '-')NEWLINE timestamp = Precise.string_div(self.safe_string(ticker, 'ts'), '1000000')NEWLINE return self.safe_ticker({NEWLINE 'symbol': market['symbol'],NEWLINE 'timestamp': timestamp,NEWLINE 'datetime': self.iso8601(timestamp),NEWLINE 'high': None,NEWLINE 'low': None,NEWLINE 'bid': self.safe_number(ticker, 'bestBidPrice'),NEWLINE 'bidVolume': self.safe_number(ticker, 'bestBidSize'),NEWLINE 'ask': self.safe_number(ticker, 'bestAskPrice'),NEWLINE 'askVolume': self.safe_number(ticker, 'bestAskSize'),NEWLINE 'vwap': None,NEWLINE 'open': None,NEWLINE 'close': last,NEWLINE 'last': last,NEWLINE 'previousClose': None,NEWLINE 'change': None,NEWLINE 'percentage': None,NEWLINE 'average': None,NEWLINE 'baseVolume': None,NEWLINE 'quoteVolume': None,NEWLINE 'info': ticker,NEWLINE }, market)NEWLINENEWLINE def fetch_funding_history(self, symbol=None, since=None, limit=None, params={}):NEWLINE #NEWLINE # PrivateNEWLINE # @param symbol(string): The pair for which the contract was tradedNEWLINE # @param since(number): The unix start time of the first funding payment requestedNEWLINE # @param limit(number): The number of results to returnNEWLINE # @param params(dict): Additional parameters to send to the APINEWLINE # @param return: Data for the history of the accounts funding payments for futures contractsNEWLINE #NEWLINE if symbol is None:NEWLINE raise ArgumentsRequired(self.id + ' fetchFundingHistory() requires a symbol argument')NEWLINE self.load_markets()NEWLINE market = self.market(symbol)NEWLINE request = {NEWLINE 'symbol': market['id'],NEWLINE }NEWLINE if since is not None:NEWLINE request['startAt'] = sinceNEWLINE if limit is not None:NEWLINE # * Since is ignored if limit is definedNEWLINE request['maxCount'] = limitNEWLINE response = self.futuresPrivateGetFundingHistory(self.extend(request, params))NEWLINE #NEWLINE # {NEWLINE # "code": "200000",NEWLINE # "data": {NEWLINE # "dataList": [NEWLINE # {NEWLINE # "id": 239471298749817,NEWLINE # "symbol": "ETHUSDTM",NEWLINE # "timePoint": 1638532800000,NEWLINE # "fundingRate": 0.000100,NEWLINE # "markPrice": 4612.8300000000,NEWLINE # "positionQty": 12,NEWLINE # "positionCost": 553.5396000000,NEWLINE # "funding": -0.0553539600,NEWLINE # "settleCurrency": "USDT"NEWLINE # },NEWLINE # ...NEWLINE # ],NEWLINE # "hasMore": TrueNEWLINE # }NEWLINE # }NEWLINE #NEWLINE data = self.safe_value(response, 'data')NEWLINE dataList = self.safe_value(data, 'dataList')NEWLINE fees = []NEWLINE for i in range(0, len(dataList)):NEWLINE listItem = dataList[i]NEWLINE timestamp = self.safe_integer(listItem, 'timePoint')NEWLINE fees.append({NEWLINE 'info': listItem,NEWLINE 'symbol': symbol,NEWLINE 'code': self.safe_currency_code(self.safe_string(listItem, 'settleCurrency')),NEWLINE 'timestamp': timestamp,NEWLINE 'datetime': self.iso8601(timestamp),NEWLINE 'id': self.safe_number(listItem, 'id'),NEWLINE 'amount': self.safe_number(listItem, 'funding'),NEWLINE 'fundingRate': self.safe_number(listItem, 'fundingRate'),NEWLINE 'markPrice': self.safe_number(listItem, 'markPrice'),NEWLINE 'positionQty': self.safe_number(listItem, 'positionQty'),NEWLINE 'positionCost': self.safe_number(listItem, 'positionCost'),NEWLINE })NEWLINE return feesNEWLINENEWLINE def fetch_positions(self, symbols=None, params={}):NEWLINE response = self.futuresPrivateGetPositions(params)NEWLINE #NEWLINE # {NEWLINE # "code": "200000",NEWLINE # "data": [NEWLINE # {NEWLINE # "id": 
"615ba79f83a3410001cde321",NEWLINE # "symbol": "ETHUSDTM",NEWLINE # "autoDeposit": False,NEWLINE # "maintMarginReq": 0.005,NEWLINE # "riskLimit": 1000000,NEWLINE # "realLeverage": 18.61,NEWLINE # "crossMode": False,NEWLINE # "delevPercentage": 0.86,NEWLINE # "openingTimestamp": 1638563515618,NEWLINE # "currentTimestamp": 1638576872774,NEWLINE # "currentQty": 2,NEWLINE # "currentCost": 83.64200000,NEWLINE # "currentComm": 0.05018520,NEWLINE # "unrealisedCost": 83.64200000,NEWLINE # "realisedGrossCost": 0.00000000,NEWLINE # "realisedCost": 0.05018520,NEWLINE # "isOpen": True,NEWLINE # "markPrice": 4225.01,NEWLINE # "markValue": 84.50020000,NEWLINE # "posCost": 83.64200000,NEWLINE # "posCross": 0.0000000000,NEWLINE # "posInit": 3.63660870,NEWLINE # "posComm": 0.05236717,NEWLINE # "posLoss": 0.00000000,NEWLINE # "posMargin": 3.68897586,NEWLINE # "posMaint": 0.50637594,NEWLINE # "maintMargin": 4.54717586,NEWLINE # "realisedGrossPnl": 0.00000000,NEWLINE # "realisedPnl": -0.05018520,NEWLINE # "unrealisedPnl": 0.85820000,NEWLINE # "unrealisedPnlPcnt": 0.0103,NEWLINE # "unrealisedRoePcnt": 0.2360,NEWLINE # "avgEntryPrice": 4182.10,NEWLINE # "liquidationPrice": 4023.00,NEWLINE # "bankruptPrice": 4000.25,NEWLINE # "settleCurrency": "USDT",NEWLINE # "isInverse": FalseNEWLINE # }NEWLINE # ]NEWLINE # }NEWLINE #NEWLINE return self.parse_positions(self.safe_value(response, 'data'))NEWLINENEWLINE def parse_positions(self, positions):NEWLINE result = []NEWLINE for i in range(0, len(positions)):NEWLINE result.append(self.parse_position(positions[i]))NEWLINE return resultNEWLINENEWLINE def parse_position(self, position, market=None):NEWLINE #NEWLINE # {NEWLINE # "code": "200000",NEWLINE # "data": [NEWLINE # {NEWLINE # "id": "615ba79f83a3410001cde321", # Position IDNEWLINE # "symbol": "ETHUSDTM", # SymbolNEWLINE # "autoDeposit": False, # Auto deposit margin or notNEWLINE # "maintMarginReq": 0.005, # Maintenance margin requirementNEWLINE # "riskLimit": 1000000, # Risk limitNEWLINE # "realLeverage": 25.92, # Leverage of the orderNEWLINE # "crossMode": False, # Cross mode or notNEWLINE # "delevPercentage": 0.76, # ADL ranking percentileNEWLINE # "openingTimestamp": 1638578546031, # Open timeNEWLINE # "currentTimestamp": 1638578563580, # Current timestampNEWLINE # "currentQty": 2, # Current postion quantityNEWLINE # "currentCost": 83.787, # Current postion valueNEWLINE # "currentComm": 0.0167574, # Current commissionNEWLINE # "unrealisedCost": 83.787, # Unrealised valueNEWLINE # "realisedGrossCost": 0.0, # Accumulated realised gross profit valueNEWLINE # "realisedCost": 0.0167574, # Current realised position valueNEWLINE # "isOpen": True, # Opened position or notNEWLINE # "markPrice": 4183.38, # Mark priceNEWLINE # "markValue": 83.6676, # Mark valueNEWLINE # "posCost": 83.787, # Position valueNEWLINE # "posCross": 0.0, # added marginNEWLINE # "posInit": 3.35148, # Leverage marginNEWLINE # "posComm": 0.05228309, # Bankruptcy costNEWLINE # "posLoss": 0.0, # Funding fees paid outNEWLINE # "posMargin": 3.40376309, # Position marginNEWLINE # "posMaint": 0.50707892, # Maintenance marginNEWLINE # "maintMargin": 3.28436309, # Position marginNEWLINE # "realisedGrossPnl": 0.0, # Accumulated realised gross profit valueNEWLINE # "realisedPnl": -0.0167574, # Realised profit and lossNEWLINE # "unrealisedPnl": -0.1194, # Unrealised profit and lossNEWLINE # "unrealisedPnlPcnt": -0.0014, # Profit-loss ratio of the positionNEWLINE # "unrealisedRoePcnt": -0.0356, # Rate of return on investmentNEWLINE # "avgEntryPrice": 4189.35, # 
Average entry priceNEWLINE # "liquidationPrice": 4044.55, # Liquidation priceNEWLINE # "bankruptPrice": 4021.75, # Bankruptcy priceNEWLINE # "settleCurrency": "USDT", # Currency used to clear and settle the tradesNEWLINE # "isInverse": FalseNEWLINE # }NEWLINE # ]NEWLINE # }NEWLINE #NEWLINE symbol = self.safe_string(position, 'symbol')NEWLINE market = self.safe_market(symbol, market)NEWLINE timestamp = self.safe_number(position, 'currentTimestamp')NEWLINE size = self.safe_string(position, 'currentQty')NEWLINE side = NoneNEWLINE if Precise.string_gt(size, '0'):NEWLINE side = 'buy'NEWLINE elif Precise.string_lt(size, '0'):NEWLINE side = 'sell'NEWLINE notional = Precise.string_abs(self.safe_string(position, 'posCost'))NEWLINE initialMargin = self.safe_string(position, 'posMargin')NEWLINE initialMarginPercentage = Precise.string_div(initialMargin, notional)NEWLINE leverage = Precise.string_div('1', initialMarginPercentage) # TODO: Not quite rightNEWLINE # marginRatio = Precise.string_div(maintenanceRate, collateral)NEWLINE unrealisedPnl = self.safe_string(position, 'unrealisedPnl')NEWLINE return {NEWLINE 'info': position,NEWLINE 'symbol': self.safe_string(market, 'symbol'),NEWLINE 'timestamp': timestamp,NEWLINE 'datetime': self.iso8601(timestamp),NEWLINE 'initialMargin': self.parse_number(initialMargin),NEWLINE 'initialMarginPercentage': self.parse_number(initialMarginPercentage),NEWLINE 'maintenanceMargin': self.safe_number(position, 'maintMargin'),NEWLINE 'maintenanceMarginPercentage': self.safe_string(position, 'maintMarginReq'),NEWLINE 'entryPrice': self.safe_number(position, 'avgEntryPrice'),NEWLINE 'notional': self.parse_number(notional),NEWLINE 'leverage': self.parse_number(leverage),NEWLINE 'unrealizedPnl': self.parse_number(unrealisedPnl),NEWLINE 'contracts': self.parse_number(Precise.string_abs(size)),NEWLINE 'contractSize': self.safe_number(market, 'contractSize'),NEWLINE # realisedPnl: position['realised_pnl'],NEWLINE 'marginRatio': None,NEWLINE 'liquidationPrice': self.safe_number(position, 'liquidationPrice'),NEWLINE 'markPrice': self.safe_number(position, 'markPrice'),NEWLINE 'collateral': self.safe_number(position, 'posInit'),NEWLINE 'marginType': None,NEWLINE 'side': side,NEWLINE 'percentage': self.parse_number(Precise.string_div(unrealisedPnl, initialMargin)),NEWLINE }NEWLINENEWLINE def create_order(self, symbol, type, side, amount, price=None, params={}):NEWLINE self.load_markets()NEWLINE market = self.market(symbol)NEWLINE # required param, cannot be used twiceNEWLINE clientOrderId = self.safe_string_2(params, 'clientOid', 'clientOrderId', self.uuid())NEWLINE params = self.omit(params, ['clientOid', 'clientOrderId'])NEWLINE leverage = self.safe_number(params, 'leverage')NEWLINE if not leverage:NEWLINE raise ArgumentsRequired(self.id + ' createOrder requires params.leverage')NEWLINE if amount < 1:NEWLINE raise InvalidOrder('Minimum contract order size using ' + self.id + ' is 1')NEWLINE preciseAmount = int(self.amount_to_precision(symbol, amount))NEWLINE request = {NEWLINE 'clientOid': clientOrderId,NEWLINE 'side': side,NEWLINE 'symbol': market['id'],NEWLINE 'type': type, # limit or marketNEWLINE 'size': preciseAmount,NEWLINE # 'remark': '', # optional remark for the order, length cannot exceed 100 utf8 charactersNEWLINE # 'tradeType': 'TRADE', # TRADE, MARGIN_TRADE # not used with margin ordersNEWLINE # limit orders ---------------------------------------------------NEWLINE # 'timeInForce': 'GTC', # GTC, GTT, IOC, or FOK(default is GTC), limit orders onlyNEWLINE # 
'cancelAfter': long, # cancel after n seconds, requires timeInForce to be GTTNEWLINE            # 'postOnly': False, # Post only flag, invalid when timeInForce is IOC or FOKNEWLINE            # 'hidden': False, # Order will not be displayed in the order bookNEWLINE            # 'iceberg': False, # Only a portion of the order is displayed in the order bookNEWLINE            # 'visibleSize': self.amount_to_precision(symbol, visibleSize), # The maximum visible size of an iceberg orderNEWLINE            # market orders --------------------------------------------------NEWLINE            # 'funds': self.cost_to_precision(symbol, cost), # Amount of quote currency to useNEWLINE            # stop orders ----------------------------------------------------NEWLINE            # 'stop': 'loss', # loss or entry, the default is loss, requires stopPriceNEWLINE            # 'stopPrice': self.price_to_precision(symbol, amount), # need to be defined if stop is specifiedNEWLINE            # 'stopPriceType' # Either TP, IP or MP, Need to be defined if stop is specified.NEWLINE            # margin orders --------------------------------------------------NEWLINE            # 'marginMode': 'cross', # cross(cross mode) and isolated(isolated mode), set to cross by default, the isolated mode will be released soon, stay tunedNEWLINE            # 'autoBorrow': False, # The system will first borrow funds for you at the optimal interest rate and then place an order for youNEWLINE            # futures orders -------------------------------------------------NEWLINE            # reduceOnly #(boolean) A mark to reduce the position size only. Set to False by default. Need to set the position size when reduceOnly is True.NEWLINE            # closeOrder #(boolean) A mark to close the position. Set to False by default. It will close all the positions when closeOrder is True.NEWLINE            # forceHold #(boolean) A mark to forcibly hold the funds for an order, even though it's an order to reduce the position size. This helps the order stay on the order book and not get canceled when the position size changes. Set to False by default.NEWLINE        }NEWLINE        stopPrice = self.safe_number(params, 'stopPrice')NEWLINE        if stopPrice:NEWLINE            request['stop'] = 'down' if side.upper() == 'BUY' else 'up'NEWLINE            stopPriceType = self.safe_string(params, 'stopPriceType')NEWLINE            if not stopPriceType:NEWLINE                raise ArgumentsRequired(self.id + ' trigger orders require params.stopPriceType to be set to TP, IP or MP(Trade Price, Index Price or Mark Price)')NEWLINE        uppercaseType = type.upper()NEWLINE        timeInForce = self.safe_string(params, 'timeInForce')NEWLINE        if uppercaseType == 'LIMIT':NEWLINE            if price is None:NEWLINE                raise ArgumentsRequired(self.id + ' limit orders require the price argument')NEWLINE            else:NEWLINE                request['price'] = self.price_to_precision(symbol, price)NEWLINE            if timeInForce is not None:NEWLINE                timeInForce = timeInForce.upper()NEWLINE                request['timeInForce'] = timeInForceNEWLINE        postOnly = self.safe_value(params, 'postOnly', False)NEWLINE        hidden = self.safe_value(params, 'hidden')NEWLINE        if postOnly and hidden is not None:NEWLINE            raise BadRequest(self.id + ' createOrder cannot contain both params.postOnly and params.hidden')NEWLINE        iceberg = self.safe_value(params, 'iceberg')NEWLINE        if iceberg:NEWLINE            visibleSize = self.safe_value(params, 'visibleSize')NEWLINE            if visibleSize is None:NEWLINE                raise ArgumentsRequired(self.id + ' requires params.visibleSize for iceberg orders')NEWLINE        params = self.omit(params, 'timeInForce') # Time in force only valid for limit orders, exchange error when gtc for market ordersNEWLINE        response = self.futuresPrivatePostOrders(self.extend(request, params))NEWLINE        #NEWLINE        # {NEWLINE        #     code: "200000",NEWLINE        #     data: {NEWLINE        #         orderId: "619717484f1d010001510cde",NEWLINE        #     },NEWLINE        # }NEWLINE        #NEWLINE        data = self.safe_value(response, 'data', {})NEWLINE        return {NEWLINE            'id': self.safe_string(data, 'orderId'),NEWLINE            'clientOrderId': clientOrderId,NEWLINE            'timestamp': None,NEWLINE            'datetime': None,NEWLINE            'lastTradeTimestamp': None,NEWLINE            'symbol': symbol,NEWLINE            'type': type,NEWLINE            'side': side,NEWLINE            'price': price,NEWLINE            'amount': preciseAmount,NEWLINE            'cost': None,NEWLINE            'average': None,NEWLINE            'filled': None,NEWLINE            'remaining': None,NEWLINE            'status': None,NEWLINE            'fee': None,NEWLINE            'trades': None,NEWLINE            'timeInForce': timeInForce,NEWLINE            'postOnly': postOnly,NEWLINE            'stopPrice': stopPrice,NEWLINE            'info': data,NEWLINE        }NEWLINENEWLINE    def cancel_order(self, id, symbol=None, params={}):NEWLINE        self.load_markets()NEWLINE        request = {NEWLINE            'orderId': id,NEWLINE        }NEWLINE        response = self.futuresPrivateDeleteOrdersOrderId(self.extend(request, params))NEWLINE        #NEWLINE        # {NEWLINE        #     code: "200000",NEWLINE        #     data: {NEWLINE        #         cancelledOrderIds: [NEWLINE        #             "619714b8b6353000014c505a",NEWLINE        #         ],NEWLINE        #     },NEWLINE        # }NEWLINE        #NEWLINE        return self.safe_value(response, 'data')NEWLINENEWLINE    def cancel_all_orders(self, symbol=None, params={}):NEWLINE        self.load_markets()NEWLINE        request = {}NEWLINE        if symbol is not None:NEWLINE            request['symbol'] = self.market_id(symbol)NEWLINE        response = self.futuresPrivateDeleteOrders(self.extend(request, params))NEWLINE        # ? futuresPrivateDeleteStopOrdersNEWLINE        # {NEWLINE        #     code: "200000",NEWLINE        #     data: {NEWLINE        #         cancelledOrderIds: [NEWLINE        #             "619714b8b6353000014c505a",NEWLINE        #         ],NEWLINE        #     },NEWLINE        # }NEWLINE        #NEWLINE        return self.safe_value(response, 'data')NEWLINENEWLINE    def fetch_orders_by_status(self, status, symbol=None, since=None, limit=None, params={}):NEWLINE        self.load_markets()NEWLINE        request = {NEWLINE            'status': status,NEWLINE        }NEWLINE        market = NoneNEWLINE        if symbol is not None:NEWLINE            market = self.market(symbol)NEWLINE            request['symbol'] = market['id']NEWLINE        if since is not None:NEWLINE            request['startAt'] = sinceNEWLINE        response = self.futuresPrivateGetOrders(self.extend(request, params))NEWLINE        responseData = self.safe_value(response, 'data', {})NEWLINE        orders = self.safe_value(responseData, 'items', [])NEWLINE        return self.parse_orders(orders, market, since, limit)NEWLINENEWLINE    def fetch_order(self, id=None, symbol=None, params={}):NEWLINE        self.load_markets()NEWLINE        request = {}NEWLINE        method = 'futuresPrivateGetOrdersOrderId'NEWLINE        if id is None:NEWLINE            clientOrderId = self.safe_string_2(params, 'clientOid', 'clientOrderId')NEWLINE            if clientOrderId is None:NEWLINE                raise InvalidOrder(self.id + ' fetchOrder() requires parameter id or params.clientOid')NEWLINE            request['clientOid'] = clientOrderIdNEWLINE            method = 'futuresPrivateGetOrdersByClientOid'NEWLINE            params = self.omit(params, ['clientOid', 'clientOrderId'])NEWLINE        else:NEWLINE            request['orderId'] = idNEWLINE        response = getattr(self, method)(self.extend(request, params))NEWLINE        market = self.market(symbol) if symbol is not None else NoneNEWLINE        responseData = self.safe_value(response, 'data')NEWLINE        return self.parse_order(responseData, market)NEWLINENEWLINE    def parse_order(self, order, market=None):NEWLINE        marketId = self.safe_string(order, 'symbol')NEWLINE        symbol = self.safe_symbol(marketId, market, '-')NEWLINE        orderId = self.safe_string(order, 'id')NEWLINE        type = self.safe_string(order, 'type')NEWLINE        timestamp = self.safe_integer(order, 'createdAt')NEWLINE        datetime = self.iso8601(timestamp)NEWLINE        price = self.safe_string(order, 'price')NEWLINE        # price is zero for market orderNEWLINE        # omitZero is called in safeOrder2NEWLINE        side = self.safe_string(order, 'side')NEWLINE        feeCurrencyId = self.safe_string(order, 'feeCurrency')NEWLINE        feeCurrency = self.safe_currency_code(feeCurrencyId)NEWLINE        feeCost = self.safe_number(order, 'fee')NEWLINE        amount = self.safe_string(order, 'size')NEWLINE        filled = self.safe_string(order, 'dealSize')NEWLINE        cost = self.safe_string(order, 'dealFunds')NEWLINE        # boolNEWLINE        isActive = self.safe_value(order, 'isActive', False)NEWLINE        cancelExist = self.safe_value(order, 'cancelExist', False)NEWLINE        status = 'open' if isActive else 'closed'NEWLINE        status = 'canceled' if cancelExist else statusNEWLINE        fee = {NEWLINE            'currency': feeCurrency,NEWLINE            'cost': feeCost,NEWLINE        }NEWLINE        clientOrderId = self.safe_string(order, 'clientOid')NEWLINE        timeInForce = self.safe_string(order, 'timeInForce')NEWLINE        stopPrice = self.safe_number(order, 'stopPrice')NEWLINE        postOnly = self.safe_value(order, 'postOnly')NEWLINE        return self.safeOrder2({NEWLINE            'id': orderId,NEWLINE            'clientOrderId': clientOrderId,NEWLINE            'symbol': symbol,NEWLINE            'type': type,NEWLINE            'timeInForce': timeInForce,NEWLINE            'postOnly': postOnly,NEWLINE            'side': side,NEWLINE            'amount': amount,NEWLINE            'price': price,NEWLINE            'stopPrice': stopPrice,NEWLINE            'cost': cost,NEWLINE            'filled': filled,NEWLINE            'remaining': None,NEWLINE            'timestamp': timestamp,NEWLINE            
'datetime': datetime,NEWLINE 'fee': fee,NEWLINE 'status': status,NEWLINE 'info': order,NEWLINE 'lastTradeTimestamp': None,NEWLINE 'average': None,NEWLINE 'trades': None,NEWLINE }, market)NEWLINENEWLINE def fetch_funding_rate(self, symbol, params={}):NEWLINE self.load_markets()NEWLINE request = {NEWLINE 'symbol': self.market_id(symbol),NEWLINE }NEWLINE response = self.futuresPublicGetFundingRateSymbolCurrent(self.extend(request, params))NEWLINE #NEWLINE # {NEWLINE # code: "200000",NEWLINE # data: {NEWLINE # symbol: ".ETHUSDTMFPI8H",NEWLINE # granularity: 28800000,NEWLINE # timePoint: 1637380800000,NEWLINE # value: 0.0001,NEWLINE # predictedValue: 0.0001,NEWLINE # },NEWLINE # }NEWLINE #NEWLINE data = self.safe_value(response, 'data')NEWLINE timestamp = self.safe_number(data, 'timePoint')NEWLINE return {NEWLINE 'info': data,NEWLINE 'symbol': symbol,NEWLINE 'markPrice': None,NEWLINE 'indexPrice': None,NEWLINE 'interestRate': None,NEWLINE 'estimatedSettlePrice': None,NEWLINE 'timestamp': None,NEWLINE 'datetime': None,NEWLINE 'previousFundingRate': self.safe_number(data, 'value'),NEWLINE 'nextFundingRate': self.safe_number(data, 'predictedValue'),NEWLINE 'previousFundingTimestamp': timestamp,NEWLINE 'nextFundingTimestamp': None,NEWLINE 'previousFundingDatetime': self.iso8601(timestamp),NEWLINE 'nextFundingDatetime': None,NEWLINE }NEWLINENEWLINE def fetch_balance(self, params={}):NEWLINE self.load_markets()NEWLINE # only fetches one balance at a timeNEWLINE # by default it will only fetch the BTC balance of the futures accountNEWLINE # you can send 'currency' in params to fetch other currenciesNEWLINE # fetchBalance({'type': 'futures', 'currency': 'USDT'})NEWLINE response = self.futuresPrivateGetAccountOverview(params)NEWLINE #NEWLINE # {NEWLINE # code: '200000',NEWLINE # data: {NEWLINE # accountEquity: 0.00005,NEWLINE # unrealisedPNL: 0,NEWLINE # marginBalance: 0.00005,NEWLINE # positionMargin: 0,NEWLINE # orderMargin: 0,NEWLINE # frozenFunds: 0,NEWLINE # availableBalance: 0.00005,NEWLINE # currency: 'XBT'NEWLINE # }NEWLINE # }NEWLINE #NEWLINE result = {NEWLINE 'info': response,NEWLINE 'timestamp': None,NEWLINE 'datetime': None,NEWLINE }NEWLINE data = self.safe_value(response, 'data')NEWLINE currencyId = self.safe_string(data, 'currency')NEWLINE code = self.safe_currency_code(currencyId)NEWLINE account = self.account()NEWLINE account['free'] = self.safe_string(data, 'availableBalance')NEWLINE account['total'] = self.safe_string(data, 'accountEquity')NEWLINE result[code] = accountNEWLINE return self.parse_balance(result)NEWLINENEWLINE def transfer(self, code, amount, fromAccount, toAccount, params={}):NEWLINE if (toAccount != 'spot' and toAccount != 'trade' and toAccount != 'trading') or (fromAccount != 'futures' and fromAccount != 'contract'):NEWLINE raise BadRequest(self.id + ' only supports transfers from contract(futures) account to trade(spot) account')NEWLINE return self.transfer_out(code, amount, params)NEWLINENEWLINE def transfer_out(self, code, amount, params={}):NEWLINE self.load_markets()NEWLINE currency = self.currency(code)NEWLINE request = {NEWLINE 'currency': self.safe_string(currency, 'id'), # Currency,including XBT,USDTNEWLINE 'amount': amount,NEWLINE }NEWLINE # transfer from usdm futures wallet to spot walletNEWLINE response = self.futuresPrivatePostTransferOut(self.extend(request, params))NEWLINE #NEWLINE # {NEWLINE # "code": "200000",NEWLINE # "data": {NEWLINE # "applyId": "5bffb63303aa675e8bbe18f9" # Transfer-out request IDNEWLINE # }NEWLINE # }NEWLINE #NEWLINE data = 
self.safe_value(response, 'data')NEWLINE        timestamp = self.safe_string(data, 'updatedAt')NEWLINE        return {NEWLINE            'info': response,NEWLINE            'id': self.safe_string(data, 'applyId'),NEWLINE            'timestamp': timestamp,NEWLINE            'datetime': self.iso8601(timestamp),NEWLINE            'currency': code,NEWLINE            'amount': amount,NEWLINE            'fromAccount': 'futures',NEWLINE            'toAccount': 'spot',NEWLINE            'status': self.safe_string(data, 'status'),NEWLINE        }NEWLINENEWLINE    def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):NEWLINE        self.load_markets()NEWLINE        request = {NEWLINE            # orderId(String) [optional] Fills for a specific order(other parameters can be ignored if specified)NEWLINE            # symbol(String) [optional] Symbol of the contractNEWLINE            # side(String) [optional] buy or sellNEWLINE            # type(String) [optional] limit, market, limit_stop or market_stopNEWLINE            # startAt(long) [optional] Start time(millisecond)NEWLINE            # endAt(long) [optional] End time(millisecond)NEWLINE        }NEWLINE        market = NoneNEWLINE        if symbol is not None:NEWLINE            market = self.market(symbol)NEWLINE            request['symbol'] = market['id']NEWLINE        if since is not None:NEWLINE            request['startAt'] = sinceNEWLINE        response = self.futuresPrivateGetFills(self.extend(request, params))NEWLINE        #NEWLINE        # {NEWLINE        #     "code": "200000",NEWLINE        #     "data": {NEWLINE        #         "currentPage": 1,NEWLINE        #         "pageSize": 1,NEWLINE        #         "totalNum": 251915,NEWLINE        #         "totalPage": 251915,NEWLINE        #         "items": [NEWLINE        #             {NEWLINE        #                 "symbol": "XBTUSDM", # Ticker symbol of the contractNEWLINE        #                 "tradeId": "5ce24c1f0c19fc3c58edc47c", # Trade IDNEWLINE        #                 "orderId": "5ce24c16b210233c36ee321d", # Order IDNEWLINE        #                 "side": "sell", # Transaction sideNEWLINE        #                 "liquidity": "taker", # Liquidity- taker or makerNEWLINE        #                 "price": "8302", # Filled priceNEWLINE        #                 "size": 10, # Filled amountNEWLINE        #                 "value": "0.001204529", # Order valueNEWLINE        #                 "feeRate": "0.0005", # Floating feesNEWLINE        #                 "fixFee": "0.00000006", # Fixed feesNEWLINE        #                 "feeCurrency": "XBT", # Charging currencyNEWLINE        #                 "stop": "", # A mark to the stop order typeNEWLINE        #                 "fee": "0.0000012022", # Transaction feeNEWLINE        #                 "orderType": "limit", # Order typeNEWLINE        #                 "tradeType": "trade", # Trade type(trade, liquidation, ADL or settlement)NEWLINE        #                 "createdAt": 1558334496000, # Time the order createdNEWLINE        #                 "settleCurrency": "XBT", # settlement currencyNEWLINE        #                 "tradeTime": 1558334496000000000 # trade time in nanosecondNEWLINE        #             }NEWLINE        #         ]NEWLINE        #     }NEWLINE        # }NEWLINE        #NEWLINE        data = self.safe_value(response, 'data', {})NEWLINE        trades = self.safe_value(data, 'items', {})NEWLINE        return self.parse_trades(trades, market, since, limit)NEWLINENEWLINE    def fetch_trades(self, symbol, since=None, limit=None, params={}):NEWLINE        self.load_markets()NEWLINE        market = self.market(symbol)NEWLINE        request = {NEWLINE            'symbol': market['id'],NEWLINE        }NEWLINE        response = self.futuresPublicGetTradeHistory(self.extend(request, params))NEWLINE        #NEWLINE        # {NEWLINE        #     "code": "200000",NEWLINE        #     "data": [NEWLINE        #         {NEWLINE        #             "sequence": 32114961,NEWLINE        #             "side": "buy",NEWLINE        #             "size": 39,NEWLINE        #             "price": "4001.6500000000",NEWLINE        #             "takerOrderId": "61c20742f172110001e0ebe4",NEWLINE        #             "makerOrderId": "61c2073fcfc88100010fcb5d",NEWLINE        #             "tradeId": "61c2074277a0c473e69029b8",NEWLINE        #             "ts": 1640105794099993896 # filled timeNEWLINE        #         }NEWLINE        #     ]NEWLINE        # }NEWLINE        #NEWLINE        trades = self.safe_value(response, 'data', [])NEWLINE        return self.parse_trades(trades, market, since, limit)NEWLINENEWLINE    def parse_trade(self, trade, market=None):NEWLINE        #NEWLINE        # 
fetchTrades(public)NEWLINE #NEWLINE # {NEWLINE # "sequence": 32114961,NEWLINE # "side": "buy",NEWLINE # "size": 39,NEWLINE # "price": "4001.6500000000",NEWLINE # "takerOrderId": "61c20742f172110001e0ebe4",NEWLINE # "makerOrderId": "61c2073fcfc88100010fcb5d",NEWLINE # "tradeId": "61c2074277a0c473e69029b8",NEWLINE # "ts": 1640105794099993896 # filled timeNEWLINE # }NEWLINE #NEWLINE # fetchMyTrades(private) v2NEWLINE #NEWLINE # {NEWLINE # "symbol":"BTC-USDT",NEWLINE # "tradeId":"5c35c02709e4f67d5266954e",NEWLINE # "orderId":"5c35c02703aa673ceec2a168",NEWLINE # "counterOrderId":"5c1ab46003aa676e487fa8e3",NEWLINE # "side":"buy",NEWLINE # "liquidity":"taker",NEWLINE # "forceTaker":true,NEWLINE # "price":"0.083",NEWLINE # "size":"0.8424304",NEWLINE # "funds":"0.0699217232",NEWLINE # "fee":"0",NEWLINE # "feeRate":"0",NEWLINE # "feeCurrency":"USDT",NEWLINE # "stop":"",NEWLINE # "type":"limit",NEWLINE # "createdAt":1547026472000NEWLINE # }NEWLINE #NEWLINE marketId = self.safe_string(trade, 'symbol')NEWLINE symbol = self.safe_symbol(marketId, market, '-')NEWLINE id = self.safe_string_2(trade, 'tradeId', 'id')NEWLINE orderId = self.safe_string(trade, 'orderId')NEWLINE takerOrMaker = self.safe_string(trade, 'liquidity')NEWLINE timestamp = self.safe_integer(trade, 'time')NEWLINE if timestamp is not None:NEWLINE timestamp = int(timestamp / 1000000)NEWLINE else:NEWLINE timestamp = self.safe_integer(trade, 'createdAt')NEWLINE # if it's a historical v1 trade, the exchange returns timestamp in secondsNEWLINE if ('dealValue' in trade) and (timestamp is not None):NEWLINE timestamp = timestamp * 1000NEWLINE priceString = self.safe_string_2(trade, 'price', 'dealPrice')NEWLINE amountString = self.safe_string_2(trade, 'size', 'amount')NEWLINE price = self.parse_number(priceString)NEWLINE amount = self.parse_number(amountString)NEWLINE side = self.safe_string(trade, 'side')NEWLINE fee = NoneNEWLINE feeCost = self.safe_number(trade, 'fee')NEWLINE if feeCost is not None:NEWLINE feeCurrencyId = self.safe_string(trade, 'feeCurrency')NEWLINE feeCurrency = self.safe_currency_code(feeCurrencyId)NEWLINE if feeCurrency is None:NEWLINE if market is not None:NEWLINE feeCurrency = market['quote'] if (side == 'sell') else market['base']NEWLINE fee = {NEWLINE 'cost': feeCost,NEWLINE 'currency': feeCurrency,NEWLINE 'rate': self.safe_number(trade, 'feeRate'),NEWLINE }NEWLINE type = self.safe_string_2(trade, 'type', 'orderType')NEWLINE if type == 'match':NEWLINE type = NoneNEWLINE cost = self.safe_number_2(trade, 'funds', 'dealValue')NEWLINE if cost is None:NEWLINE market = self.market(symbol)NEWLINE contractSize = self.safe_string(market, 'contractSize')NEWLINE contractCost = Precise.string_mul(priceString, amountString)NEWLINE if contractSize and contractCost:NEWLINE cost = self.parse_number(Precise.string_mul(contractCost, contractSize))NEWLINE return {NEWLINE 'info': trade,NEWLINE 'id': id,NEWLINE 'order': orderId,NEWLINE 'timestamp': timestamp,NEWLINE 'datetime': self.iso8601(timestamp),NEWLINE 'symbol': symbol,NEWLINE 'type': type,NEWLINE 'takerOrMaker': takerOrMaker,NEWLINE 'side': side,NEWLINE 'price': price,NEWLINE 'amount': amount,NEWLINE 'cost': cost,NEWLINE 'fee': fee,NEWLINE }NEWLINENEWLINE def fetch_deposits(self, code=None, since=None, limit=None, params={}):NEWLINE self.load_markets()NEWLINE request = {}NEWLINE currency = NoneNEWLINE if code is not None:NEWLINE currency = self.currency(code)NEWLINE request['currency'] = currency['id']NEWLINE if limit is not None:NEWLINE request['pageSize'] = limitNEWLINE if since is 
not None:NEWLINE request['startAt'] = sinceNEWLINE response = self.futuresPrivateGetDepositList(self.extend(request, params))NEWLINE #NEWLINE # {NEWLINE # code: '200000',NEWLINE # data: {NEWLINE # "currentPage": 1,NEWLINE # "pageSize": 5,NEWLINE # "totalNum": 2,NEWLINE # "totalPage": 1,NEWLINE # "items": [NEWLINE # {NEWLINE # "address": "0x5f047b29041bcfdbf0e4478cdfa753a336ba6989",NEWLINE # "memo": "5c247c8a03aa677cea2a251d",NEWLINE # "amount": 1,NEWLINE # "fee": 0.0001,NEWLINE # "currency": "KCS",NEWLINE # "isInner": False,NEWLINE # "walletTxId": "5bbb57386d99522d9f954c5a@test004",NEWLINE # "status": "SUCCESS",NEWLINE # "createdAt": 1544178843000,NEWLINE # "updatedAt": 1544178891000NEWLINE # "remark":"foobar"NEWLINE # },NEWLINE # ...NEWLINE # ]NEWLINE # }NEWLINE # }NEWLINE #NEWLINE responseData = response['data']['items']NEWLINE return self.parse_transactions(responseData, currency, since, limit, {'type': 'deposit'})NEWLINENEWLINE def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):NEWLINE self.load_markets()NEWLINE request = {}NEWLINE currency = NoneNEWLINE if code is not None:NEWLINE currency = self.currency(code)NEWLINE request['currency'] = currency['id']NEWLINE if limit is not None:NEWLINE request['pageSize'] = limitNEWLINE if since is not None:NEWLINE request['startAt'] = sinceNEWLINE response = self.futuresPrivateGetWithdrawalList(self.extend(request, params))NEWLINE #NEWLINE # {NEWLINE # code: '200000',NEWLINE # data: {NEWLINE # "currentPage": 1,NEWLINE # "pageSize": 5,NEWLINE # "totalNum": 2,NEWLINE # "totalPage": 1,NEWLINE # "items": [NEWLINE # {NEWLINE # "id": "5c2dc64e03aa675aa263f1ac",NEWLINE # "address": "0x5bedb060b8eb8d823e2414d82acce78d38be7fe9",NEWLINE # "memo": "",NEWLINE # "currency": "ETH",NEWLINE # "amount": 1.0000000,NEWLINE # "fee": 0.0100000,NEWLINE # "walletTxId": "3e2414d82acce78d38be7fe9",NEWLINE # "isInner": False,NEWLINE # "status": "FAILURE",NEWLINE # "createdAt": 1546503758000,NEWLINE # "updatedAt": 1546504603000NEWLINE # },NEWLINE # ...NEWLINE # ]NEWLINE # }NEWLINE # }NEWLINE #NEWLINE responseData = response['data']['items']NEWLINE return self.parse_transactions(responseData, currency, since, limit, {'type': 'withdrawal'})NEWLINENEWLINE def fetch_funding_fee(self, code, params={}):NEWLINE raise BadRequest(self.id + ' has no method fetchFundingFee')NEWLINENEWLINE def fetch_ledger(self, code=None, since=None, limit=None, params={}):NEWLINE raise BadRequest(self.id + ' has no method fetchLedger')NEWLINE
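The row above ends a ccxt-style futures-exchange implementation (market metadata, OHLCV, order books, tickers, orders, positions, funding history, and transfers). A minimal usage sketch follows, under the assumption that this row is the kucoinfutures exchange bundled with the ccxt Python package; the class name and constructor sit outside this excerpt, so ccxt.kucoinfutures is an assumption rather than something the row itself shows:

import ccxt  # assumption: the class in the row above ships as part of the ccxt package

exchange = ccxt.kucoinfutures()  # assumed exchange id; public endpoints need no credentials
markets = exchange.fetch_markets()  # GET contracts/active, parsed by the fetch_markets above
print(markets[0]['symbol'])  # symbols are built as base + '/' + quote + ':' + settle, e.g. 'ETH/USDT:USDT'
candles = exchange.fetch_ohlcv('ETH/USDT:USDT', '15m', limit=10)  # granularity comes from self.timeframes
book = exchange.fetch_order_book('ETH/USDT:USDT', limit=20)  # the fetch_order_book above accepts only 20 or 100

# Private endpoints additionally need the credentials this exchange requires (apiKey, secret, password),
# and the create_order above raises ArgumentsRequired unless params contains 'leverage':
# authed = ccxt.kucoinfutures({'apiKey': '...', 'secret': '...', 'password': '...'})
# order = authed.create_order('ETH/USDT:USDT', 'limit', 'buy', 1, 4000.0, {'leverage': 5})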
# coding=utf-8NEWLINE# --------------------------------------------------------------------------NEWLINE# Copyright (c) Microsoft Corporation. All rights reserved.NEWLINE# Licensed under the MIT License. See License.txt in the project root for license information.NEWLINE# Code generated by Microsoft (R) AutoRest Code Generator.NEWLINE# Changes may cause incorrect behavior and will be lost if the code is regenerated.NEWLINE# --------------------------------------------------------------------------NEWLINEfrom typing import TYPE_CHECKINGNEWLINEimport warningsNEWLINENEWLINEfrom azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_errorNEWLINEfrom azure.core.paging import ItemPagedNEWLINEfrom azure.core.pipeline import PipelineResponseNEWLINEfrom azure.core.pipeline.transport import HttpRequest, HttpResponseNEWLINEfrom azure.core.polling import LROPoller, NoPolling, PollingMethodNEWLINEfrom azure.mgmt.core.exceptions import ARMErrorFormatNEWLINEfrom azure.mgmt.core.polling.arm_polling import ARMPollingNEWLINENEWLINEfrom .. import models as _modelsNEWLINENEWLINEif TYPE_CHECKING:NEWLINE # pylint: disable=unused-import,ungrouped-importsNEWLINE from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, UnionNEWLINENEWLINE T = TypeVar('T')NEWLINE ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]NEWLINENEWLINEclass StorageAccountsOperations(object):NEWLINE """StorageAccountsOperations operations.NEWLINENEWLINE You should not instantiate this class directly. Instead, you should create a Client instance thatNEWLINE instantiates it for you and attaches it as an attribute.NEWLINENEWLINE :ivar models: Alias to model classes used in this operation group.NEWLINE :type models: ~azure.mgmt.storage.v2018_07_01.modelsNEWLINE :param client: Client for service requests.NEWLINE :param config: Configuration of service client.NEWLINE :param serializer: An object model serializer.NEWLINE :param deserializer: An object model deserializer.NEWLINE """NEWLINENEWLINE models = _modelsNEWLINENEWLINE def __init__(self, client, config, serializer, deserializer):NEWLINE self._client = clientNEWLINE self._serialize = serializerNEWLINE self._deserialize = deserializerNEWLINE self._config = configNEWLINENEWLINE def check_name_availability(NEWLINE self,NEWLINE account_name, # type: "_models.StorageAccountCheckNameAvailabilityParameters"NEWLINE **kwargs # type: AnyNEWLINE ):NEWLINE # type: (...) 
-> "_models.CheckNameAvailabilityResult"NEWLINE """Checks that the storage account name is valid and is not already in use.NEWLINENEWLINE :param account_name: The name of the storage account within the specified resource group.NEWLINE Storage account names must be between 3 and 24 characters in length and use numbers andNEWLINE lower-case letters only.NEWLINE :type account_name: ~azure.mgmt.storage.v2018_07_01.models.StorageAccountCheckNameAvailabilityParametersNEWLINE :keyword callable cls: A custom type or function that will be passed the direct responseNEWLINE :return: CheckNameAvailabilityResult, or the result of cls(response)NEWLINE :rtype: ~azure.mgmt.storage.v2018_07_01.models.CheckNameAvailabilityResultNEWLINE :raises: ~azure.core.exceptions.HttpResponseErrorNEWLINE """NEWLINE cls = kwargs.pop('cls', None) # type: ClsType["_models.CheckNameAvailabilityResult"]NEWLINE error_map = {NEWLINE 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsErrorNEWLINE }NEWLINE error_map.update(kwargs.pop('error_map', {}))NEWLINE api_version = "2018-07-01"NEWLINE content_type = kwargs.pop("content_type", "application/json")NEWLINE accept = "application/json"NEWLINENEWLINE # Construct URLNEWLINE url = self.check_name_availability.metadata['url'] # type: ignoreNEWLINE path_format_arguments = {NEWLINE 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),NEWLINE }NEWLINE url = self._client.format_url(url, **path_format_arguments)NEWLINENEWLINE # Construct parametersNEWLINE query_parameters = {} # type: Dict[str, Any]NEWLINE query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')NEWLINENEWLINE # Construct headersNEWLINE header_parameters = {} # type: Dict[str, Any]NEWLINE header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')NEWLINE header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')NEWLINENEWLINE body_content_kwargs = {} # type: Dict[str, Any]NEWLINE body_content = self._serialize.body(account_name, 'StorageAccountCheckNameAvailabilityParameters')NEWLINE body_content_kwargs['content'] = body_contentNEWLINE request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)NEWLINE pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)NEWLINE response = pipeline_response.http_responseNEWLINENEWLINE if response.status_code not in [200]:NEWLINE map_error(status_code=response.status_code, response=response, error_map=error_map)NEWLINE raise HttpResponseError(response=response, error_format=ARMErrorFormat)NEWLINENEWLINE deserialized = self._deserialize('CheckNameAvailabilityResult', pipeline_response)NEWLINENEWLINE if cls:NEWLINE return cls(pipeline_response, deserialized, {})NEWLINENEWLINE return deserializedNEWLINE check_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability'} # type: ignoreNEWLINENEWLINE def _create_initial(NEWLINE self,NEWLINE resource_group_name, # type: strNEWLINE account_name, # type: strNEWLINE parameters, # type: "_models.StorageAccountCreateParameters"NEWLINE **kwargs # type: AnyNEWLINE ):NEWLINE # type: (...) 
-> Optional["_models.StorageAccount"]NEWLINE cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.StorageAccount"]]NEWLINE error_map = {NEWLINE 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsErrorNEWLINE }NEWLINE error_map.update(kwargs.pop('error_map', {}))NEWLINE api_version = "2018-07-01"NEWLINE content_type = kwargs.pop("content_type", "application/json")NEWLINE accept = "application/json"NEWLINENEWLINE # Construct URLNEWLINE url = self._create_initial.metadata['url'] # type: ignoreNEWLINE path_format_arguments = {NEWLINE 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),NEWLINE 'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),NEWLINE 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),NEWLINE }NEWLINE url = self._client.format_url(url, **path_format_arguments)NEWLINENEWLINE # Construct parametersNEWLINE query_parameters = {} # type: Dict[str, Any]NEWLINE query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')NEWLINENEWLINE # Construct headersNEWLINE header_parameters = {} # type: Dict[str, Any]NEWLINE header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')NEWLINE header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')NEWLINENEWLINE body_content_kwargs = {} # type: Dict[str, Any]NEWLINE body_content = self._serialize.body(parameters, 'StorageAccountCreateParameters')NEWLINE body_content_kwargs['content'] = body_contentNEWLINE request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)NEWLINE pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)NEWLINE response = pipeline_response.http_responseNEWLINENEWLINE if response.status_code not in [200, 202]:NEWLINE map_error(status_code=response.status_code, response=response, error_map=error_map)NEWLINE raise HttpResponseError(response=response, error_format=ARMErrorFormat)NEWLINENEWLINE deserialized = NoneNEWLINE if response.status_code == 200:NEWLINE deserialized = self._deserialize('StorageAccount', pipeline_response)NEWLINENEWLINE if cls:NEWLINE return cls(pipeline_response, deserialized, {})NEWLINENEWLINE return deserializedNEWLINE _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignoreNEWLINENEWLINE def begin_create(NEWLINE self,NEWLINE resource_group_name, # type: strNEWLINE account_name, # type: strNEWLINE parameters, # type: "_models.StorageAccountCreateParameters"NEWLINE **kwargs # type: AnyNEWLINE ):NEWLINE # type: (...) -> LROPoller["_models.StorageAccount"]NEWLINE """Asynchronously creates a new storage account with the specified parameters. If an account isNEWLINE already created and a subsequent create request is issued with different properties, theNEWLINE account properties will be updated. If an account is already created and a subsequent create orNEWLINE update request is issued with the exact same set of properties, the request will succeed.NEWLINENEWLINE :param resource_group_name: The name of the resource group within the user's subscription. 
TheNEWLINE name is case insensitive.NEWLINE :type resource_group_name: strNEWLINE :param account_name: The name of the storage account within the specified resource group.NEWLINE Storage account names must be between 3 and 24 characters in length and use numbers andNEWLINE lower-case letters only.NEWLINE :type account_name: strNEWLINE :param parameters: The parameters to provide for the created account.NEWLINE :type parameters: ~azure.mgmt.storage.v2018_07_01.models.StorageAccountCreateParametersNEWLINE :keyword callable cls: A custom type or function that will be passed the direct responseNEWLINE :keyword str continuation_token: A continuation token to restart a poller from a saved state.NEWLINE :keyword polling: By default, your polling method will be ARMPolling.NEWLINE Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.NEWLINE :paramtype polling: bool or ~azure.core.polling.PollingMethodNEWLINE :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.NEWLINE :return: An instance of LROPoller that returns either StorageAccount or the result of cls(response)NEWLINE :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.storage.v2018_07_01.models.StorageAccount]NEWLINE :raises ~azure.core.exceptions.HttpResponseError:NEWLINE """NEWLINE polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]NEWLINE cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccount"]NEWLINE lro_delay = kwargs.pop(NEWLINE 'polling_interval',NEWLINE self._config.polling_intervalNEWLINE )NEWLINE cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]NEWLINE if cont_token is None:NEWLINE raw_result = self._create_initial(NEWLINE resource_group_name=resource_group_name,NEWLINE account_name=account_name,NEWLINE parameters=parameters,NEWLINE cls=lambda x,y,z: x,NEWLINE **kwargsNEWLINE )NEWLINENEWLINE kwargs.pop('error_map', None)NEWLINE kwargs.pop('content_type', None)NEWLINENEWLINE def get_long_running_output(pipeline_response):NEWLINE deserialized = self._deserialize('StorageAccount', pipeline_response)NEWLINENEWLINE if cls:NEWLINE return cls(pipeline_response, deserialized, {})NEWLINE return deserializedNEWLINENEWLINE path_format_arguments = {NEWLINE 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),NEWLINE 'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),NEWLINE 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),NEWLINE }NEWLINENEWLINE if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)NEWLINE elif polling is False: polling_method = NoPolling()NEWLINE else: polling_method = pollingNEWLINE if cont_token:NEWLINE return LROPoller.from_continuation_token(NEWLINE polling_method=polling_method,NEWLINE continuation_token=cont_token,NEWLINE client=self._client,NEWLINE deserialization_callback=get_long_running_outputNEWLINE )NEWLINE else:NEWLINE return LROPoller(self._client, raw_result, get_long_running_output, polling_method)NEWLINE begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignoreNEWLINENEWLINE def delete(NEWLINE self,NEWLINE 
resource_group_name, # type: strNEWLINE account_name, # type: strNEWLINE **kwargs # type: AnyNEWLINE ):NEWLINE # type: (...) -> NoneNEWLINE """Deletes a storage account in Microsoft Azure.NEWLINENEWLINE :param resource_group_name: The name of the resource group within the user's subscription. TheNEWLINE name is case insensitive.NEWLINE :type resource_group_name: strNEWLINE :param account_name: The name of the storage account within the specified resource group.NEWLINE Storage account names must be between 3 and 24 characters in length and use numbers andNEWLINE lower-case letters only.NEWLINE :type account_name: strNEWLINE :keyword callable cls: A custom type or function that will be passed the direct responseNEWLINE :return: None, or the result of cls(response)NEWLINE :rtype: NoneNEWLINE :raises: ~azure.core.exceptions.HttpResponseErrorNEWLINE """NEWLINE cls = kwargs.pop('cls', None) # type: ClsType[None]NEWLINE error_map = {NEWLINE 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsErrorNEWLINE }NEWLINE error_map.update(kwargs.pop('error_map', {}))NEWLINE api_version = "2018-07-01"NEWLINENEWLINE # Construct URLNEWLINE url = self.delete.metadata['url'] # type: ignoreNEWLINE path_format_arguments = {NEWLINE 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),NEWLINE 'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),NEWLINE 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),NEWLINE }NEWLINE url = self._client.format_url(url, **path_format_arguments)NEWLINENEWLINE # Construct parametersNEWLINE query_parameters = {} # type: Dict[str, Any]NEWLINE query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')NEWLINENEWLINE # Construct headersNEWLINE header_parameters = {} # type: Dict[str, Any]NEWLINENEWLINE request = self._client.delete(url, query_parameters, header_parameters)NEWLINE pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)NEWLINE response = pipeline_response.http_responseNEWLINENEWLINE if response.status_code not in [200, 204]:NEWLINE map_error(status_code=response.status_code, response=response, error_map=error_map)NEWLINE raise HttpResponseError(response=response, error_format=ARMErrorFormat)NEWLINENEWLINE if cls:NEWLINE return cls(pipeline_response, None, {})NEWLINENEWLINE delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignoreNEWLINENEWLINE def get_properties(NEWLINE self,NEWLINE resource_group_name, # type: strNEWLINE account_name, # type: strNEWLINE expand="geoReplicationStats", # type: Optional[str]NEWLINE **kwargs # type: AnyNEWLINE ):NEWLINE # type: (...) -> "_models.StorageAccount"NEWLINE """Returns the properties for the specified storage account including but not limited to name, SKUNEWLINE name, location, and account status. The ListKeys operation should be used to retrieve storageNEWLINE keys.NEWLINENEWLINE :param resource_group_name: The name of the resource group within the user's subscription. 
TheNEWLINE name is case insensitive.NEWLINE :type resource_group_name: strNEWLINE :param account_name: The name of the storage account within the specified resource group.NEWLINE Storage account names must be between 3 and 24 characters in length and use numbers andNEWLINE lower-case letters only.NEWLINE :type account_name: strNEWLINE :param expand: May be used to expand the properties within account's properties. By default,NEWLINE data is not included when fetching properties. Currently we only support geoReplicationStats.NEWLINE :type expand: strNEWLINE :keyword callable cls: A custom type or function that will be passed the direct responseNEWLINE :return: StorageAccount, or the result of cls(response)NEWLINE :rtype: ~azure.mgmt.storage.v2018_07_01.models.StorageAccountNEWLINE :raises: ~azure.core.exceptions.HttpResponseErrorNEWLINE """NEWLINE cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccount"]NEWLINE error_map = {NEWLINE 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsErrorNEWLINE }NEWLINE error_map.update(kwargs.pop('error_map', {}))NEWLINE api_version = "2018-07-01"NEWLINE accept = "application/json"NEWLINENEWLINE # Construct URLNEWLINE url = self.get_properties.metadata['url'] # type: ignoreNEWLINE path_format_arguments = {NEWLINE 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),NEWLINE 'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),NEWLINE 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),NEWLINE }NEWLINE url = self._client.format_url(url, **path_format_arguments)NEWLINENEWLINE # Construct parametersNEWLINE query_parameters = {} # type: Dict[str, Any]NEWLINE query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')NEWLINE if expand is not None:NEWLINE query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')NEWLINENEWLINE # Construct headersNEWLINE header_parameters = {} # type: Dict[str, Any]NEWLINE header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')NEWLINENEWLINE request = self._client.get(url, query_parameters, header_parameters)NEWLINE pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)NEWLINE response = pipeline_response.http_responseNEWLINENEWLINE if response.status_code not in [200]:NEWLINE map_error(status_code=response.status_code, response=response, error_map=error_map)NEWLINE raise HttpResponseError(response=response, error_format=ARMErrorFormat)NEWLINENEWLINE deserialized = self._deserialize('StorageAccount', pipeline_response)NEWLINENEWLINE if cls:NEWLINE return cls(pipeline_response, deserialized, {})NEWLINENEWLINE return deserializedNEWLINE get_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignoreNEWLINENEWLINE def update(NEWLINE self,NEWLINE resource_group_name, # type: strNEWLINE account_name, # type: strNEWLINE parameters, # type: "_models.StorageAccountUpdateParameters"NEWLINE **kwargs # type: AnyNEWLINE ):NEWLINE # type: (...) -> "_models.StorageAccount"NEWLINE """The update operation can be used to update the SKU, encryption, access tier, or tags for aNEWLINE storage account. It can also be used to map the account to a custom domain. 
Only one customNEWLINE domain is supported per storage account; the replacement/change of custom domain is notNEWLINE supported. In order to replace an old custom domain, the old value must be cleared/unregisteredNEWLINE before a new value can be set. The update of multiple properties is supported. This call doesNEWLINE not change the storage keys for the account. If you want to change the storage account keys,NEWLINE use the regenerate keys operation. The location and name of the storage account cannot beNEWLINE changed after creation.NEWLINENEWLINE :param resource_group_name: The name of the resource group within the user's subscription. TheNEWLINE name is case insensitive.NEWLINE :type resource_group_name: strNEWLINE :param account_name: The name of the storage account within the specified resource group.NEWLINE Storage account names must be between 3 and 24 characters in length and use numbers andNEWLINE lower-case letters only.NEWLINE :type account_name: strNEWLINE :param parameters: The parameters to provide for the updated account.NEWLINE :type parameters: ~azure.mgmt.storage.v2018_07_01.models.StorageAccountUpdateParametersNEWLINE :keyword callable cls: A custom type or function that will be passed the direct responseNEWLINE :return: StorageAccount, or the result of cls(response)NEWLINE :rtype: ~azure.mgmt.storage.v2018_07_01.models.StorageAccountNEWLINE :raises: ~azure.core.exceptions.HttpResponseErrorNEWLINE """NEWLINE cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccount"]NEWLINE error_map = {NEWLINE 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsErrorNEWLINE }NEWLINE error_map.update(kwargs.pop('error_map', {}))NEWLINE api_version = "2018-07-01"NEWLINE content_type = kwargs.pop("content_type", "application/json")NEWLINE accept = "application/json"NEWLINENEWLINE # Construct URLNEWLINE url = self.update.metadata['url'] # type: ignoreNEWLINE path_format_arguments = {NEWLINE 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),NEWLINE 'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),NEWLINE 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),NEWLINE }NEWLINE url = self._client.format_url(url, **path_format_arguments)NEWLINENEWLINE # Construct parametersNEWLINE query_parameters = {} # type: Dict[str, Any]NEWLINE query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')NEWLINENEWLINE # Construct headersNEWLINE header_parameters = {} # type: Dict[str, Any]NEWLINE header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')NEWLINE header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')NEWLINENEWLINE body_content_kwargs = {} # type: Dict[str, Any]NEWLINE body_content = self._serialize.body(parameters, 'StorageAccountUpdateParameters')NEWLINE body_content_kwargs['content'] = body_contentNEWLINE request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)NEWLINE pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)NEWLINE response = pipeline_response.http_responseNEWLINENEWLINE if response.status_code not in [200]:NEWLINE map_error(status_code=response.status_code, response=response, error_map=error_map)NEWLINE raise HttpResponseError(response=response, 
error_format=ARMErrorFormat)NEWLINENEWLINE deserialized = self._deserialize('StorageAccount', pipeline_response)NEWLINENEWLINE if cls:NEWLINE return cls(pipeline_response, deserialized, {})NEWLINENEWLINE return deserializedNEWLINE update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignoreNEWLINENEWLINE def list(NEWLINE self,NEWLINE **kwargs # type: AnyNEWLINE ):NEWLINE # type: (...) -> Iterable["_models.StorageAccountListResult"]NEWLINE """Lists all the storage accounts available under the subscription. Note that storage keys are notNEWLINE returned; use the ListKeys operation for this.NEWLINENEWLINE :keyword callable cls: A custom type or function that will be passed the direct responseNEWLINE :return: An iterator like instance of either StorageAccountListResult or the result of cls(response)NEWLINE :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2018_07_01.models.StorageAccountListResult]NEWLINE :raises: ~azure.core.exceptions.HttpResponseErrorNEWLINE """NEWLINE cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListResult"]NEWLINE error_map = {NEWLINE 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsErrorNEWLINE }NEWLINE error_map.update(kwargs.pop('error_map', {}))NEWLINE api_version = "2018-07-01"NEWLINE accept = "application/json"NEWLINENEWLINE def prepare_request(next_link=None):NEWLINE # Construct headersNEWLINE header_parameters = {} # type: Dict[str, Any]NEWLINE header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')NEWLINENEWLINE if not next_link:NEWLINE # Construct URLNEWLINE url = self.list.metadata['url'] # type: ignoreNEWLINE path_format_arguments = {NEWLINE 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),NEWLINE }NEWLINE url = self._client.format_url(url, **path_format_arguments)NEWLINE # Construct parametersNEWLINE query_parameters = {} # type: Dict[str, Any]NEWLINE query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')NEWLINENEWLINE request = self._client.get(url, query_parameters, header_parameters)NEWLINE else:NEWLINE url = next_linkNEWLINE query_parameters = {} # type: Dict[str, Any]NEWLINE request = self._client.get(url, query_parameters, header_parameters)NEWLINE return requestNEWLINENEWLINE def extract_data(pipeline_response):NEWLINE deserialized = self._deserialize('StorageAccountListResult', pipeline_response)NEWLINE list_of_elem = deserialized.valueNEWLINE if cls:NEWLINE list_of_elem = cls(list_of_elem)NEWLINE return None, iter(list_of_elem)NEWLINENEWLINE def get_next(next_link=None):NEWLINE request = prepare_request(next_link)NEWLINENEWLINE pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)NEWLINE response = pipeline_response.http_responseNEWLINENEWLINE if response.status_code not in [200]:NEWLINE map_error(status_code=response.status_code, response=response, error_map=error_map)NEWLINE raise HttpResponseError(response=response, error_format=ARMErrorFormat)NEWLINENEWLINE return pipeline_responseNEWLINENEWLINE return ItemPaged(NEWLINE get_next, extract_dataNEWLINE )NEWLINE list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts'} # type: ignoreNEWLINENEWLINE def list_by_resource_group(NEWLINE self,NEWLINE resource_group_name, # type: strNEWLINE **kwargs # type: AnyNEWLINE ):NEWLINE # type: (...) 
-> Iterable["_models.StorageAccountListResult"]NEWLINE """Lists all the storage accounts available under the given resource group. Note that storage keysNEWLINE are not returned; use the ListKeys operation for this.NEWLINENEWLINE :param resource_group_name: The name of the resource group within the user's subscription. TheNEWLINE name is case insensitive.NEWLINE :type resource_group_name: strNEWLINE :keyword callable cls: A custom type or function that will be passed the direct responseNEWLINE :return: An iterator like instance of either StorageAccountListResult or the result of cls(response)NEWLINE :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2018_07_01.models.StorageAccountListResult]NEWLINE :raises: ~azure.core.exceptions.HttpResponseErrorNEWLINE """NEWLINE cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListResult"]NEWLINE error_map = {NEWLINE 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsErrorNEWLINE }NEWLINE error_map.update(kwargs.pop('error_map', {}))NEWLINE api_version = "2018-07-01"NEWLINE accept = "application/json"NEWLINENEWLINE def prepare_request(next_link=None):NEWLINE # Construct headersNEWLINE header_parameters = {} # type: Dict[str, Any]NEWLINE header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')NEWLINENEWLINE if not next_link:NEWLINE # Construct URLNEWLINE url = self.list_by_resource_group.metadata['url'] # type: ignoreNEWLINE path_format_arguments = {NEWLINE 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),NEWLINE 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),NEWLINE }NEWLINE url = self._client.format_url(url, **path_format_arguments)NEWLINE # Construct parametersNEWLINE query_parameters = {} # type: Dict[str, Any]NEWLINE query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')NEWLINENEWLINE request = self._client.get(url, query_parameters, header_parameters)NEWLINE else:NEWLINE url = next_linkNEWLINE query_parameters = {} # type: Dict[str, Any]NEWLINE request = self._client.get(url, query_parameters, header_parameters)NEWLINE return requestNEWLINENEWLINE def extract_data(pipeline_response):NEWLINE deserialized = self._deserialize('StorageAccountListResult', pipeline_response)NEWLINE list_of_elem = deserialized.valueNEWLINE if cls:NEWLINE list_of_elem = cls(list_of_elem)NEWLINE return None, iter(list_of_elem)NEWLINENEWLINE def get_next(next_link=None):NEWLINE request = prepare_request(next_link)NEWLINENEWLINE pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)NEWLINE response = pipeline_response.http_responseNEWLINENEWLINE if response.status_code not in [200]:NEWLINE map_error(status_code=response.status_code, response=response, error_map=error_map)NEWLINE raise HttpResponseError(response=response, error_format=ARMErrorFormat)NEWLINENEWLINE return pipeline_responseNEWLINENEWLINE return ItemPaged(NEWLINE get_next, extract_dataNEWLINE )NEWLINE list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts'} # type: ignoreNEWLINENEWLINE def list_keys(NEWLINE self,NEWLINE resource_group_name, # type: strNEWLINE account_name, # type: strNEWLINE **kwargs # type: AnyNEWLINE ):NEWLINE # type: (...) 
-> "_models.StorageAccountListKeysResult"NEWLINE """Lists the access keys for the specified storage account.NEWLINENEWLINE :param resource_group_name: The name of the resource group within the user's subscription. TheNEWLINE name is case insensitive.NEWLINE :type resource_group_name: strNEWLINE :param account_name: The name of the storage account within the specified resource group.NEWLINE Storage account names must be between 3 and 24 characters in length and use numbers andNEWLINE lower-case letters only.NEWLINE :type account_name: strNEWLINE :keyword callable cls: A custom type or function that will be passed the direct responseNEWLINE :return: StorageAccountListKeysResult, or the result of cls(response)NEWLINE :rtype: ~azure.mgmt.storage.v2018_07_01.models.StorageAccountListKeysResultNEWLINE :raises: ~azure.core.exceptions.HttpResponseErrorNEWLINE """NEWLINE cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListKeysResult"]NEWLINE error_map = {NEWLINE 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsErrorNEWLINE }NEWLINE error_map.update(kwargs.pop('error_map', {}))NEWLINE api_version = "2018-07-01"NEWLINE accept = "application/json"NEWLINENEWLINE # Construct URLNEWLINE url = self.list_keys.metadata['url'] # type: ignoreNEWLINE path_format_arguments = {NEWLINE 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),NEWLINE 'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),NEWLINE 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),NEWLINE }NEWLINE url = self._client.format_url(url, **path_format_arguments)NEWLINENEWLINE # Construct parametersNEWLINE query_parameters = {} # type: Dict[str, Any]NEWLINE query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')NEWLINENEWLINE # Construct headersNEWLINE header_parameters = {} # type: Dict[str, Any]NEWLINE header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')NEWLINENEWLINE request = self._client.post(url, query_parameters, header_parameters)NEWLINE pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)NEWLINE response = pipeline_response.http_responseNEWLINENEWLINE if response.status_code not in [200]:NEWLINE map_error(status_code=response.status_code, response=response, error_map=error_map)NEWLINE raise HttpResponseError(response=response, error_format=ARMErrorFormat)NEWLINENEWLINE deserialized = self._deserialize('StorageAccountListKeysResult', pipeline_response)NEWLINENEWLINE if cls:NEWLINE return cls(pipeline_response, deserialized, {})NEWLINENEWLINE return deserializedNEWLINE list_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys'} # type: ignoreNEWLINENEWLINE def regenerate_key(NEWLINE self,NEWLINE resource_group_name, # type: strNEWLINE account_name, # type: strNEWLINE regenerate_key, # type: "_models.StorageAccountRegenerateKeyParameters"NEWLINE **kwargs # type: AnyNEWLINE ):NEWLINE # type: (...) -> "_models.StorageAccountListKeysResult"NEWLINE """Regenerates one of the access keys for the specified storage account.NEWLINENEWLINE :param resource_group_name: The name of the resource group within the user's subscription. 
TheNEWLINE name is case insensitive.NEWLINE :type resource_group_name: strNEWLINE :param account_name: The name of the storage account within the specified resource group.NEWLINE Storage account names must be between 3 and 24 characters in length and use numbers andNEWLINE lower-case letters only.NEWLINE :type account_name: strNEWLINE :param regenerate_key: Specifies name of the key which should be regenerated -- key1 or key2.NEWLINE :type regenerate_key: ~azure.mgmt.storage.v2018_07_01.models.StorageAccountRegenerateKeyParametersNEWLINE :keyword callable cls: A custom type or function that will be passed the direct responseNEWLINE :return: StorageAccountListKeysResult, or the result of cls(response)NEWLINE :rtype: ~azure.mgmt.storage.v2018_07_01.models.StorageAccountListKeysResultNEWLINE :raises: ~azure.core.exceptions.HttpResponseErrorNEWLINE """NEWLINE cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListKeysResult"]NEWLINE error_map = {NEWLINE 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsErrorNEWLINE }NEWLINE error_map.update(kwargs.pop('error_map', {}))NEWLINE api_version = "2018-07-01"NEWLINE content_type = kwargs.pop("content_type", "application/json")NEWLINE accept = "application/json"NEWLINENEWLINE # Construct URLNEWLINE url = self.regenerate_key.metadata['url'] # type: ignoreNEWLINE path_format_arguments = {NEWLINE 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),NEWLINE 'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),NEWLINE 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),NEWLINE }NEWLINE url = self._client.format_url(url, **path_format_arguments)NEWLINENEWLINE # Construct parametersNEWLINE query_parameters = {} # type: Dict[str, Any]NEWLINE query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')NEWLINENEWLINE # Construct headersNEWLINE header_parameters = {} # type: Dict[str, Any]NEWLINE header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')NEWLINE header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')NEWLINENEWLINE body_content_kwargs = {} # type: Dict[str, Any]NEWLINE body_content = self._serialize.body(regenerate_key, 'StorageAccountRegenerateKeyParameters')NEWLINE body_content_kwargs['content'] = body_contentNEWLINE request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)NEWLINE pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)NEWLINE response = pipeline_response.http_responseNEWLINENEWLINE if response.status_code not in [200]:NEWLINE map_error(status_code=response.status_code, response=response, error_map=error_map)NEWLINE raise HttpResponseError(response=response, error_format=ARMErrorFormat)NEWLINENEWLINE deserialized = self._deserialize('StorageAccountListKeysResult', pipeline_response)NEWLINENEWLINE if cls:NEWLINE return cls(pipeline_response, deserialized, {})NEWLINENEWLINE return deserializedNEWLINE regenerate_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey'} # type: ignoreNEWLINENEWLINE def list_account_sas(NEWLINE self,NEWLINE resource_group_name, # type: strNEWLINE account_name, # type: strNEWLINE parameters, # 
type: "_models.AccountSasParameters"NEWLINE **kwargs # type: AnyNEWLINE ):NEWLINE # type: (...) -> "_models.ListAccountSasResponse"NEWLINE """List SAS credentials of a storage account.NEWLINENEWLINE :param resource_group_name: The name of the resource group within the user's subscription. TheNEWLINE name is case insensitive.NEWLINE :type resource_group_name: strNEWLINE :param account_name: The name of the storage account within the specified resource group.NEWLINE Storage account names must be between 3 and 24 characters in length and use numbers andNEWLINE lower-case letters only.NEWLINE :type account_name: strNEWLINE :param parameters: The parameters to provide to list SAS credentials for the storage account.NEWLINE :type parameters: ~azure.mgmt.storage.v2018_07_01.models.AccountSasParametersNEWLINE :keyword callable cls: A custom type or function that will be passed the direct responseNEWLINE :return: ListAccountSasResponse, or the result of cls(response)NEWLINE :rtype: ~azure.mgmt.storage.v2018_07_01.models.ListAccountSasResponseNEWLINE :raises: ~azure.core.exceptions.HttpResponseErrorNEWLINE """NEWLINE cls = kwargs.pop('cls', None) # type: ClsType["_models.ListAccountSasResponse"]NEWLINE error_map = {NEWLINE 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsErrorNEWLINE }NEWLINE error_map.update(kwargs.pop('error_map', {}))NEWLINE api_version = "2018-07-01"NEWLINE content_type = kwargs.pop("content_type", "application/json")NEWLINE accept = "application/json"NEWLINENEWLINE # Construct URLNEWLINE url = self.list_account_sas.metadata['url'] # type: ignoreNEWLINE path_format_arguments = {NEWLINE 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),NEWLINE 'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),NEWLINE 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),NEWLINE }NEWLINE url = self._client.format_url(url, **path_format_arguments)NEWLINENEWLINE # Construct parametersNEWLINE query_parameters = {} # type: Dict[str, Any]NEWLINE query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')NEWLINENEWLINE # Construct headersNEWLINE header_parameters = {} # type: Dict[str, Any]NEWLINE header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')NEWLINE header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')NEWLINENEWLINE body_content_kwargs = {} # type: Dict[str, Any]NEWLINE body_content = self._serialize.body(parameters, 'AccountSasParameters')NEWLINE body_content_kwargs['content'] = body_contentNEWLINE request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)NEWLINE pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)NEWLINE response = pipeline_response.http_responseNEWLINENEWLINE if response.status_code not in [200]:NEWLINE map_error(status_code=response.status_code, response=response, error_map=error_map)NEWLINE raise HttpResponseError(response=response, error_format=ARMErrorFormat)NEWLINENEWLINE deserialized = self._deserialize('ListAccountSasResponse', pipeline_response)NEWLINENEWLINE if cls:NEWLINE return cls(pipeline_response, deserialized, {})NEWLINENEWLINE return deserializedNEWLINE list_account_sas.metadata = {'url': 
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListAccountSas'} # type: ignoreNEWLINENEWLINE def list_service_sas(NEWLINE self,NEWLINE resource_group_name, # type: strNEWLINE account_name, # type: strNEWLINE parameters, # type: "_models.ServiceSasParameters"NEWLINE **kwargs # type: AnyNEWLINE ):NEWLINE # type: (...) -> "_models.ListServiceSasResponse"NEWLINE """List service SAS credentials of a specific resource.NEWLINENEWLINE :param resource_group_name: The name of the resource group within the user's subscription. TheNEWLINE name is case insensitive.NEWLINE :type resource_group_name: strNEWLINE :param account_name: The name of the storage account within the specified resource group.NEWLINE Storage account names must be between 3 and 24 characters in length and use numbers andNEWLINE lower-case letters only.NEWLINE :type account_name: strNEWLINE :param parameters: The parameters to provide to list service SAS credentials.NEWLINE :type parameters: ~azure.mgmt.storage.v2018_07_01.models.ServiceSasParametersNEWLINE :keyword callable cls: A custom type or function that will be passed the direct responseNEWLINE :return: ListServiceSasResponse, or the result of cls(response)NEWLINE :rtype: ~azure.mgmt.storage.v2018_07_01.models.ListServiceSasResponseNEWLINE :raises: ~azure.core.exceptions.HttpResponseErrorNEWLINE """NEWLINE cls = kwargs.pop('cls', None) # type: ClsType["_models.ListServiceSasResponse"]NEWLINE error_map = {NEWLINE 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsErrorNEWLINE }NEWLINE error_map.update(kwargs.pop('error_map', {}))NEWLINE api_version = "2018-07-01"NEWLINE content_type = kwargs.pop("content_type", "application/json")NEWLINE accept = "application/json"NEWLINENEWLINE # Construct URLNEWLINE url = self.list_service_sas.metadata['url'] # type: ignoreNEWLINE path_format_arguments = {NEWLINE 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),NEWLINE 'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),NEWLINE 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),NEWLINE }NEWLINE url = self._client.format_url(url, **path_format_arguments)NEWLINENEWLINE # Construct parametersNEWLINE query_parameters = {} # type: Dict[str, Any]NEWLINE query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')NEWLINENEWLINE # Construct headersNEWLINE header_parameters = {} # type: Dict[str, Any]NEWLINE header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')NEWLINE header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')NEWLINENEWLINE body_content_kwargs = {} # type: Dict[str, Any]NEWLINE body_content = self._serialize.body(parameters, 'ServiceSasParameters')NEWLINE body_content_kwargs['content'] = body_contentNEWLINE request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)NEWLINE pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)NEWLINE response = pipeline_response.http_responseNEWLINENEWLINE if response.status_code not in [200]:NEWLINE map_error(status_code=response.status_code, response=response, error_map=error_map)NEWLINE raise HttpResponseError(response=response, error_format=ARMErrorFormat)NEWLINENEWLINE deserialized 
= self._deserialize('ListServiceSasResponse', pipeline_response)NEWLINENEWLINE if cls:NEWLINE return cls(pipeline_response, deserialized, {})NEWLINENEWLINE return deserializedNEWLINE list_service_sas.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListServiceSas'} # type: ignoreNEWLINENEWLINE def _failover_initial(NEWLINE self,NEWLINE resource_group_name, # type: strNEWLINE account_name, # type: strNEWLINE **kwargs # type: AnyNEWLINE ):NEWLINE # type: (...) -> NoneNEWLINE cls = kwargs.pop('cls', None) # type: ClsType[None]NEWLINE error_map = {NEWLINE 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsErrorNEWLINE }NEWLINE error_map.update(kwargs.pop('error_map', {}))NEWLINE api_version = "2018-07-01"NEWLINENEWLINE # Construct URLNEWLINE url = self._failover_initial.metadata['url'] # type: ignoreNEWLINE path_format_arguments = {NEWLINE 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),NEWLINE 'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),NEWLINE 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),NEWLINE }NEWLINE url = self._client.format_url(url, **path_format_arguments)NEWLINENEWLINE # Construct parametersNEWLINE query_parameters = {} # type: Dict[str, Any]NEWLINE query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')NEWLINENEWLINE # Construct headersNEWLINE header_parameters = {} # type: Dict[str, Any]NEWLINENEWLINE request = self._client.post(url, query_parameters, header_parameters)NEWLINE pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)NEWLINE response = pipeline_response.http_responseNEWLINENEWLINE if response.status_code not in [200, 202]:NEWLINE map_error(status_code=response.status_code, response=response, error_map=error_map)NEWLINE raise HttpResponseError(response=response, error_format=ARMErrorFormat)NEWLINENEWLINE if cls:NEWLINE return cls(pipeline_response, None, {})NEWLINENEWLINE _failover_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/failover'} # type: ignoreNEWLINENEWLINE def begin_failover(NEWLINE self,NEWLINE resource_group_name, # type: strNEWLINE account_name, # type: strNEWLINE **kwargs # type: AnyNEWLINE ):NEWLINE # type: (...) -> LROPoller[None]NEWLINE """Failover request can be triggered for a storage account in case of availability issues. TheNEWLINE failover occurs from the storage account's primary cluster to secondary cluster for RA-GRSNEWLINE accounts. The secondary cluster will become primary after failover.NEWLINENEWLINE :param resource_group_name: The name of the resource group within the user's subscription. 
TheNEWLINE name is case insensitive.NEWLINE :type resource_group_name: strNEWLINE :param account_name: The name of the storage account within the specified resource group.NEWLINE Storage account names must be between 3 and 24 characters in length and use numbers andNEWLINE lower-case letters only.NEWLINE :type account_name: strNEWLINE :keyword callable cls: A custom type or function that will be passed the direct responseNEWLINE :keyword str continuation_token: A continuation token to restart a poller from a saved state.NEWLINE :keyword polling: By default, your polling method will be ARMPolling.NEWLINE Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.NEWLINE :paramtype polling: bool or ~azure.core.polling.PollingMethodNEWLINE :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.NEWLINE :return: An instance of LROPoller that returns either None or the result of cls(response)NEWLINE :rtype: ~azure.core.polling.LROPoller[None]NEWLINE :raises ~azure.core.exceptions.HttpResponseError:NEWLINE """NEWLINE polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]NEWLINE cls = kwargs.pop('cls', None) # type: ClsType[None]NEWLINE lro_delay = kwargs.pop(NEWLINE 'polling_interval',NEWLINE self._config.polling_intervalNEWLINE )NEWLINE cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]NEWLINE if cont_token is None:NEWLINE raw_result = self._failover_initial(NEWLINE resource_group_name=resource_group_name,NEWLINE account_name=account_name,NEWLINE cls=lambda x,y,z: x,NEWLINE **kwargsNEWLINE )NEWLINENEWLINE kwargs.pop('error_map', None)NEWLINE kwargs.pop('content_type', None)NEWLINENEWLINE def get_long_running_output(pipeline_response):NEWLINE if cls:NEWLINE return cls(pipeline_response, None, {})NEWLINENEWLINE path_format_arguments = {NEWLINE 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),NEWLINE 'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),NEWLINE 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),NEWLINE }NEWLINENEWLINE if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)NEWLINE elif polling is False: polling_method = NoPolling()NEWLINE else: polling_method = pollingNEWLINE if cont_token:NEWLINE return LROPoller.from_continuation_token(NEWLINE polling_method=polling_method,NEWLINE continuation_token=cont_token,NEWLINE client=self._client,NEWLINE deserialization_callback=get_long_running_outputNEWLINE )NEWLINE else:NEWLINE return LROPoller(self._client, raw_result, get_long_running_output, polling_method)NEWLINE begin_failover.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/failover'} # type: ignoreNEWLINE
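# A minimal usage sketch for the storage account operations above, assumingNEWLINE# azure-identity is installed and the management client routes to this apiNEWLINE# version; the subscription id, resource group and account name are placeholders.NEWLINEfrom azure.identity import DefaultAzureCredentialNEWLINEfrom azure.mgmt.storage import StorageManagementClientNEWLINEfrom azure.mgmt.storage.v2018_07_01.models import Sku, StorageAccountCreateParametersNEWLINENEWLINEclient = StorageManagementClient(DefaultAzureCredential(), "<subscription-id>")NEWLINEparams = StorageAccountCreateParameters(NEWLINE sku=Sku(name="Standard_LRS"), kind="StorageV2", location="eastus"NEWLINE)NEWLINE# begin_create returns an LROPoller; result() blocks until the LRO finishes.NEWLINEpoller = client.storage_accounts.begin_create("my-rg", "mystorageacct", params)NEWLINEaccount = poller.result()NEWLINE# Keys are not returned by get_properties/list; fetch them explicitly.NEWLINEkeys = client.storage_accounts.list_keys("my-rg", "mystorageacct")NEWLINE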
"""NEWLINEUnionFind.pyNEWLINENEWLINESource: http://www.ics.uci.edu/~eppstein/PADS/UnionFind.pyNEWLINENEWLINEUnion-find data structure. Based on Josiah Carlson's code,NEWLINEhttp://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/215912NEWLINEwith significant additional changes by D. Eppstein.NEWLINE"""NEWLINENEWLINEfrom collections import defaultdictNEWLINENEWLINENEWLINEclass UnionFind(object):NEWLINE """Union-find data structure.NEWLINENEWLINE Each unionFind instance X maintains a family of disjoint sets ofNEWLINE hashable objects, supporting the following two methods:NEWLINENEWLINE - X[item] returns a name for the set containing the given item.NEWLINE Each set is named by an arbitrarily-chosen one of its members; asNEWLINE long as the set remains unchanged it will keep the same name. IfNEWLINE the item is not yet part of a set in X, a new singleton set isNEWLINE created for it.NEWLINENEWLINE - X.union(item1, item2, ...) merges the sets containing each itemNEWLINE into a single larger set. If any item is not yet part of a setNEWLINE in X, it is added to X as one of the members of the merged set.NEWLINE """NEWLINENEWLINE def __init__(self):NEWLINE """Create a new empty union-find structure."""NEWLINE self.weights = {}NEWLINE self.parents = {}NEWLINENEWLINE def __getitem__(self, object):NEWLINE """Find and return the name of the set containing the object."""NEWLINE # check for previously unknown objectNEWLINE if object not in self.parents:NEWLINE self.parents[object] = objectNEWLINE self.weights[object] = 1NEWLINE return objectNEWLINENEWLINE # find path of objects leading to the rootNEWLINE path = [object]NEWLINE root = self.parents[object]NEWLINE while root != path[-1]:NEWLINE path.append(root)NEWLINE root = self.parents[root]NEWLINENEWLINE # compress the path and returnNEWLINE for ancestor in path:NEWLINE self.parents[ancestor] = rootNEWLINE return rootNEWLINENEWLINE def __iter__(self):NEWLINE """Iterate through all items ever found or unioned by this structure."""NEWLINE return iter(self.parents)NEWLINENEWLINE def union(self, *objects):NEWLINE """Find the sets containing the objects and merge them all."""NEWLINE roots = [self[x] for x in objects]NEWLINE heaviest = max([(self.weights[r],r) for r in roots])[1]NEWLINE for r in roots:NEWLINE if r != heaviest:NEWLINE self.weights[heaviest] += self.weights[r]NEWLINE self.parents[r] = heaviestNEWLINENEWLINE def sets(self):NEWLINE """Return a list of each disjoint set"""NEWLINE ret = defaultdict(list)NEWLINE for k, _ in self.parents.iteritems():NEWLINE ret[self[k]].append(k)NEWLINE return ret.values()NEWLINENEWLINE NEWLINEif __name__ == '__main__':NEWLINENEWLINE # testNEWLINE uf = UnionFind()NEWLINE uf.union(0, 1)NEWLINE uf.union(2, 3)NEWLINE uf.union(3, 0)NEWLINE assert uf.sets() == [[0, 1, 2, 3]]NEWLINE
import sysNEWLINEimport osNEWLINEsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))NEWLINENEWLINEimport coreNEWLINE
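# The sys.path shim above assumes that the `core` package lives one directoryNEWLINE# above this file, e.g. (illustrative layout):NEWLINE#NEWLINE# project/NEWLINE# core/ (or core.py)NEWLINE# tests/this_file.pyNEWLINE#NEWLINE# An equivalent form using pathlib:NEWLINEimport sysNEWLINEfrom pathlib import PathNEWLINENEWLINEsys.path.insert(0, str(Path(__file__).resolve().parent.parent))NEWLINE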
import numpy as npNEWLINENEWLINEfrom ._points_utils import points_in_boxNEWLINENEWLINENEWLINEdef select(layer, event):NEWLINE """Select points.NEWLINENEWLINE Clicking on a point will select that point. If holding shift while clicking,NEWLINE that point will be added to or removed from the existing selectionNEWLINE depending on whether it is selected or not.NEWLINENEWLINE Clicking and dragging a point that is already selected will drag all theNEWLINE currently selected points.NEWLINENEWLINE Clicking and dragging on an empty part of the canvas (i.e. not on a point)NEWLINE will create a drag box that will select all points inside it when finished.NEWLINE Holding shift throughout the entirety of this process will add those pointsNEWLINE to any existing selection, otherwise these will become the only selectedNEWLINE points.NEWLINE """NEWLINE # on pressNEWLINE modify_selection = (NEWLINE 'Shift' in event.modifiers or 'Control' in event.modifiersNEWLINE )NEWLINENEWLINE # Get the value under the cursor; for points this is the index of theNEWLINE # highlighted point, if any, or None.NEWLINE value = layer.get_value(event.position, world=True)NEWLINE # if modifying selection add / remove any from existing selectionNEWLINE if modify_selection:NEWLINE if value is not None:NEWLINE layer.selected_data = _toggle_selected(layer.selected_data, value)NEWLINE else:NEWLINE if value is not None:NEWLINE # If the current index is not in the current list make it the onlyNEWLINE # index selected, otherwise don't change the selection so thatNEWLINE # the current selection can be dragged together.NEWLINE if value not in layer.selected_data:NEWLINE layer.selected_data = {value}NEWLINE else:NEWLINE layer.selected_data = set()NEWLINE layer._set_highlight()NEWLINENEWLINE yieldNEWLINENEWLINE is_moving = FalseNEWLINE # on moveNEWLINE while event.type == 'mouse_move':NEWLINE coordinates = layer.world_to_data(event.position)NEWLINE # If not modifying the selection and points are selected, drag themNEWLINE if not modify_selection and len(layer.selected_data) > 0:NEWLINE is_moving = TrueNEWLINE with layer.events.data.blocker():NEWLINE layer._move(layer.selected_data, coordinates)NEWLINE else:NEWLINE coord = [coordinates[i] for i in layer._dims_displayed]NEWLINE layer._is_selecting = TrueNEWLINE if layer._drag_start is None:NEWLINE layer._drag_start = coordNEWLINE layer._drag_box = np.array([layer._drag_start, coord])NEWLINE layer._set_highlight()NEWLINE yieldNEWLINENEWLINE # only emit data once dragging has finishedNEWLINE if is_moving:NEWLINE layer._move([], coordinates)NEWLINE is_moving = FalseNEWLINENEWLINE # on releaseNEWLINE layer._drag_start = NoneNEWLINE if layer._is_selecting:NEWLINE layer._is_selecting = FalseNEWLINE if len(layer._view_data) > 0:NEWLINE selection = points_in_box(NEWLINE layer._drag_box, layer._view_data, layer._view_sizeNEWLINE )NEWLINE # If shift combine drag selection with existing selected onesNEWLINE if modify_selection:NEWLINE new_selected = layer._indices_view[selection]NEWLINE target = set(layer.selected_data).symmetric_difference(NEWLINE set(new_selected)NEWLINE )NEWLINE layer.selected_data = list(target)NEWLINE else:NEWLINE layer.selected_data = layer._indices_view[selection]NEWLINE else:NEWLINE layer.selected_data = set()NEWLINE layer._set_highlight(force=True)NEWLINENEWLINENEWLINEDRAG_DIST_THRESHOLD = 5NEWLINENEWLINENEWLINEdef add(layer, event):NEWLINE """Add a new point at the clicked position."""NEWLINENEWLINE if event.type == 'mouse_press':NEWLINE start_pos = event.posNEWLINENEWLINE while 
event.type != 'mouse_release':NEWLINE yieldNEWLINENEWLINE dist = np.linalg.norm(start_pos - event.pos)NEWLINE if dist < DRAG_DIST_THRESHOLD:NEWLINE coordinates = layer.world_to_data(event.position)NEWLINE layer.add(coordinates)NEWLINENEWLINENEWLINEdef highlight(layer, event):NEWLINE """Highlight hovered points."""NEWLINE layer._set_highlight()NEWLINENEWLINENEWLINEdef _toggle_selected(selected_data, value):NEWLINE """Add or remove value from the selected data set.NEWLINENEWLINE ParametersNEWLINE ----------NEWLINE selected_data : setNEWLINE Set of selected data points to be modified.NEWLINE value : intNEWLINE Index of point to add or remove from selected data set.NEWLINENEWLINE ReturnsNEWLINE -------NEWLINE setNEWLINE Modified selected_data set.NEWLINE """NEWLINE if value in selected_data:NEWLINE selected_data.remove(value)NEWLINE else:NEWLINE selected_data.add(value)NEWLINENEWLINE return selected_dataNEWLINE
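# An illustrative, self-contained sketch of the drag-box test that points_in_boxNEWLINE# (imported above) performs. This is not the actual helper: the real oneNEWLINE# also accounts for the rendered size of each point.NEWLINEimport numpy as npNEWLINENEWLINEdef _points_in_box_sketch(drag_box, points):NEWLINE """Return indices of 2-d points inside an axis-aligned drag box."""NEWLINE corners = np.asarray(drag_box)NEWLINE low, high = corners.min(axis=0), corners.max(axis=0)NEWLINE inside = np.all((points >= low) & (points <= high), axis=1)NEWLINE return np.flatnonzero(inside)NEWLINENEWLINEpts = np.array([[0.0, 0.0], [5.0, 5.0], [9.0, 1.0]])NEWLINEprint(_points_in_box_sketch([[-1, -1], [6, 6]], pts)) # -> [0 1]NEWLINE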
from unittest import mockNEWLINENEWLINEimport pytestNEWLINENEWLINEfrom bgmi.lib.models import Bangumi, Filter, FollowedNEWLINEfrom bgmi.main import mainNEWLINEfrom bgmi.website.bangumi_moe import BangumiMoeNEWLINENEWLINENEWLINEdef test_gen_nginx_conf():NEWLINE main("gen nginx.conf --server-name _".split())NEWLINENEWLINENEWLINE@pytest.mark.usefixtures("_clean_bgmi")NEWLINEdef test_cal_force_update():NEWLINE class MockWebsite(BangumiMoe):NEWLINE def fetch_bangumi_calendar(self):NEWLINE bangumi = BangumiMoe().fetch_bangumi_calendar()NEWLINE bangumi[0].update_time = "Unknown"NEWLINE return bangumiNEWLINENEWLINE with mock.patch("bgmi.lib.controllers.website", MockWebsite()):NEWLINE main("cal -f".split())NEWLINE assert [NEWLINE x.name for x in Bangumi.select().where(Bangumi.update_time == "Unknown")NEWLINE ], "at least 1 bangumi's update_time is 'Unknown'"NEWLINENEWLINENEWLINEdef test_cal_config():NEWLINE main("config".split())NEWLINE main("config ADMIN_TOKEN 233".split())NEWLINE main("config DOWNLOAD_DELEGATE xunlei".split())NEWLINE main("config BANGUMI_MOE_URL https://bangumi.moe".split())NEWLINENEWLINENEWLINEdef test_add(bangumi_names):NEWLINE main(["add", *bangumi_names])NEWLINENEWLINENEWLINE@pytest.mark.usefixtures("_clean_bgmi")NEWLINEdef test_update(bangumi_names):NEWLINE main(["add", *bangumi_names])NEWLINE main(["update"])NEWLINENEWLINENEWLINE@pytest.mark.usefixtures("_clean_bgmi")NEWLINEdef test_update_single(bangumi_names):NEWLINE name = bangumi_names[0]NEWLINE main(f"add {name}".split())NEWLINE main(["update", name])NEWLINENEWLINENEWLINE@pytest.mark.usefixtures("_clean_bgmi")NEWLINEdef test_search(bangumi_names):NEWLINE main(["search", "海贼王", "--regex-filter", ".*MP4.*720P.*"])NEWLINENEWLINENEWLINE@pytest.mark.usefixtures("_clean_bgmi")NEWLINEdef test_delete(bangumi_names):NEWLINE name = bangumi_names[0]NEWLINE main(f"add {name} --episode 0".split())NEWLINE main(f"delete --name {name}".split())NEWLINENEWLINENEWLINE@pytest.mark.usefixtures("_clean_bgmi")NEWLINEdef test_delete_batch(bangumi_names):NEWLINE main(["add", *bangumi_names, "--episode", "0"])NEWLINE main("delete --clear-all --batch".split())NEWLINENEWLINENEWLINE@pytest.mark.usefixtures("_clean_bgmi")NEWLINEdef test_filter(bangumi_names):NEWLINE name = bangumi_names[0]NEWLINE main(f"add {name} --episode 0".split())NEWLINE main(["filter", name, "--subtitle", "", "--exclude", "MKV", "--regex", "720p|720P"])NEWLINE f = Filter.get(bangumi_name=name, exclude="MKV", regex="720p|720P")NEWLINE assert not f.includeNEWLINE assert not f.subtitleNEWLINENEWLINENEWLINE@pytest.mark.usefixtures("_clean_bgmi")NEWLINEdef test_fetch(bangumi_names):NEWLINE name = bangumi_names[0]NEWLINE main(f"add {name} --episode 0".split())NEWLINE main(f"fetch {name}".split())NEWLINENEWLINENEWLINE@pytest.mark.usefixtures("_clean_bgmi")NEWLINEdef test_mark(bangumi_names):NEWLINE name = bangumi_names[0]NEWLINE main(f"add {name} --episode 0".split())NEWLINE main(f"mark {name} 1".split())NEWLINE assert Followed.get(bangumi_name=name).episode == 1NEWLINE
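# The tests above depend on fixtures defined elsewhere (e.g. a conftest.py).NEWLINE# A hypothetical sketch of what such a conftest might provide; only theNEWLINE# fixture names come from the tests above, the bodies are assumptions.NEWLINEimport pytestNEWLINENEWLINE@pytest.fixtureNEWLINEdef bangumi_names():NEWLINE # names of bangumi known to exist on the test data sourceNEWLINE return ["海贼王"]NEWLINENEWLINE@pytest.fixtureNEWLINEdef _clean_bgmi():NEWLINE # reset bgmi state (database, config) around each test; details assumedNEWLINE yieldNEWLINE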
"""NEWLINE``cotk.metrics`` provides classes and functions evaluating results of models.NEWLINEIt provides a fair metric for every model.NEWLINE"""NEWLINEfrom typing import Any, List, DictNEWLINEimport hashlibNEWLINENEWLINEfrom .._utils.unordered_hash import UnorderedSha256, dumpsNEWLINEfrom .._utils.metaclass import LoadClassInterface, DocStringInheritorNEWLINENEWLINEclass MetricBase(LoadClassInterface, metaclass=DocStringInheritor):NEWLINE '''Base class for metrics.NEWLINE '''NEWLINENEWLINE DATALOADER_ARGUMENTS = \NEWLINE """dataloader (:class:`.dataloader.LanguageProcessing`, :class:`.dataloader.Sentence`, :class:`.dataloader.Session`): \NEWLINE A language generation dataloader."""NEWLINE MULTI_TURN_DATALOADER_ARGUMENTS = \NEWLINE """dataloader (:class:`.dataloader.LanguageProcessing`, :class:`.dataloader.Session`): \NEWLINE A language generation dataloader."""NEWLINE NGRAM_ARGUMENTS = \NEWLINE """ngram (int, optional): The order of ngram to calculate metrics like BLEU and Perplexity. Default: ``4``."""NEWLINE TOKENIZER_ARGUMENTS = \NEWLINE """tokenizer (None, :class:`.dataloader.Tokenizer`, str, optional): Specifies the tokenizer used in \NEWLINE the metric. Default: ``None``."""NEWLINE IGNORE_SMOOTHING_ERROR_ARGUMENTS = \NEWLINE """ignore_smoothing_error (bool, optional): Specifies whether to ignore the smoothing error when calculating \NEWLINE BLEU. Default: ``False``."""NEWLINE SAMPLE_ARGUMENTS_IN_BLEU = \NEWLINE """sample (int, optional): Number of examples sampled from the generated sentences. Default: ``1000``."""NEWLINE SAMPLE_ARGUMENTS_IN_NGRAM_PERPLEXITY = \NEWLINE SAMPLE_ARGUMENTS_IN_BLEU.replace("Default: ``1000``.", "Default: ``10000``.")NEWLINE SEED_ARGUMENTS = \NEWLINE """seed (int, optional): Random seed for sampling. Default: ``1229``."""NEWLINE REFERENCE_TEST_LIST_ARGUMENTS = \NEWLINE """reference_test_list (list): Reference sentences with :ref:`all vocabs <vocabulary_ref>` in test data."""NEWLINE REFERENCE_ALLVOCABS_KEY_ARGUMENTS = \NEWLINE """reference_allvocabs_key (str, optional): \NEWLINE The key of reference sentences. Default: ``ref_allvocabs``."""NEWLINE FORWARD_REFERENCE_ALLVOCABS_ARGUMENTS = \NEWLINE """* **data[reference_allvocabs_key]** (list, :class:`numpy.ndarray`): \NEWLINE A 2-d jagged or padded array of int. Reference sentences with \NEWLINE :ref:`allvocabs <vocabulary_ref>` in index form. \NEWLINE The sentences can optionally contain start tokens (eg: <go>), end tokens (eg: <eos>) and padding tokens (eg: <pad>), \NEWLINE which will be removed in the recorder.NEWLINE Size: ``[batch_size, ~ref_sentence_length]``, \NEWLINE where "~" means different sizes in this dimension is allowed."""NEWLINE FORWARD_BLEU_REFERENCE_ALLVOCABS_ARGUMENTS = \NEWLINE """* **data[reference_allvocabs_key]** (list, :class:`numpy.ndarray`): \NEWLINE A 2-d (3-d) jagged or padded array of int. Reference sentences with \NEWLINE :ref:`allvocabs <vocabulary_ref>` in index form. \NEWLINE Contains start token (eg: ``<go>``) and end token (eg: ``<eos>``). 
\NEWLINE Size: ``[batch_size, ~ref_sentence_length]`` (``[batch_size, ~ref_num, ~ref_sentence_length]``), \NEWLINE where "~" means different sizes in this dimension is allowed.NEWLINE Note that if this is a 3-d array, the second dim ``ref_num`` must be ``reference_num`` unless ``reference_num`` \NEWLINE is explicitly set ``None``."""NEWLINE FORWARD_REFERENCE_ALLVOCABS_ARGUMENTS_WITH_TORCH = \NEWLINE FORWARD_REFERENCE_ALLVOCABS_ARGUMENTS.replace("list, :class:`numpy.ndarray`", \NEWLINE "list, :class:`numpy.ndarray`, :class:`torch.Tensor`")NEWLINE FORWARD_POST_ALLVOCABS_ARGUMENTS = \NEWLINE FORWARD_REFERENCE_ALLVOCABS_ARGUMENTS.replace("reference_allvocabs_key", \NEWLINE "post_allvocabs_key")NEWLINE FORWARD_RESP_ALLVOCABS_ARGUMENTS = \NEWLINE FORWARD_REFERENCE_ALLVOCABS_ARGUMENTS.replace("reference_allvocabs_key", \NEWLINE "resp_allvocabs_key")NEWLINENEWLINE LABEL_KEY_ARGUMENTS = \NEWLINE """label_key (str): \NEWLINE The key of reference sentence labels. Default: ``label``."""NEWLINE LABEL_ARGUMENTS = """* **data[label_key]** (list or :class:`numpy.ndarray`): \NEWLINE A 1-d array of int. \NEWLINE Size: ``[batch_size]``, \NEWLINE each element refers to label of one sample"""NEWLINENEWLINE PREDICTION_KEY_ARGUMENTS = \NEWLINE """prediction_key (str): \NEWLINE The key of reference sentence predictions. Default: ``prediction``."""NEWLINE PREDICTION_ARGUMENTS = """* **data[prediction_key]** (list or :class:`numpy.ndarray`): \NEWLINE A 1-d array of int. \NEWLINE Size: ``[batch_size]``, \NEWLINE each element refers to prediction for one sample"""NEWLINENEWLINE MULTI_TURN_REFERENCE_ALLVOCABS_KEY_ARGUMENTS = \NEWLINE """multi_turn_reference_allvocabs_key (str, optional): \NEWLINE The key of reference sentences. Default: ``multi_turn_ref_allvocabs``."""NEWLINE FORWARD_MULTI_TURN_REFERENCE_ALLVOCABS_ARGUMENTS = \NEWLINE """* **data[multi_turn_reference_allvocabs_key]** (list, :class:`numpy.ndarray`): \NEWLINE A 3-d jagged or padded array of int. Multi-turn reference sentences with \NEWLINE :ref:`all vocabs <vocabulary_ref>`. \NEWLINE Special tokens such as start token (eg: ``<go>``) and end token (eg: ``<eos>``) are optional (indifferent to the result). \NEWLINE Padding token (i.e. ``<pad>``) is allowed. \NEWLINE Size: ``[batch_size, ~turn_length, ~ref_sentence_length]``, \NEWLINE where "~" means different sizes in this dimension is allowed."""NEWLINE FORWARD_MULTI_TURN_REFERENCE_ALLVOCABS_ARGUMENTS_WITH_TORCH = \NEWLINE FORWARD_MULTI_TURN_REFERENCE_ALLVOCABS_ARGUMENTS.replace("list, :class:`numpy.ndarray`", \NEWLINE "list, :class:`numpy.ndarray`, :class:`torch.Tensor`")NEWLINENEWLINE REFERENCE_LEN_KEY_ARGUMENTS = \NEWLINE """reference_len_key (str, optional): \NEWLINE The key of lengths of reference sentences. \NEWLINE Default: ``ref_length``."""NEWLINE FORWARD_REFERENCE_LEN_ARGUMENTS = \NEWLINE """* **data[reference_len_key]** (list, :class:`numpy.ndarray`): \NEWLINE Length of reference sentences. Contains start token (eg:``<go>``) \NEWLINE and end token (eg:``<eos>``). Size: ``[batch_size]``."""NEWLINENEWLINE MULTI_TURN_REFERENCE_LEN_KEY_ARGUMENTS = \NEWLINE """multi_turn_reference_len_key (str, optional): \NEWLINE The key of lengths of reference sentences. \NEWLINE Default: ``multi_turn_ref_length``."""NEWLINE FORWARD_MULTI_TURN_REFERENCE_LEN_ARGUMENTS = \NEWLINE """* **data[multi_turn_reference_len_key]** (list, :class:`numpy.ndarray`): \NEWLINE A 2-d jagged or padded array of int. **If padded, redundant position must be set to** ``0``. \NEWLINE Length of multi-turn reference sentences. Contains start token (eg:``<go>``) \NEWLINE and end token (eg:``<eos>``). 
Size: ``[batch_size, ~turn_length]``, \NEWLINE where "~" means different sizes in this dimension is allowed."""NEWLINENEWLINE GEN_KEY_ARGUMENTS = \NEWLINE """gen_key (str, optional): \NEWLINE The key of generated sentences. Default: ``gen``."""NEWLINE GEN_LOG_PROB_KEY_ARGUMENTS = \NEWLINE """gen_log_prob_key (str, optional): The key of predicted **log** probability over words. \NEWLINE Default: ``gen_log_prob``."""NEWLINE GENERATE_RARE_VOCAB_ARGUMENTS = \NEWLINE """generate_rare_vocab (bool, optional): Whether ``gen_log_prob`` contains :ref:`invalid vocab <vocabulary_ref>`. \NEWLINE Default: ``False``."""NEWLINE FULL_CHECK_ARGUMENTS = \NEWLINE """full_check (bool, optional): Whether to perform a full check on ``gen_log_prob`` to make sure the sumNEWLINE of probability is 1. Otherwise, a random check will be performed for efficiency.NEWLINE If PyTorch is used, a full check is always performed and this argument will be ignored.NEWLINE Default: ``False``."""NEWLINE FORWARD_GEN_ARGUMENTS = \NEWLINE """* **data[gen_key]** (list, :class:`numpy.ndarray`): \NEWLINE A 2-d jagged or padded array of int. \NEWLINE Sentences generated by model. \NEWLINE The sentences can optionally contain start tokens (eg: <go>), end tokens (eg: <eos>) and padding tokens (eg: <pad>), \NEWLINE which will be removed in the recorder.NEWLINE Size: ``[batch_size, ~gen_sentence_length]``, \NEWLINE where "~" means different sizes in this dimension is allowed."""NEWLINENEWLINE MULTI_TURN_GEN_KEY_ARGUMENTS = \NEWLINE """multi_turn_gen_key (str, optional): \NEWLINE The key of generated sentences. Default: ``multi_turn_gen``."""NEWLINE FORWARD_MULTI_TURN_GEN_ARGUMENTS = \NEWLINE """* **data[gen_key]** (list, :class:`numpy.ndarray`): \NEWLINE A 3-d jagged or padded array of int. Sentences generated by model. \NEWLINE The sentences can optionally contain start tokens (eg: <go>), end tokens (eg: <eos>) and padding tokens (eg: <pad>), \NEWLINE which will be removed in the recorder.NEWLINE Size: ``[batch_size, ~max_turn_length, ~gen_sentence_length]``, \NEWLINE where "~" means different sizes in this dimension is allowed."""NEWLINENEWLINE MULTI_TURN_LENGTH_KEY_ARGUMENTS = \NEWLINE """turn_length (str, optional): \NEWLINE The key of length of turns. Default: ``turn_length``."""NEWLINE FORWARD_MULTI_TURN_LENGTH_ARGUMENTS = \NEWLINE """* **data[turn_len_key]** (list, :class:`numpy.ndarray`): \NEWLINE Length of turns in each sample. \NEWLINE Size: ``[batch_size]``."""NEWLINENEWLINE CPU_COUNT_ARGUMENTS = \NEWLINE """cpu_count (int, optional): Number of used cpu for multiprocessing. Multiprocessing will **NOT** be used \NEWLINE when ``cpu_count`` is set to ``1`` or the dataset is small. 
Default: If ``None``, \NEWLINE			the environment variable ``CPU_COUNT`` will be used when available, \NEWLINE			or all available CPUs will be used otherwise."""NEWLINENEWLINE	def __init__(self, name: str, version: int):NEWLINE		self.unordered_hash = UnorderedSha256()NEWLINE		self.ordered_hash = hashlib.sha256()NEWLINE		self.name = nameNEWLINE		self.version = versionNEWLINE		self._hash_ordered_data((name, version))NEWLINE		self.closed = FalseNEWLINENEWLINE	def _hash_unordered_list(self, data_list: List[Any]):NEWLINE		'''Invoked by :meth:`.forward` or :meth:`.close` to hash relevant data when computing a metric.NEWLINENEWLINE		Arguments:NEWLINE			data_list (list): relevant data organized as a list.NEWLINE		'''NEWLINE		for item in data_list:NEWLINE			self.unordered_hash.update_data(dumps(item))NEWLINENEWLINE	def _hash_ordered_data(self, data: Any):NEWLINE		self.ordered_hash.update(dumps(data))NEWLINENEWLINE	def _hashvalue(self):NEWLINE		'''Invoked by :meth:`.close` to return the recorded hash value.NEWLINE		'''NEWLINE		return hashlib.sha256(dumps((self.ordered_hash.hexdigest(), self.unordered_hash.hexdigest()))).hexdigest()NEWLINENEWLINE	def forward(self, data: Dict[Any, Any]):NEWLINE		'''Process a batch of data.NEWLINENEWLINE		Arguments:NEWLINE			data (dict): A dict containing the data that metrics need.NEWLINE		'''NEWLINE		if self.closed:NEWLINE			raise ValueError("The metric has been closed.")NEWLINE		if not isinstance(data, dict):NEWLINE			raise TypeError("Data must be a dict.")NEWLINENEWLINE	def close(self) -> Dict[Any, Any]:NEWLINE		'''NEWLINE		Close the metric and return a dict containing results. Once the metric is closed,NEWLINE		calling :meth:`.forward` raises a ``ValueError`` and calling :meth:`.close` again raises a ``RuntimeError``.NEWLINE		'''NEWLINE		if not self.closed:NEWLINE			self.closed = TrueNEWLINE			return {}NEWLINE		else:NEWLINE			raise RuntimeError("The metric has been closed.")NEWLINENEWLINEclass MetricChain(MetricBase):NEWLINE	'''A metric-like class for stacked metrics. You can use this classNEWLINE	to combine multiple metrics and treat them as one.NEWLINENEWLINE	Examples:NEWLINE		>>> metric = MetricChain()NEWLINE		>>> metric.add_metric(BleuCorpusMetric())NEWLINE		>>> metric.add_metric(SingleDialogRecorder(dataloader))NEWLINENEWLINE	Todo: Give more examples of combining forward and close (see the sketch after this class)NEWLINE	'''NEWLINE	_name = 'MetricChain'NEWLINE	_version = 2NEWLINE	def __init__(self):NEWLINE		super().__init__(self._name, self._version)NEWLINE		self.metric_list = []NEWLINENEWLINE	def add_metric(self, metric: "MetricBase"):NEWLINE		'''Add a metric for processing.NEWLINENEWLINE		Arguments:NEWLINE			metric (:class:`.metric.MetricBase`): a metric instance.NEWLINE		'''NEWLINE		if not isinstance(metric, MetricBase):NEWLINE			raise TypeError("Metric must be a subclass of MetricBase")NEWLINE		self.metric_list.append(metric)NEWLINENEWLINE	def forward(self, data: Dict[Any, Any]):NEWLINE		'''Process a batch of data.NEWLINENEWLINE		Arguments:NEWLINE			data (dict): A dict that contains at least the keys which all theNEWLINE				metric components need.NEWLINE		'''NEWLINE		super().forward(data)NEWLINE		for metric in self.metric_list:NEWLINE			metric.forward(data)NEWLINENEWLINE	def close(self) -> Dict[Any, Any]:NEWLINE		r'''Return a dict containing the items which all the metric components return.NEWLINE		'''NEWLINE		res = super().close()NEWLINE		for metric in self.metric_list:NEWLINE			res.update(metric.close())NEWLINE		return resNEWLINE
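NEWLINENEWLINE# A minimal sketch addressing the Todo above: one helper that combines ``forward`` and ``close`` over a whole dataset.NEWLINE# ``metrics`` and ``batches`` are illustrative stand-ins -- real metrics (e.g. BleuCorpusMetric) need a dataloaderNEWLINE# as in the Examples section, and each batch dict must carry the keys its metric components expect.NEWLINEdef run_metric_chain(metrics: List["MetricBase"], batches: List[Dict[Any, Any]]) -> Dict[Any, Any]:NEWLINE	chain = MetricChain()NEWLINE	for metric in metrics:NEWLINE		chain.add_metric(metric)NEWLINE	for batch in batches:NEWLINE		chain.forward(batch)  # accumulate one batch in every componentNEWLINE	return chain.close()  # merged dict of every component's resultsNEWLINE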
"""Log consumers are responsible for fetching chia logsNEWLINEand propagating them to subscribers for further handling.NEWLINENEWLINEThis abstraction should provide an easy ability to switch betweenNEWLINElocal file reader and fetching logs from a remote machine.NEWLINEThe latter has not been implemented yet. Feel free to add it.NEWLINE"""NEWLINENEWLINE# stdNEWLINEimport loggingNEWLINEimport subprocessNEWLINEfrom abc import ABC, abstractmethodNEWLINEfrom pathlib import Path, PurePosixPath, PureWindowsPath, PurePathNEWLINEfrom threading import ThreadNEWLINEfrom typing import List, Optional, TupleNEWLINENEWLINE# projectNEWLINEfrom src.config import check_keys, is_win_platformNEWLINEfrom src.util import OSNEWLINENEWLINE# libNEWLINEimport paramikoNEWLINENEWLINENEWLINEclass LogConsumerSubscriber(ABC):NEWLINE """Interface for log consumer subscribers (i.e. handlers)"""NEWLINENEWLINE @abstractmethodNEWLINE def consume_logs(self, logs: str):NEWLINE """This method will be called when new logs are available"""NEWLINE passNEWLINENEWLINENEWLINEclass LogConsumer(ABC):NEWLINE """Abstract class providing common interface for log consumers"""NEWLINENEWLINE def __init__(self):NEWLINE self._subscribers: List[LogConsumerSubscriber] = []NEWLINENEWLINE @abstractmethodNEWLINE def stop(self):NEWLINE passNEWLINENEWLINE def subscribe(self, subscriber: LogConsumerSubscriber):NEWLINE self._subscribers.append(subscriber)NEWLINENEWLINE def _notify_subscribers(self, logs: str):NEWLINE for subscriber in self._subscribers:NEWLINE subscriber.consume_logs(logs)NEWLINENEWLINENEWLINEclass FileLogConsumer(LogConsumer):NEWLINE """Specific implementation for a simple file consumer"""NEWLINENEWLINE def __init__(self, log_path: Path):NEWLINE logging.info("Enabled file log consumer.")NEWLINE super().__init__()NEWLINE self._log_path = log_pathNEWLINE self._is_running = TrueNEWLINE self._thread = Thread(target=self._consume_loop)NEWLINE self._thread.start()NEWLINENEWLINE def stop(self):NEWLINE logging.info("Stopping")NEWLINE self._is_running = FalseNEWLINENEWLINE def _consume_loop(self):NEWLINE expanded_user_log_path = str(self._log_path.expanduser())NEWLINE logging.info(f"Consuming log file from {expanded_user_log_path}")NEWLINENEWLINE if is_win_platform():NEWLINE consume_command_args = ["powershell.exe", "get-content", expanded_user_log_path, "-tail", "1", "-wait"]NEWLINE else:NEWLINE consume_command_args = ["tail", "-F", expanded_user_log_path]NEWLINENEWLINE f = subprocess.Popen(consume_command_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)NEWLINE while self._is_running:NEWLINE log_line = f.stdout.readline().decode(encoding="utf-8")NEWLINE self._notify_subscribers(log_line)NEWLINENEWLINENEWLINEclass NetworkLogConsumer(LogConsumer):NEWLINE """Consume logs over the network"""NEWLINENEWLINE def __init__(NEWLINE self, remote_log_path: PurePath, remote_user: str, remote_host: str, remote_port: int, remote_platform: OSNEWLINE ):NEWLINE logging.info("Enabled network log consumer.")NEWLINE super().__init__()NEWLINENEWLINE self._remote_user = remote_userNEWLINE self._remote_host = remote_hostNEWLINE self._remote_port = remote_portNEWLINE self._remote_log_path = remote_log_pathNEWLINE self._remote_platform = remote_platformNEWLINENEWLINE self._ssh_client = paramiko.client.SSHClient()NEWLINE self._ssh_client.load_system_host_keys()NEWLINE self._ssh_client.connect(hostname=self._remote_host, username=self._remote_user, port=self._remote_port)NEWLINENEWLINE # Start threadNEWLINE self._is_running = TrueNEWLINE self._thread = 
Thread(target=self._consume_loop)NEWLINE self._thread.start()NEWLINENEWLINE def stop(self):NEWLINE logging.info("Stopping")NEWLINE self._is_running = FalseNEWLINENEWLINE def _consume_loop(self):NEWLINE logging.info(NEWLINE f"Consuming remote log file {self._remote_log_path}"NEWLINE + f" from {self._remote_host}:{self._remote_port} ({self._remote_platform})"NEWLINE )NEWLINENEWLINE if self._remote_platform == OS.WINDOWS:NEWLINE stdin, stdout, stderr = self._ssh_client.exec_command(NEWLINE f"powershell.exe Get-Content {self._remote_log_path} -Wait -Tail 1"NEWLINE )NEWLINE else:NEWLINE stdin, stdout, stderr = self._ssh_client.exec_command(f"tail -F {self._remote_log_path}")NEWLINENEWLINE while self._is_running:NEWLINE log_line = stdout.readline()NEWLINE self._notify_subscribers(log_line)NEWLINENEWLINENEWLINEdef get_host_info(host: str, user: str, path: str, port: int) -> Tuple[OS, PurePath]:NEWLINENEWLINE client = paramiko.client.SSHClient()NEWLINE client.load_system_host_keys()NEWLINE client.connect(hostname=host, username=user, port=port)NEWLINENEWLINE stdin, stdout, stderr = client.exec_command("uname -a")NEWLINE fout: str = stdout.readline().lower()NEWLINE ferr: str = stderr.readline().lower()NEWLINENEWLINE if "linux" in fout:NEWLINE return OS.LINUX, PurePosixPath(path)NEWLINE elif "darwin" in fout:NEWLINE return OS.MACOS, PurePosixPath(path)NEWLINE elif "not recognized" in ferr:NEWLINE return OS.WINDOWS, PureWindowsPath(path)NEWLINE else:NEWLINE logging.error("Found unsupported platform on remote host, assuming Linux and hope for the best.")NEWLINENEWLINE return OS.LINUX, PurePosixPath(path)NEWLINENEWLINENEWLINEdef create_log_consumer_from_config(config: dict) -> Optional[LogConsumer]:NEWLINE enabled_consumer = NoneNEWLINE for consumer in config.keys():NEWLINE if config[consumer]["enable"]:NEWLINE if enabled_consumer:NEWLINE logging.error("Detected multiple enabled consumers. This is unsupported configuration!")NEWLINE return NoneNEWLINE enabled_consumer = consumerNEWLINE if enabled_consumer is None:NEWLINE logging.error("Couldn't find enabled log consumer in config.yaml")NEWLINE return NoneNEWLINENEWLINE enabled_consumer_config = config[enabled_consumer]NEWLINENEWLINE if enabled_consumer == "file_log_consumer":NEWLINE if not check_keys(required_keys=["file_path"], config=enabled_consumer_config):NEWLINE return NoneNEWLINE return FileLogConsumer(log_path=Path(enabled_consumer_config["file_path"]))NEWLINENEWLINE if enabled_consumer == "network_log_consumer":NEWLINE if not check_keys(NEWLINE required_keys=["remote_file_path", "remote_host", "remote_user"],NEWLINE config=enabled_consumer_config,NEWLINE ):NEWLINE return NoneNEWLINENEWLINE # default SSH Port : 22NEWLINE remote_port = enabled_consumer_config.get("remote_port", 22)NEWLINENEWLINE platform, path = get_host_info(NEWLINE enabled_consumer_config["remote_host"],NEWLINE enabled_consumer_config["remote_user"],NEWLINE enabled_consumer_config["remote_file_path"],NEWLINE remote_port,NEWLINE )NEWLINENEWLINE return NetworkLogConsumer(NEWLINE remote_log_path=path,NEWLINE remote_host=enabled_consumer_config["remote_host"],NEWLINE remote_user=enabled_consumer_config["remote_user"],NEWLINE remote_port=remote_port,NEWLINE remote_platform=platform,NEWLINE )NEWLINENEWLINE logging.error("Unhandled consumer type")NEWLINE return NoneNEWLINE
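NEWLINENEWLINE# A minimal subscriber sketch showing how the classes above wire together. ``LoggingSubscriber`` is aNEWLINE# hypothetical example handler, not part of the project; a handler only needs to implement ``consume_logs``.NEWLINEclass LoggingSubscriber(LogConsumerSubscriber):NEWLINE    """Example handler that forwards every received log line to the logger."""NEWLINENEWLINE    def consume_logs(self, logs: str):NEWLINE        logging.info("chia log: %s", logs.rstrip())NEWLINENEWLINENEWLINE# Usage sketch (assumes ``config`` parsed from config.yaml with exactly one consumer enabled):NEWLINE#NEWLINE#     consumer = create_log_consumer_from_config(config)NEWLINE#     if consumer is not None:NEWLINE#         consumer.subscribe(LoggingSubscriber())NEWLINE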
from dataclasses import dataclassNEWLINEfrom typing import List, OptionalNEWLINENEWLINEfrom bson import ObjectIdNEWLINEfrom passlib.hash import pbkdf2_sha256 as sha256NEWLINEfrom pymongo.database import DatabaseNEWLINENEWLINEfrom app import dbNEWLINEdb: DatabaseNEWLINENEWLINENEWLINE@dataclassNEWLINEclass UserModel:NEWLINE    username: strNEWLINE    pwhash: strNEWLINE    id: Optional[str] = NoneNEWLINENEWLINE    @staticmethodNEWLINE    def hash_password(password):NEWLINE        return sha256.hash(password)NEWLINENEWLINE    @staticmethodNEWLINE    def verify_password(password, pwhash):NEWLINE        return sha256.verify(password, pwhash)NEWLINENEWLINE    def insert(self):NEWLINE        new_doc = db['users'].insert_one({NEWLINE            'username': self.username,NEWLINE            'pwhash': self.pwhashNEWLINE        })NEWLINE        self.id = str(new_doc.inserted_id)NEWLINENEWLINE    @classmethodNEWLINE    def find_by_id(cls, userid):NEWLINE        doc = db['users'].find_one({'_id': ObjectId(userid)})NEWLINE        if doc is not None:NEWLINE            return UserModel(doc['username'], doc['pwhash'], str(doc['_id']))NEWLINENEWLINE    @classmethodNEWLINE    def find_by_username(cls, username):NEWLINE        doc = db['users'].find_one({'username': username})NEWLINE        if doc is not None:NEWLINE            return UserModel(username=doc['username'], pwhash=doc['pwhash'], id=str(doc['_id']))NEWLINENEWLINENEWLINE@dataclassNEWLINEclass NoteModel:NEWLINE    title: strNEWLINE    markdown: strNEWLINE    userid: strNEWLINE    tags: List[str]NEWLINE    id: Optional[str] = NoneNEWLINENEWLINE    def insert(self):NEWLINE        new_doc = db['notes'].insert_one({NEWLINE            'title': self.title,NEWLINE            'markdown': self.markdown,NEWLINE            'userid': ObjectId(self.userid),NEWLINE            'tags': self.tagsNEWLINE        })NEWLINE        self.id = str(new_doc.inserted_id)NEWLINENEWLINE    def update(self):NEWLINE        # update_one replaces the deprecated Collection.update()NEWLINE        db['notes'].update_one({'_id': ObjectId(self.id)}, {'$set': {NEWLINE            'title': self.title,NEWLINE            'markdown': self.markdown,NEWLINE            'userid': ObjectId(self.userid),NEWLINE            'tags': self.tagsNEWLINE        }})NEWLINENEWLINE    @classmethodNEWLINE    def find_by_id(cls, noteid):NEWLINE        doc = db['notes'].find_one({'_id': ObjectId(noteid)})NEWLINE        if doc is not None:NEWLINE            return NoteModel(doc['title'], doc['markdown'], str(doc['userid']), doc['tags'], str(doc['_id']))NEWLINENEWLINE    @classmethodNEWLINE    def delete_by_id(cls, noteid):NEWLINE        db['notes'].delete_one({'_id': ObjectId(noteid)})NEWLINENEWLINE    @classmethodNEWLINE    def find_all_by_userid(cls, userid):NEWLINE        docs = db['notes'].find({'userid': ObjectId(userid)})NEWLINE        return [NEWLINE            NoteModel(doc['title'], doc['markdown'], str(doc['userid']), doc['tags'], str(doc['_id']))NEWLINE            for doc in docsNEWLINE        ]NEWLINE
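NEWLINENEWLINE# A usage sketch for the models above -- a hypothetical register/login flow. ``register_user`` andNEWLINE# ``check_login`` are illustrative helpers, not part of the app; ``db`` comes from the ``app`` module as above.NEWLINEdef register_user(username: str, password: str) -> UserModel:NEWLINE    user = UserModel(username=username, pwhash=UserModel.hash_password(password))NEWLINE    user.insert()  # fills user.id with the new ObjectId as a stringNEWLINE    return userNEWLINENEWLINENEWLINEdef check_login(username: str, password: str) -> bool:NEWLINE    user = UserModel.find_by_username(username)NEWLINE    # verify against the stored pbkdf2_sha256 hash; False for unknown usersNEWLINE    return user is not None and UserModel.verify_password(password, user.pwhash)NEWLINE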
# -*- coding: utf-8 -*-NEWLINENEWLINEimport osNEWLINEimport timeNEWLINEimport waveNEWLINEfrom Queue import QueueNEWLINEfrom threading import ThreadNEWLINENEWLINEfrom bottle import route, run, request, static_file, viewNEWLINENEWLINEfrom recognition import recognizeNEWLINEfrom textlogger import add_log, get_logsNEWLINENEWLINEupload_dir = 'upload_dir/'NEWLINENEWLINENEWLINE@route('/', method='GET')NEWLINE@view('logs')NEWLINEdef logs():NEWLINE    return dict(logs=get_logs())NEWLINENEWLINENEWLINE@route('/wave', method='POST')NEWLINEdef do_upload():NEWLINE    wav_file = request.files.get('file')NEWLINE    name, ext = os.path.splitext(wav_file.filename)NEWLINE    # Listnr uploads audio data as “sample.r16”NEWLINE    # NB: the membership test needs a one-element tuple; a bare ('.r16') isNEWLINE    # just a string, so `ext not in ('.r16')` would do a substring test.NEWLINE    if ext not in ('.r16',):NEWLINE        return 'File extension not allowed.'NEWLINENEWLINE    if not os.path.exists(upload_dir):NEWLINE        os.mkdir(upload_dir)NEWLINENEWLINE    file_name = str(int(time.time())) + '.wav'NEWLINE    file_path = os.path.join(upload_dir, file_name)NEWLINE    write_wave(file_path, wav_file.file.read())NEWLINE    q.put({NEWLINE        "file_path": file_path,NEWLINE        "file_name": file_nameNEWLINE    })NEWLINE    return 'OK'NEWLINENEWLINENEWLINE@route(r'/files/<filename:re:.+\.wav>')NEWLINEdef wav_files(filename):NEWLINE    return static_file(filename, root=upload_dir)NEWLINENEWLINENEWLINE@route(r'/img/<filename:re:.+\.png>')NEWLINEdef img_files(filename):NEWLINE    return static_file(filename, root='img/')NEWLINENEWLINENEWLINE@route(r'/css/<filename:re:.+\.css>')NEWLINEdef css_files(filename):NEWLINE    return static_file(filename, root='css/')NEWLINENEWLINENEWLINE@route(r'/js/<filename:re:.+\.js>')NEWLINEdef js_files(filename):NEWLINE    return static_file(filename, root='js/')NEWLINENEWLINENEWLINEdef write_wave(file_path, wave_bin):NEWLINE    wave_file = wave.open(file_path, 'wb')NEWLINE    # Mono, 16bit, 16kHzNEWLINE    wave_file.setparams((1, 2, 16000, 0, 'NONE', 'not compressed'))NEWLINE    wave_file.writeframes(wave_bin)NEWLINE    wave_file.close()NEWLINENEWLINENEWLINEdef worker():NEWLINE    while True:NEWLINE        item = q.get()NEWLINE        text = recognize(item["file_path"], language="ja-JP")NEWLINE        add_log(item["file_name"], text)NEWLINE        q.task_done()NEWLINENEWLINENEWLINEq = Queue()NEWLINEt = Thread(target=worker)NEWLINEt.daemon = TrueNEWLINEt.start()NEWLINENEWLINErun(host='0.0.0.0', port=8080, debug=True, reloader=True)NEWLINE
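NEWLINE# A client-side sketch for exercising the ``/wave`` endpoint above. Host/port and the sample file areNEWLINE# placeholders; Listnr normally posts the audio itself, this only mimics that request with ``requests``:NEWLINE#NEWLINE#     import requestsNEWLINE#     with open('sample.r16', 'rb') as f:NEWLINE#         r = requests.post('http://localhost:8080/wave', files={'file': ('sample.r16', f)})NEWLINE#     print r.text  # 'OK' on success, 'File extension not allowed.' otherwiseNEWLINE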
# -*- coding: utf-8 -*-NEWLINE"""NEWLINECreated on Tue Dec 13 01:25:12 2017NEWLINEComplete document analysis:NEWLINE1) Fuzzy String compare for file similarityNEWLINE2) Word frequency counterNEWLINE3) Phrase frequency counterNEWLINE@author: MStattelmanNEWLINE"""NEWLINENEWLINE#ImportsNEWLINEimport pandas as pdNEWLINEimport globNEWLINEimport osNEWLINEimport nltkNEWLINEimport collectionsNEWLINEfrom collections import CounterNEWLINEfrom nltk import ngramsNEWLINEfrom math import logNEWLINEimport timeNEWLINEimport difflibNEWLINEimport itertoolsNEWLINEimport uuidNEWLINEfrom functools import reduceNEWLINEfrom statistics import meanNEWLINENEWLINENEWLINENEWLINE#--------------Set up directories and VariablesNEWLINE#Set start time to calculate time of processingNEWLINEstart = time.time()NEWLINE#Set file extension for specific filetypesNEWLINEfileext = '.txt'NEWLINE#Set directory of files for processingNEWLINEcompdir = 'datafiles/'NEWLINE#Create an output directory based on a UIDNEWLINEgui = uuid.uuid4().hexNEWLINEoutdir = gui + '/'NEWLINEif not os.path.exists(outdir):NEWLINE    os.makedirs(outdir)NEWLINENEWLINE#Get all of the files in the directory into a listNEWLINEtxt_files = list(filter(lambda x: x.endswith(fileext), os.listdir(compdir)))NEWLINENEWLINEdef geo_mean_calc(n):NEWLINE    """NEWLINE    Calculate the geometric mean of a list of valuesNEWLINE    """NEWLINE    return reduce(lambda x, y: x * y, n) ** (1.0 / len(n))NEWLINENEWLINENEWLINEdef compareEach(x, y):NEWLINE    """NEWLINE    Compare the 2 files passed in using fuzzy string compareNEWLINE    """NEWLINE    with open(compdir + x, 'r') as myfile:NEWLINE        data = myfile.read().replace('\n', '').lower()NEWLINE    with open(compdir + y, 'r') as myfile2:NEWLINE        data2 = myfile2.read().replace('\n', '').lower()NEWLINENEWLINE    return difflib.SequenceMatcher(None, data, data2).ratio()NEWLINENEWLINE#Set up lists for file names and Fuzzy logic calculationsNEWLINEaList = []NEWLINEf1 = []NEWLINEf2 = []NEWLINEbList = []NEWLINE#Loop through each list item and compare it against the other itemsNEWLINEfor a, b in itertools.combinations(txt_files, 2):NEWLINE    aList.append("File [" + a + "] and file [" + b + "] has a similarity of ")NEWLINE    f1.append(a)NEWLINE    f2.append(b)NEWLINE    bList.append(compareEach(a, b))NEWLINENEWLINE#Combine both lists into a corresponding dictionaryNEWLINEd = dict(zip(aList, bList))NEWLINENEWLINE#Save sorted dict as a new dictionary from most similar to leastNEWLINEd1 = dict(sorted(d.items(), key=lambda x: x[1], reverse=True))NEWLINENEWLINE#Save results to file:NEWLINEfo = open(outdir + 'datafile-comparison.txt', "w")NEWLINE#Print Headers to fileNEWLINEfo.write('File similarity ranked from most to least similar:\n\n')NEWLINEfo.write('Geometric Mean:' + str(geo_mean_calc(bList)) + '\n\n')NEWLINEfo.write('Arithmetic Mean:' + str(mean(bList)) + '\n\n')NEWLINE#Print Output to fileNEWLINEfor k, v in d1.items():NEWLINE    fo.write(str(k) + ' >>> ' + str(v) + '\n\n')NEWLINEfo.close()NEWLINENEWLINENEWLINENEWLINE#Use tweet tokenizer to prevent contracted words from splittingNEWLINEfrom nltk.tokenize import TweetTokenizerNEWLINENEWLINEdef remove_punctuation(text):NEWLINE    # Removes all punctuation and special characters from the string and returns a 'plain' stringNEWLINE    punctuation2 = '-&'+'®©™€â´‚³©¥ã¼•ž®è±äüöž!@#“§$%^*()î_+€$=¿{”}[]:«;"»\â¢|<>,.?/~`0123456789'NEWLINE    for sign in punctuation2:NEWLINE        text = text.replace(sign, " ")NEWLINE    return textNEWLINENEWLINENEWLINE#Set length of word combinations for use in counters.NEWLINEphrase_len = 4NEWLINEterm_len = 1NEWLINENEWLINEcorpus = []NEWLINEpath = compdirNEWLINENEWLINEfile_list = []NEWLINEos.chdir(path)NEWLINE#Get all files in the directory loaded into the corpusNEWLINEfor file in glob.glob("*.txt"):NEWLINE    file_list.append(file)NEWLINE    with open(file) as f:NEWLINE        corpus.append(remove_punctuation(f.read()))NEWLINENEWLINEfrequencies0 = Counter([])NEWLINEfrequencies = Counter([])NEWLINE#Cycle through the corpus to generate frequency metricsNEWLINEfor text in corpus:NEWLINE    tknzr = TweetTokenizer()NEWLINE    token = tknzr.tokenize(text)NEWLINE    #Frequency for wordsNEWLINE    single = ngrams(token, term_len)NEWLINE    frequencies0 += Counter(single)NEWLINE    #Frequency for phrasesNEWLINE    quadgrams = ngrams(token, phrase_len)NEWLINE    frequencies += Counter(quadgrams)NEWLINENEWLINEod0 = collections.OrderedDict(frequencies0.most_common())NEWLINEod = collections.OrderedDict(frequencies.most_common())NEWLINENEWLINE#Build dataframesNEWLINEos.chdir('..')NEWLINENEWLINE#Create output for fuzzy string compare as a dataframeNEWLINEdfz = pd.DataFrame(list(zip(f1, f2, bList)),NEWLINE    columns=['File #1', 'File #2', 'Similarity'])NEWLINEdfz.sort_values(["Similarity"], inplace=True, ascending=False)NEWLINEdfz.index = pd.RangeIndex(len(dfz.index))NEWLINENEWLINE#Create output for word frequency as a dataframeNEWLINEdf0 = pd.DataFrame.from_dict(od0, orient='index').reset_index()NEWLINEdf0 = df0.rename(columns={'index': 'Word', 0: 'Count'})NEWLINENEWLINE#Create output for phrase frequency as a dataframeNEWLINEdf = pd.DataFrame.from_dict(od, orient='index').reset_index()NEWLINEdf = df.rename(columns={'index': 'Phrase', 0: 'Count'})NEWLINENEWLINE#Get a count of all words and phrasesNEWLINECount_Words = df0.shape[0]NEWLINECount_Phrase = df.shape[0]NEWLINENEWLINE#Generate html files from the dataframesNEWLINEwith open(outdir + 'Sim.html', 'w') as fh:NEWLINE    dfz.to_html(fh)NEWLINEwith open(outdir + 'Word.html', 'w') as fh:NEWLINE    df0.to_html(fh)NEWLINEwith open(outdir + 'Phrase.html', 'w') as fh:NEWLINE    df.to_html(fh)NEWLINENEWLINENEWLINE#Write File list to FileNEWLINEwith open(outdir + "complete.txt", "w") as fp1:NEWLINE    fp1.write("Execution time: " + str(time.time() - start) + "s\n\n")NEWLINE    fp1.write("With a total unique word count of:" + str(Count_Words) + "\n\n")NEWLINE    fp1.write("With a total unique phrase count of:" + str(Count_Phrase) + "\n\n")NEWLINE    fp1.write("The following files (" + str(len(file_list)) + ") were processed in the comparisons:\n\n")NEWLINE    for line in file_list:NEWLINE        fp1.write(line + "\n\n")NEWLINENEWLINE#Generate Analysis pdf from the files collectionNEWLINEimport pdfkitNEWLINEpdfkit.from_file([outdir + "complete.txt", outdir + 'Sim.html', outdir + 'Word.html', outdir + 'Phrase.html'], outdir + ' Task-' + gui + '-Document-Analysis.pdf')
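NEWLINENEWLINE#A numerically safer variant of geo_mean_calc, sketched here for reference: multiplying manyNEWLINE#similarity ratios in (0, 1] underflows toward 0.0 as the number of file pairs grows;NEWLINE#summing logs avoids that (inputs must be positive).NEWLINEfrom math import expNEWLINEdef geo_mean_log(values):NEWLINE    """NEWLINE    Geometric mean via logs; equivalent to geo_mean_calc for positive inputsNEWLINE    """NEWLINE    return exp(sum(log(v) for v in values) / len(values))NEWLINE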
import osNEWLINEimport reNEWLINEfrom collections import defaultdictNEWLINEfrom dataclasses import dataclassNEWLINEfrom typing import Dict, SetNEWLINENEWLINEimport easyargsNEWLINEfrom colorama import Fore, StyleNEWLINEfrom javalang import parse as parserNEWLINEfrom javalang.parser import JavaSyntaxErrorNEWLINEfrom javalang.tree import (NEWLINE ClassDeclaration,NEWLINE FieldDeclaration,NEWLINE FormalParameter,NEWLINE Import,NEWLINE Literal,NEWLINE MethodInvocation,NEWLINE ReferenceType,NEWLINE VariableDeclaration,NEWLINE)NEWLINEfrom tqdm import tqdmNEWLINENEWLINENEWLINE@dataclass(frozen=True)NEWLINEclass CalleeDesc:NEWLINE class_name: strNEWLINE method_name: strNEWLINENEWLINENEWLINEdef traverse(t, visitor):NEWLINE if not hasattr(t, "children"):NEWLINE returnNEWLINE for child in t.children:NEWLINE if not child:NEWLINE continueNEWLINE if isinstance(child, list):NEWLINE for item in child:NEWLINE traverse(item, visitor)NEWLINE else:NEWLINE traverse(child, visitor)NEWLINE visitor(t)NEWLINENEWLINENEWLINEdef print_file_name(filename):NEWLINE tqdm.write(Fore.RED + filename + Style.RESET_ALL)NEWLINENEWLINENEWLINEclass Scanner:NEWLINE def __init__(self, filename) -> None:NEWLINENEWLINE self.filename = filenameNEWLINE with open(filename) as fl:NEWLINE text = fl.read()NEWLINE self.AST = parser.parse(text)NEWLINE self.text_by_lines = text.split("\n")NEWLINE self.env: Dict[str, str] = {}NEWLINE self.call_nodes: Dict[CalleeDesc, Set[MethodInvocation]] = defaultdict(set)NEWLINE self.imports: Set[str] = set()NEWLINE self.wildcard_imports: Set[str] = set()NEWLINE self.extend_nodes: Dict[str, Set[ClassDeclaration]] = defaultdict(set)NEWLINE traverse(self.AST, self.visitor)NEWLINE self.inverted_import_names = {NEWLINE import_name.split(".")[-1]: import_name for import_name in self.importsNEWLINE }NEWLINENEWLINE def lookup_type(self, name):NEWLINE if name in self.env:NEWLINE return self.env[name]NEWLINE return NoneNEWLINENEWLINE @staticmethodNEWLINE def literal_arguments_only(call_node):NEWLINE for arg in call_node.arguments:NEWLINE if not isinstance(arg, Literal):NEWLINE return FalseNEWLINE return TrueNEWLINENEWLINE def push_type_resolution(self, name, assigned_type):NEWLINE self.env[name] = assigned_typeNEWLINENEWLINE def visitor(self, node):NEWLINE if isinstance(node, Import):NEWLINE if node.wildcard:NEWLINE self.wildcard_imports.add(node.path)NEWLINE else:NEWLINE self.imports.add(node.path)NEWLINENEWLINE elif isinstance(node, ClassDeclaration):NEWLINE if isinstance(node.extends, ReferenceType):NEWLINE self.push_type_resolution(node.name, node.extends.name)NEWLINE self.extend_nodes[node.extends.name].add(node)NEWLINENEWLINE elif isinstance(node, (VariableDeclaration, FieldDeclaration)):NEWLINE for declarator in node.declarators:NEWLINE self.push_type_resolution(declarator.name, node.type.name)NEWLINENEWLINE elif isinstance(node, MethodInvocation):NEWLINE relevant_type = self.lookup_type(node.qualifier)NEWLINE if relevant_type:NEWLINE self.call_nodes[CalleeDesc(relevant_type, node.member)].add(node)NEWLINENEWLINE elif isinstance(node, FormalParameter):NEWLINE self.push_type_resolution(node.name, node.type.name)NEWLINENEWLINE def class_name_matches(self, class_name, regex):NEWLINE if "." 
in class_name:NEWLINE return regex.match(class_name)NEWLINE if class_name in self.inverted_import_names:NEWLINE return regex.match(self.inverted_import_names[class_name])NEWLINE return any(NEWLINE regex.match(f"{wildcard_import}.{class_name}")NEWLINE for wildcard_import in self.wildcard_importsNEWLINE )NEWLINENEWLINE def find_calls(self, class_regex_compiled, method_regex_compiled):NEWLINE for callee, call_nodes in self.call_nodes.items():NEWLINE if self.class_name_matches(NEWLINE callee.class_name, class_regex_compiledNEWLINE ) and method_regex_compiled.match(callee.method_name):NEWLINENEWLINE non_constant_calls = [NEWLINE node for node in call_nodes if not self.literal_arguments_only(node)NEWLINE ]NEWLINE if non_constant_calls:NEWLINE yield non_constant_callsNEWLINENEWLINE def find_extends(self, class_compiled_regex):NEWLINE for class_name, class_nodes in self.extend_nodes.items():NEWLINE if self.class_name_matches(class_name, class_compiled_regex):NEWLINE yield class_nodesNEWLINENEWLINE def print_node_code_lines(self, nodes):NEWLINE for node in nodes:NEWLINE tqdm.write(NEWLINE Fore.GREENNEWLINE + f"{node.position.line:5d} "NEWLINE + Style.RESET_ALLNEWLINE + f"{self.text_by_lines[node.position.line - 1]}"NEWLINE )NEWLINENEWLINENEWLINEdef quick_match(filename, match_regex_compiled):NEWLINE txt = open(filename).read()NEWLINE return match_regex_compiled.search(txt)NEWLINENEWLINENEWLINEdef find_use_in_file(filename, root_folder, class_regex, method_regex):NEWLINE class_regex_compiled = re.compile(class_regex)NEWLINE method_regex_compiled = re.compile(method_regex)NEWLINE if not quick_match(filename, class_regex_compiled):NEWLINE returnNEWLINE relative_filename = os.path.relpath(filename, root_folder)NEWLINE first = TrueNEWLINE extends = FalseNEWLINE scanner = Scanner(filename)NEWLINE for nodelist in scanner.find_calls(class_regex_compiled, method_regex_compiled):NEWLINE if first:NEWLINE print_file_name(relative_filename)NEWLINE first = FalseNEWLINE scanner.print_node_code_lines(nodelist)NEWLINE for nodelist in scanner.find_extends(class_regex_compiled):NEWLINE if first:NEWLINE print_file_name(relative_filename)NEWLINE first = FalseNEWLINE extends = TrueNEWLINE scanner.print_node_code_lines(nodelist)NEWLINE if extends:NEWLINE tqdm.write(NEWLINE f"{Fore.RED}!!! Warning: vulnerable class extended !!!{Style.RESET_ALL}"NEWLINE )NEWLINENEWLINENEWLINEdef traverse_folder(root_dir):NEWLINE for directory, dirs, files in os.walk(root_dir):NEWLINE for filename in files:NEWLINE if filename.endswith(".java"):NEWLINE yield os.path.join(directory, filename)NEWLINENEWLINENEWLINE@easyargsNEWLINEdef scan(NEWLINE root_dir,NEWLINE class_regex=r"org.apache.logging.log4j.Logger",NEWLINE method_regex="(info|warn|error|log|debug|trace|fatal|catching|throwing|traceEntry|printf|logMessage)",NEWLINE):NEWLINE parsing_failed_files = []NEWLINE for filename in tqdm(list(traverse_folder(root_dir))):NEWLINE try:NEWLINE find_use_in_file(filename, root_dir, class_regex, method_regex)NEWLINE except (IOError, JavaSyntaxError):NEWLINE parsing_failed_files.append(filename)NEWLINE if parsing_failed_files:NEWLINE with open("err.log", "w") as f:NEWLINE f.write("Parsing failed:\n" + "\n".join(parsing_failed_files))NEWLINENEWLINENEWLINEif __name__ == "__main__":NEWLINE scan()NEWLINE
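NEWLINENEWLINE# Invocation sketch: ``easyargs`` maps ``scan``'s signature onto a CLI, so (assuming this file isNEWLINE# saved as scan.py and easyargs' default flag naming) a run over a source tree looks roughly like:NEWLINE#NEWLINE#     python scan.py /path/to/java/project \NEWLINE#         --class_regex "org.apache.logging.log4j.Logger" \NEWLINE#         --method_regex "(info|warn|error)"NEWLINE#NEWLINE# Matching call sites are printed with file name and line; files that fail to parse are collected in err.log.NEWLINE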
# Generated by Django 3.2.4 on 2021-06-06 05:11NEWLINENEWLINEfrom django.conf import settingsNEWLINEfrom django.db import migrations, modelsNEWLINEimport django.db.models.deletionNEWLINENEWLINENEWLINEclass Migration(migrations.Migration):NEWLINENEWLINE initial = TrueNEWLINENEWLINE dependencies = [NEWLINE ("farm", "0001_initial"),NEWLINE migrations.swappable_dependency(settings.AUTH_USER_MODEL),NEWLINE ("geometry", "0001_initial"),NEWLINE ]NEWLINENEWLINE operations = [NEWLINE migrations.CreateModel(NEWLINE name="Plot",NEWLINE fields=[NEWLINE (NEWLINE "id",NEWLINE models.AutoField(NEWLINE auto_created=True,NEWLINE primary_key=True,NEWLINE serialize=False,NEWLINE verbose_name="ID",NEWLINE ),NEWLINE ),NEWLINE (NEWLINE "name",NEWLINE models.CharField(NEWLINE blank=True, default="Unnamed Plot", max_length=128NEWLINE ),NEWLINE ),NEWLINE (NEWLINE "description",NEWLINE models.CharField(blank=True, default="", max_length=1024),NEWLINE ),NEWLINE (NEWLINE "type",NEWLINE models.CharField(NEWLINE blank=True,NEWLINE choices=[NEWLINE ("F", "field"),NEWLINE ("W", "forest"),NEWLINE ("G", "garden"),NEWLINE ("O", "orchard"),NEWLINE ("P", "pasture"),NEWLINE ("S", "silvopasture"),NEWLINE ],NEWLINE default=None,NEWLINE max_length=1,NEWLINE null=True,NEWLINE ),NEWLINE ),NEWLINE (NEWLINE "farm",NEWLINE models.ForeignKey(NEWLINE on_delete=django.db.models.deletion.CASCADE, to="farm.farm"NEWLINE ),NEWLINE ),NEWLINE (NEWLINE "farmer",NEWLINE models.ForeignKey(NEWLINE on_delete=django.db.models.deletion.CASCADE,NEWLINE to=settings.AUTH_USER_MODEL,NEWLINE ),NEWLINE ),NEWLINE (NEWLINE "parent",NEWLINE models.ForeignKey(NEWLINE blank=True,NEWLINE null=True,NEWLINE on_delete=django.db.models.deletion.CASCADE,NEWLINE to="plot.plot",NEWLINE ),NEWLINE ),NEWLINE (NEWLINE "shape",NEWLINE models.ForeignKey(NEWLINE default=None,NEWLINE null=True,NEWLINE on_delete=django.db.models.deletion.CASCADE,NEWLINE to="geometry.shape",NEWLINE ),NEWLINE ),NEWLINE ],NEWLINE options={NEWLINE "db_table": "plot",NEWLINE },NEWLINE ),NEWLINE ]NEWLINE
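NEWLINENEWLINE# For orientation, a sketch of the model that would generate the migration above -- a hypotheticalNEWLINE# reconstruction from the CreateModel call; the project's actual models.py may differ:NEWLINE#NEWLINE#     class Plot(models.Model):NEWLINE#         name = models.CharField(blank=True, default="Unnamed Plot", max_length=128)NEWLINE#         description = models.CharField(blank=True, default="", max_length=1024)NEWLINE#         type = models.CharField(NEWLINE#             blank=True, null=True, default=None, max_length=1,NEWLINE#             choices=[("F", "field"), ("W", "forest"), ("G", "garden"),NEWLINE#                      ("O", "orchard"), ("P", "pasture"), ("S", "silvopasture")],NEWLINE#         )NEWLINE#         farm = models.ForeignKey("farm.Farm", on_delete=models.CASCADE)NEWLINE#         farmer = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)NEWLINE#         parent = models.ForeignKey("self", blank=True, null=True, on_delete=models.CASCADE)NEWLINE#         shape = models.ForeignKey("geometry.Shape", default=None, null=True, on_delete=models.CASCADE)NEWLINE#NEWLINE#         class Meta:NEWLINE#             db_table = "plot"NEWLINE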
#NEWLINE# Copyright (c) 2021 Citrix Systems, Inc.NEWLINE#NEWLINE# Licensed under the Apache License, Version 2.0 (the "License")NEWLINE# you may not use this file except in compliance with the License.NEWLINE# You may obtain a copy of the License atNEWLINE#NEWLINE# http://www.apache.org/licenses/LICENSE-2.0NEWLINE#NEWLINE# Unless required by applicable law or agreed to in writing, softwareNEWLINE# distributed under the License is distributed on an "AS IS" BASIS,NEWLINE# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.NEWLINE# See the License for the specific language governing permissions andNEWLINE# limitations under the License.NEWLINE#NEWLINENEWLINEfrom nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resourceNEWLINEfrom nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_responseNEWLINEfrom nssrc.com.citrix.netscaler.nitro.service.options import optionsNEWLINEfrom nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exceptionNEWLINENEWLINEfrom nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_utilNEWLINENEWLINEclass rdpclientprofile(base_resource) :NEWLINE """ Configuration for RDP clientprofile resource. """NEWLINE def __init__(self) :NEWLINE self._name = NoneNEWLINE self._rdpurloverride = NoneNEWLINE self._redirectclipboard = NoneNEWLINE self._redirectdrives = NoneNEWLINE self._redirectprinters = NoneNEWLINE self._redirectcomports = NoneNEWLINE self._redirectpnpdevices = NoneNEWLINE self._keyboardhook = NoneNEWLINE self._audiocapturemode = NoneNEWLINE self._videoplaybackmode = NoneNEWLINE self._multimonitorsupport = NoneNEWLINE self._rdpcookievalidity = NoneNEWLINE self._addusernameinrdpfile = NoneNEWLINE self._rdpfilename = NoneNEWLINE self._rdphost = NoneNEWLINE self._rdplistener = NoneNEWLINE self._rdpcustomparams = NoneNEWLINE self._psk = NoneNEWLINE self._randomizerdpfilename = NoneNEWLINE self._rdplinkattribute = NoneNEWLINE self._builtin = NoneNEWLINE self._feature = NoneNEWLINE self.___count = NoneNEWLINENEWLINE @propertyNEWLINE def name(self) :NEWLINE r"""The name of the rdp profile.<br/>Minimum length = 1.NEWLINE """NEWLINE try :NEWLINE return self._nameNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @name.setterNEWLINE def name(self, name) :NEWLINE r"""The name of the rdp profile.<br/>Minimum length = 1NEWLINE """NEWLINE try :NEWLINE self._name = nameNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @propertyNEWLINE def rdpurloverride(self) :NEWLINE r"""This setting determines whether the RDP parameters supplied in the vpn url override those specified in the RDP profile.<br/>Default value: ENABLE<br/>Possible values = ENABLE, DISABLE.NEWLINE """NEWLINE try :NEWLINE return self._rdpurloverrideNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @rdpurloverride.setterNEWLINE def rdpurloverride(self, rdpurloverride) :NEWLINE r"""This setting determines whether the RDP parameters supplied in the vpn url override those specified in the RDP profile.<br/>Default value: ENABLE<br/>Possible values = ENABLE, DISABLENEWLINE """NEWLINE try :NEWLINE self._rdpurloverride = rdpurloverrideNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @propertyNEWLINE def redirectclipboard(self) :NEWLINE r"""This setting corresponds to the Clipboard check box on the Local Resources tab under Options in RDC.<br/>Default value: ENABLE<br/>Possible values = ENABLE, DISABLE.NEWLINE """NEWLINE try :NEWLINE return self._redirectclipboardNEWLINE except Exception as 
e:NEWLINE raise eNEWLINENEWLINE @redirectclipboard.setterNEWLINE def redirectclipboard(self, redirectclipboard) :NEWLINE r"""This setting corresponds to the Clipboard check box on the Local Resources tab under Options in RDC.<br/>Default value: ENABLE<br/>Possible values = ENABLE, DISABLENEWLINE """NEWLINE try :NEWLINE self._redirectclipboard = redirectclipboardNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @propertyNEWLINE def redirectdrives(self) :NEWLINE r"""This setting corresponds to the selections for Drives under More on the Local Resources tab under Options in RDC.<br/>Default value: DISABLE<br/>Possible values = ENABLE, DISABLE.NEWLINE """NEWLINE try :NEWLINE return self._redirectdrivesNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @redirectdrives.setterNEWLINE def redirectdrives(self, redirectdrives) :NEWLINE r"""This setting corresponds to the selections for Drives under More on the Local Resources tab under Options in RDC.<br/>Default value: DISABLE<br/>Possible values = ENABLE, DISABLENEWLINE """NEWLINE try :NEWLINE self._redirectdrives = redirectdrivesNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @propertyNEWLINE def redirectprinters(self) :NEWLINE r"""This setting corresponds to the selection in the Printers check box on the Local Resources tab under Options in RDC.<br/>Default value: ENABLE<br/>Possible values = ENABLE, DISABLE.NEWLINE """NEWLINE try :NEWLINE return self._redirectprintersNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @redirectprinters.setterNEWLINE def redirectprinters(self, redirectprinters) :NEWLINE r"""This setting corresponds to the selection in the Printers check box on the Local Resources tab under Options in RDC.<br/>Default value: ENABLE<br/>Possible values = ENABLE, DISABLENEWLINE """NEWLINE try :NEWLINE self._redirectprinters = redirectprintersNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @propertyNEWLINE def redirectcomports(self) :NEWLINE r"""This setting corresponds to the selections for comports under More on the Local Resources tab under Options in RDC.<br/>Default value: DISABLE<br/>Possible values = ENABLE, DISABLE.NEWLINE """NEWLINE try :NEWLINE return self._redirectcomportsNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @redirectcomports.setterNEWLINE def redirectcomports(self, redirectcomports) :NEWLINE r"""This setting corresponds to the selections for comports under More on the Local Resources tab under Options in RDC.<br/>Default value: DISABLE<br/>Possible values = ENABLE, DISABLENEWLINE """NEWLINE try :NEWLINE self._redirectcomports = redirectcomportsNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @propertyNEWLINE def redirectpnpdevices(self) :NEWLINE r"""This setting corresponds to the selections for pnpdevices under More on the Local Resources tab under Options in RDC.<br/>Default value: DISABLE<br/>Possible values = ENABLE, DISABLE.NEWLINE """NEWLINE try :NEWLINE return self._redirectpnpdevicesNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @redirectpnpdevices.setterNEWLINE def redirectpnpdevices(self, redirectpnpdevices) :NEWLINE r"""This setting corresponds to the selections for pnpdevices under More on the Local Resources tab under Options in RDC.<br/>Default value: DISABLE<br/>Possible values = ENABLE, DISABLENEWLINE """NEWLINE try :NEWLINE self._redirectpnpdevices = redirectpnpdevicesNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @propertyNEWLINE def keyboardhook(self) :NEWLINE r"""This setting corresponds to 
the selection in the Keyboard drop-down list on the Local Resources tab under Options in RDC.<br/>Default value: InFullScreenMode<br/>Possible values = OnLocal, OnRemote, InFullScreenMode.NEWLINE """NEWLINE try :NEWLINE return self._keyboardhookNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @keyboardhook.setterNEWLINE def keyboardhook(self, keyboardhook) :NEWLINE r"""This setting corresponds to the selection in the Keyboard drop-down list on the Local Resources tab under Options in RDC.<br/>Default value: InFullScreenMode<br/>Possible values = OnLocal, OnRemote, InFullScreenModeNEWLINE """NEWLINE try :NEWLINE self._keyboardhook = keyboardhookNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @propertyNEWLINE def audiocapturemode(self) :NEWLINE r"""This setting corresponds to the selections in the Remote audio area on the Local Resources tab under Options in RDC.<br/>Default value: DISABLE<br/>Possible values = ENABLE, DISABLE.NEWLINE """NEWLINE try :NEWLINE return self._audiocapturemodeNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @audiocapturemode.setterNEWLINE def audiocapturemode(self, audiocapturemode) :NEWLINE r"""This setting corresponds to the selections in the Remote audio area on the Local Resources tab under Options in RDC.<br/>Default value: DISABLE<br/>Possible values = ENABLE, DISABLENEWLINE """NEWLINE try :NEWLINE self._audiocapturemode = audiocapturemodeNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @propertyNEWLINE def videoplaybackmode(self) :NEWLINE r"""This setting determines if Remote Desktop Connection (RDC) will use RDP efficient multimedia streaming for video playback.<br/>Default value: ENABLE<br/>Possible values = ENABLE, DISABLE.NEWLINE """NEWLINE try :NEWLINE return self._videoplaybackmodeNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @videoplaybackmode.setterNEWLINE def videoplaybackmode(self, videoplaybackmode) :NEWLINE r"""This setting determines if Remote Desktop Connection (RDC) will use RDP efficient multimedia streaming for video playback.<br/>Default value: ENABLE<br/>Possible values = ENABLE, DISABLENEWLINE """NEWLINE try :NEWLINE self._videoplaybackmode = videoplaybackmodeNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @propertyNEWLINE def multimonitorsupport(self) :NEWLINE r"""Enable/Disable Multiple Monitor Support for Remote Desktop Connection (RDC).<br/>Default value: ENABLE<br/>Possible values = ENABLE, DISABLE.NEWLINE """NEWLINE try :NEWLINE return self._multimonitorsupportNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @multimonitorsupport.setterNEWLINE def multimonitorsupport(self, multimonitorsupport) :NEWLINE r"""Enable/Disable Multiple Monitor Support for Remote Desktop Connection (RDC).<br/>Default value: ENABLE<br/>Possible values = ENABLE, DISABLENEWLINE """NEWLINE try :NEWLINE self._multimonitorsupport = multimonitorsupportNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @propertyNEWLINE def rdpcookievalidity(self) :NEWLINE r"""RDP cookie validity period. RDP cookie validity time is applicable for new connection and also for any re-connection that might happen, mostly due to network disruption or during fail-over.<br/>Default value: 60<br/>Minimum length = 1<br/>Maximum length = 86400.NEWLINE """NEWLINE try :NEWLINE return self._rdpcookievalidityNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @rdpcookievalidity.setterNEWLINE def rdpcookievalidity(self, rdpcookievalidity) :NEWLINE r"""RDP cookie validity period. 
RDP cookie validity time is applicable for new connection and also for any re-connection that might happen, mostly due to network disruption or during fail-over.<br/>Default value: 60<br/>Minimum length = 1<br/>Maximum length = 86400NEWLINE """NEWLINE try :NEWLINE self._rdpcookievalidity = rdpcookievalidityNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @propertyNEWLINE def addusernameinrdpfile(self) :NEWLINE r"""Add username in rdp file.<br/>Default value: NO<br/>Possible values = YES, NO.NEWLINE """NEWLINE try :NEWLINE return self._addusernameinrdpfileNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @addusernameinrdpfile.setterNEWLINE def addusernameinrdpfile(self, addusernameinrdpfile) :NEWLINE r"""Add username in rdp file.<br/>Default value: NO<br/>Possible values = YES, NONEWLINE """NEWLINE try :NEWLINE self._addusernameinrdpfile = addusernameinrdpfileNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @propertyNEWLINE def rdpfilename(self) :NEWLINE r"""RDP file name to be sent to End User.<br/>Minimum length = 1.NEWLINE """NEWLINE try :NEWLINE return self._rdpfilenameNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @rdpfilename.setterNEWLINE def rdpfilename(self, rdpfilename) :NEWLINE r"""RDP file name to be sent to End User.<br/>Minimum length = 1NEWLINE """NEWLINE try :NEWLINE self._rdpfilename = rdpfilenameNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @propertyNEWLINE def rdphost(self) :NEWLINE r"""Fully-qualified domain name (FQDN) of the RDP Listener.<br/>Maximum length = 252.NEWLINE """NEWLINE try :NEWLINE return self._rdphostNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @rdphost.setterNEWLINE def rdphost(self, rdphost) :NEWLINE r"""Fully-qualified domain name (FQDN) of the RDP Listener.<br/>Maximum length = 252NEWLINE """NEWLINE try :NEWLINE self._rdphost = rdphostNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @propertyNEWLINE def rdplistener(self) :NEWLINE r"""IP address (or) Fully-qualified domain name(FQDN) of the RDP Listener with the port in the format IP:Port (or) FQDN:Port.<br/>Maximum length = 255.NEWLINE """NEWLINE try :NEWLINE return self._rdplistenerNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @rdplistener.setterNEWLINE def rdplistener(self, rdplistener) :NEWLINE r"""IP address (or) Fully-qualified domain name(FQDN) of the RDP Listener with the port in the format IP:Port (or) FQDN:Port.<br/>Maximum length = 255NEWLINE """NEWLINE try :NEWLINE self._rdplistener = rdplistenerNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @propertyNEWLINE def rdpcustomparams(self) :NEWLINE r"""Option for RDP custom parameters settings (if any). Custom params needs to be separated by '&'.<br/>Default value: 0<br/>Minimum length = 1.NEWLINE """NEWLINE try :NEWLINE return self._rdpcustomparamsNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @rdpcustomparams.setterNEWLINE def rdpcustomparams(self, rdpcustomparams) :NEWLINE r"""Option for RDP custom parameters settings (if any). 
Custom params need to be separated by '&'.<br/>Default value: 0<br/>Minimum length = 1NEWLINE		"""NEWLINE		try :NEWLINE			self._rdpcustomparams = rdpcustomparamsNEWLINE		except Exception as e:NEWLINE			raise eNEWLINENEWLINE	@propertyNEWLINE	def psk(self) :NEWLINE		r"""Pre shared key value.<br/>Default value: 0.NEWLINE		"""NEWLINE		try :NEWLINE			return self._pskNEWLINE		except Exception as e:NEWLINE			raise eNEWLINENEWLINE	@psk.setterNEWLINE	def psk(self, psk) :NEWLINE		r"""Pre shared key value.<br/>Default value: 0NEWLINE		"""NEWLINE		try :NEWLINE			self._psk = pskNEWLINE		except Exception as e:NEWLINE			raise eNEWLINENEWLINE	@propertyNEWLINE	def randomizerdpfilename(self) :NEWLINE		r"""Will generate a unique filename every time the rdp file is downloaded by appending the output of the time() function in the format <rdpfileName>_<time>.rdp. This tries to avoid the pop-up for replacement of the existing rdp file during each rdp connection launch, hence providing a better end-user experience.<br/>Default value: NO<br/>Possible values = YES, NO.NEWLINE		"""NEWLINE		try :NEWLINE			return self._randomizerdpfilenameNEWLINE		except Exception as e:NEWLINE			raise eNEWLINENEWLINE	@randomizerdpfilename.setterNEWLINE	def randomizerdpfilename(self, randomizerdpfilename) :NEWLINE		r"""Will generate a unique filename every time the rdp file is downloaded by appending the output of the time() function in the format <rdpfileName>_<time>.rdp. This tries to avoid the pop-up for replacement of the existing rdp file during each rdp connection launch, hence providing a better end-user experience.<br/>Default value: NO<br/>Possible values = YES, NONEWLINE		"""NEWLINE		try :NEWLINE			self._randomizerdpfilename = randomizerdpfilenameNEWLINE		except Exception as e:NEWLINE			raise eNEWLINENEWLINE	@propertyNEWLINE	def rdplinkattribute(self) :NEWLINE		r"""Citrix Gateway allows the configuration of the rdpLinkAttribute parameter, which can be used to fetch a list of RDP servers (IP/FQDN) that a user can access, from an Authentication server attribute (Example: LDAP, SAML). Based on the list received, the RDP links will be generated and displayed to the user.NEWLINE		Note: The Attribute mentioned in the rdpLinkAttribute should be fetched through the corresponding authentication method.NEWLINE		"""NEWLINE		try :NEWLINE			return self._rdplinkattributeNEWLINE		except Exception as e:NEWLINE			raise eNEWLINENEWLINE	@rdplinkattribute.setterNEWLINE	def rdplinkattribute(self, rdplinkattribute) :NEWLINE		r"""Citrix Gateway allows the configuration of the rdpLinkAttribute parameter, which can be used to fetch a list of RDP servers (IP/FQDN) that a user can access, from an Authentication server attribute (Example: LDAP, SAML). 
Based on the list received, the RDP links will be generated and displayed to the user.NEWLINE Note: The Attribute mentioned in the rdpLinkAttribute should be fetched through corresponding authentication method.NEWLINE """NEWLINE try :NEWLINE self._rdplinkattribute = rdplinkattributeNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @propertyNEWLINE def builtin(self) :NEWLINE r"""Indicates that a variable is a built-in (SYSTEM INTERNAL) type.<br/>Possible values = MODIFIABLE, DELETABLE, IMMUTABLE, PARTITION_ALL.NEWLINE """NEWLINE try :NEWLINE return self._builtinNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE @propertyNEWLINE def feature(self) :NEWLINE r"""The feature to be checked while applying this config.NEWLINE """NEWLINE try :NEWLINE return self._featureNEWLINE except Exception as e:NEWLINE raise eNEWLINENEWLINE def _get_nitro_response(self, service, response) :NEWLINE r""" converts nitro response into object and returns the object array in case of get request.NEWLINE """NEWLINE try :NEWLINE result = service.payload_formatter.string_to_resource(rdpclientprofile_response, response, self.__class__.__name__)NEWLINE if(result.errorcode != 0) :NEWLINE if (result.errorcode == 444) :NEWLINE service.clear_session(self)NEWLINE if result.severity :NEWLINE if (result.severity == "ERROR") :NEWLINE raise nitro_exception(result.errorcode, str(result.message), str(result.severity))NEWLINE else :NEWLINE raise nitro_exception(result.errorcode, str(result.message), str(result.severity))NEWLINE return result.rdpclientprofileNEWLINE except Exception as e :NEWLINE raise eNEWLINENEWLINE def _get_object_name(self) :NEWLINE r""" Returns the value of object identifier argumentNEWLINE """NEWLINE try :NEWLINE if self.name is not None :NEWLINE return str(self.name)NEWLINE return NoneNEWLINE except Exception as e :NEWLINE raise eNEWLINENEWLINENEWLINENEWLINE @classmethodNEWLINE def filter_add_parameters(cls, resource) :NEWLINE r""" Use this function to create a resource with only add operation specific parameters.NEWLINE """NEWLINE addresource = rdpclientprofile()NEWLINE addresource.name = resource.nameNEWLINE addresource.rdpurloverride = resource.rdpurloverrideNEWLINE addresource.redirectclipboard = resource.redirectclipboardNEWLINE addresource.redirectdrives = resource.redirectdrivesNEWLINE addresource.redirectprinters = resource.redirectprintersNEWLINE addresource.redirectcomports = resource.redirectcomportsNEWLINE addresource.redirectpnpdevices = resource.redirectpnpdevicesNEWLINE addresource.keyboardhook = resource.keyboardhookNEWLINE addresource.audiocapturemode = resource.audiocapturemodeNEWLINE addresource.videoplaybackmode = resource.videoplaybackmodeNEWLINE addresource.multimonitorsupport = resource.multimonitorsupportNEWLINE addresource.rdpcookievalidity = resource.rdpcookievalidityNEWLINE addresource.addusernameinrdpfile = resource.addusernameinrdpfileNEWLINE addresource.rdpfilename = resource.rdpfilenameNEWLINE addresource.rdphost = resource.rdphostNEWLINE addresource.rdplistener = resource.rdplistenerNEWLINE addresource.rdpcustomparams = resource.rdpcustomparamsNEWLINE addresource.psk = resource.pskNEWLINE addresource.randomizerdpfilename = resource.randomizerdpfilenameNEWLINE addresource.rdplinkattribute = resource.rdplinkattributeNEWLINE return addresourceNEWLINENEWLINE @classmethodNEWLINE def add(cls, client, resource) :NEWLINE r""" Use this API to add rdpclientprofile.NEWLINE """NEWLINE try :NEWLINE if type(resource) is not list :NEWLINE addresource = 
cls.filter_add_parameters(resource)NEWLINE return addresource.add_resource(client)NEWLINE else :NEWLINE if (resource and len(resource) > 0) :NEWLINE addresources = [ rdpclientprofile() for _ in range(len(resource))]NEWLINE for i in range(len(resource)) :NEWLINE addresources[i] = cls.filter_add_parameters(resource[i])NEWLINE result = cls.add_bulk_request(client, addresources)NEWLINE return resultNEWLINE except Exception as e :NEWLINE raise eNEWLINENEWLINE @classmethodNEWLINE def filter_update_parameters(cls, resource) :NEWLINE r""" Use this function to create a resource with only update operation specific parameters.NEWLINE """NEWLINE updateresource = rdpclientprofile()NEWLINE updateresource.name = resource.nameNEWLINE updateresource.rdpurloverride = resource.rdpurloverrideNEWLINE updateresource.redirectclipboard = resource.redirectclipboardNEWLINE updateresource.redirectdrives = resource.redirectdrivesNEWLINE updateresource.redirectprinters = resource.redirectprintersNEWLINE updateresource.redirectcomports = resource.redirectcomportsNEWLINE updateresource.redirectpnpdevices = resource.redirectpnpdevicesNEWLINE updateresource.keyboardhook = resource.keyboardhookNEWLINE updateresource.audiocapturemode = resource.audiocapturemodeNEWLINE updateresource.videoplaybackmode = resource.videoplaybackmodeNEWLINE updateresource.multimonitorsupport = resource.multimonitorsupportNEWLINE updateresource.rdpcookievalidity = resource.rdpcookievalidityNEWLINE updateresource.addusernameinrdpfile = resource.addusernameinrdpfileNEWLINE updateresource.rdpfilename = resource.rdpfilenameNEWLINE updateresource.rdphost = resource.rdphostNEWLINE updateresource.rdplistener = resource.rdplistenerNEWLINE updateresource.rdpcustomparams = resource.rdpcustomparamsNEWLINE updateresource.psk = resource.pskNEWLINE updateresource.randomizerdpfilename = resource.randomizerdpfilenameNEWLINE updateresource.rdplinkattribute = resource.rdplinkattributeNEWLINE return updateresourceNEWLINENEWLINE @classmethodNEWLINE def update(cls, client, resource) :NEWLINE r""" Use this API to update rdpclientprofile.NEWLINE """NEWLINE try :NEWLINE if type(resource) is not list :NEWLINE updateresource = cls.filter_update_parameters(resource)NEWLINE return updateresource.update_resource(client)NEWLINE else :NEWLINE if (resource and len(resource) > 0) :NEWLINE updateresources = [ rdpclientprofile() for _ in range(len(resource))]NEWLINE for i in range(len(resource)) :NEWLINE updateresources[i] = cls.filter_update_parameters(resource[i])NEWLINE result = cls.update_bulk_request(client, updateresources)NEWLINE return resultNEWLINE except Exception as e :NEWLINE raise eNEWLINENEWLINE @classmethodNEWLINE def unset(cls, client, resource, args) :NEWLINE r""" Use this API to unset the properties of rdpclientprofile resource.NEWLINE Properties that need to be unset are specified in args array.NEWLINE """NEWLINE try :NEWLINE if type(resource) is not list :NEWLINE unsetresource = rdpclientprofile()NEWLINE if type(resource) != type(unsetresource):NEWLINE unsetresource.name = resourceNEWLINE else :NEWLINE unsetresource.name = resource.nameNEWLINE return unsetresource.unset_resource(client, args)NEWLINE else :NEWLINE if type(resource[0]) != cls :NEWLINE if (resource and len(resource) > 0) :NEWLINE unsetresources = [ rdpclientprofile() for _ in range(len(resource))]NEWLINE for i in range(len(resource)) :NEWLINE unsetresources[i].name = resource[i]NEWLINE else :NEWLINE if (resource and len(resource) > 0) :NEWLINE unsetresources = [ rdpclientprofile() for _ in 
range(len(resource))]NEWLINE						for i in range(len(resource)) :NEWLINE							unsetresources[i].name = resource[i].nameNEWLINE				result = cls.unset_bulk_request(client, unsetresources, args)NEWLINE				return resultNEWLINE		except Exception as e :NEWLINE			raise eNEWLINENEWLINE	@classmethodNEWLINE	def filter_delete_parameters(cls, resource) :NEWLINE		r""" Use this function to create a resource with only delete operation specific parameters.NEWLINE		"""NEWLINE		deleteresource = rdpclientprofile()NEWLINE		deleteresource.name = resource.nameNEWLINE		return deleteresourceNEWLINENEWLINE	@classmethodNEWLINE	def delete(cls, client, resource) :NEWLINE		r""" Use this API to delete rdpclientprofile.NEWLINE		"""NEWLINE		try :NEWLINE			if type(resource) is not list :NEWLINE				deleteresource = rdpclientprofile()NEWLINE				if type(resource) != type(deleteresource):NEWLINE					deleteresource.name = resourceNEWLINE				else :NEWLINE					deleteresource = cls.filter_delete_parameters(resource)NEWLINE				return deleteresource.delete_resource(client)NEWLINE			else :NEWLINE				if type(resource[0]) != cls :NEWLINE					if (resource and len(resource) > 0) :NEWLINE						deleteresources = [ rdpclientprofile() for _ in range(len(resource))]NEWLINE						for i in range(len(resource)) :NEWLINE							deleteresources[i].name = resource[i]NEWLINE				else :NEWLINE					if (resource and len(resource) > 0) :NEWLINE						deleteresources = [ rdpclientprofile() for _ in range(len(resource))]NEWLINE						for i in range(len(resource)) :NEWLINE							deleteresources[i] = cls.filter_delete_parameters(resource[i])NEWLINE				result = cls.delete_bulk_request(client, deleteresources)NEWLINE				return resultNEWLINE		except Exception as e :NEWLINE			raise eNEWLINENEWLINE	@classmethodNEWLINE	def get(cls, client, name="", option_="") :NEWLINE		r""" Use this API to fetch all the rdpclientprofile resources that are configured on NetScaler.NEWLINE		"""NEWLINE		try :NEWLINE			if not name :NEWLINE				obj = rdpclientprofile()NEWLINE				response = obj.get_resources(client, option_)NEWLINE			else :NEWLINE				if type(name) is not list :NEWLINE					if type(name) == cls :NEWLINE						raise Exception('Invalid parameter name:{0}'.format(type(name)))NEWLINE					obj = rdpclientprofile()NEWLINE					obj.name = nameNEWLINE					response = obj.get_resource(client, option_)NEWLINE				else :NEWLINE					if name and len(name) > 0 :NEWLINE						if type(name[0]) == cls :NEWLINE							raise Exception('Invalid parameter name:{0}'.format(type(name[0])))NEWLINE						response = [rdpclientprofile() for _ in range(len(name))]NEWLINE						obj = [rdpclientprofile() for _ in range(len(name))]NEWLINE						for i in range(len(name)) :NEWLINE							obj[i] = rdpclientprofile()NEWLINE							obj[i].name = name[i]NEWLINE							response[i] = obj[i].get_resource(client, option_)NEWLINE			return responseNEWLINE		except Exception as e :NEWLINE			raise eNEWLINENEWLINENEWLINE	@classmethodNEWLINE	def get_filtered(cls, client, filter_) :NEWLINE		r""" Use this API to fetch a filtered set of rdpclientprofile resources.NEWLINE		Filter string should be in JSON format, eg: "port:80,servicetype:HTTP".NEWLINE		"""NEWLINE		try :NEWLINE			obj = rdpclientprofile()NEWLINE			option_ = options()NEWLINE			option_.filter = filter_NEWLINE			response = obj.getfiltered(client, option_)NEWLINE			return responseNEWLINE		except Exception as e :NEWLINE			raise eNEWLINENEWLINENEWLINE	@classmethodNEWLINE	def count(cls, client) :NEWLINE		r""" Use this API to count the rdpclientprofile resources configured on NetScaler.NEWLINE		"""NEWLINE		try :NEWLINE			obj = rdpclientprofile()NEWLINE			option_ = options()NEWLINE			option_.count = TrueNEWLINE			response = obj.get_resources(client, option_)NEWLINE			if response :NEWLINE				return 
response[0].__dict__['___count']NEWLINE return 0NEWLINE except Exception as e :NEWLINE raise eNEWLINENEWLINE @classmethodNEWLINE def count_filtered(cls, client, filter_) :NEWLINE r""" Use this API to count filtered the set of rdpclientprofile resources.NEWLINE Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".NEWLINE """NEWLINE try :NEWLINE obj = rdpclientprofile()NEWLINE option_ = options()NEWLINE option_.count = TrueNEWLINE option_.filter = filter_NEWLINE response = obj.getfiltered(client, option_)NEWLINE if response :NEWLINE return response[0].__dict__['___count']NEWLINE return 0NEWLINE except Exception as e :NEWLINE raise eNEWLINENEWLINENEWLINE class Rdpurloverride:NEWLINE ENABLE = "ENABLE"NEWLINE DISABLE = "DISABLE"NEWLINENEWLINE class Keyboardhook:NEWLINE OnLocal = "OnLocal"NEWLINE OnRemote = "OnRemote"NEWLINE InFullScreenMode = "InFullScreenMode"NEWLINENEWLINE class Feature:NEWLINE WL = "WL"NEWLINE WebLogging = "WebLogging"NEWLINE SP = "SP"NEWLINE SurgeProtection = "SurgeProtection"NEWLINE LB = "LB"NEWLINE LoadBalancing = "LoadBalancing"NEWLINE CS = "CS"NEWLINE ContentSwitching = "ContentSwitching"NEWLINE CR = "CR"NEWLINE CacheRedirection = "CacheRedirection"NEWLINE SC = "SC"NEWLINE SureConnect = "SureConnect"NEWLINE CMP = "CMP"NEWLINE CMPcntl = "CMPcntl"NEWLINE CompressionControl = "CompressionControl"NEWLINE PQ = "PQ"NEWLINE PriorityQueuing = "PriorityQueuing"NEWLINE HDOSP = "HDOSP"NEWLINE HttpDoSProtection = "HttpDoSProtection"NEWLINE SSLVPN = "SSLVPN"NEWLINE AAA = "AAA"NEWLINE GSLB = "GSLB"NEWLINE GlobalServerLoadBalancing = "GlobalServerLoadBalancing"NEWLINE SSL = "SSL"NEWLINE SSLOffload = "SSLOffload"NEWLINE SSLOffloading = "SSLOffloading"NEWLINE CF = "CF"NEWLINE ContentFiltering = "ContentFiltering"NEWLINE IC = "IC"NEWLINE IntegratedCaching = "IntegratedCaching"NEWLINE OSPF = "OSPF"NEWLINE OSPFRouting = "OSPFRouting"NEWLINE RIP = "RIP"NEWLINE RIPRouting = "RIPRouting"NEWLINE BGP = "BGP"NEWLINE BGPRouting = "BGPRouting"NEWLINE REWRITE = "REWRITE"NEWLINE IPv6PT = "IPv6PT"NEWLINE IPv6protocoltranslation = "IPv6protocoltranslation"NEWLINE AppFw = "AppFw"NEWLINE ApplicationFirewall = "ApplicationFirewall"NEWLINE RESPONDER = "RESPONDER"NEWLINE HTMLInjection = "HTMLInjection"NEWLINE push = "push"NEWLINE NSPush = "NSPush"NEWLINE NetScalerPush = "NetScalerPush"NEWLINE AppFlow = "AppFlow"NEWLINE CloudBridge = "CloudBridge"NEWLINE ISIS = "ISIS"NEWLINE ISISRouting = "ISISRouting"NEWLINE CH = "CH"NEWLINE CallHome = "CallHome"NEWLINE AppQoE = "AppQoE"NEWLINE ContentAccelerator = "ContentAccelerator"NEWLINE SYSTEM = "SYSTEM"NEWLINE RISE = "RISE"NEWLINE FEO = "FEO"NEWLINE LSN = "LSN"NEWLINE LargeScaleNAT = "LargeScaleNAT"NEWLINE RDPProxy = "RDPProxy"NEWLINE Rep = "Rep"NEWLINE Reputation = "Reputation"NEWLINE URLFiltering = "URLFiltering"NEWLINE VideoOptimization = "VideoOptimization"NEWLINE ForwardProxy = "ForwardProxy"NEWLINE SSLInterception = "SSLInterception"NEWLINE AdaptiveTCP = "AdaptiveTCP"NEWLINE CQA = "CQA"NEWLINE CI = "CI"NEWLINE ContentInspection = "ContentInspection"NEWLINE Bot = "Bot"NEWLINE APIGateway = "APIGateway"NEWLINENEWLINE class Redirectcomports:NEWLINE ENABLE = "ENABLE"NEWLINE DISABLE = "DISABLE"NEWLINENEWLINE class Builtin:NEWLINE MODIFIABLE = "MODIFIABLE"NEWLINE DELETABLE = "DELETABLE"NEWLINE IMMUTABLE = "IMMUTABLE"NEWLINE PARTITION_ALL = "PARTITION_ALL"NEWLINENEWLINE class Randomizerdpfilename:NEWLINE YES = "YES"NEWLINE NO = "NO"NEWLINENEWLINE class Multimonitorsupport:NEWLINE ENABLE = "ENABLE"NEWLINE DISABLE = "DISABLE"NEWLINENEWLINE class 
Addusernameinrdpfile:NEWLINE YES = "YES"NEWLINE NO = "NO"NEWLINENEWLINE class Videoplaybackmode:NEWLINE ENABLE = "ENABLE"NEWLINE DISABLE = "DISABLE"NEWLINENEWLINE class Redirectclipboard:NEWLINE ENABLE = "ENABLE"NEWLINE DISABLE = "DISABLE"NEWLINENEWLINE class Redirectpnpdevices:NEWLINE ENABLE = "ENABLE"NEWLINE DISABLE = "DISABLE"NEWLINENEWLINE class Redirectprinters:NEWLINE ENABLE = "ENABLE"NEWLINE DISABLE = "DISABLE"NEWLINENEWLINE class Audiocapturemode:NEWLINE ENABLE = "ENABLE"NEWLINE DISABLE = "DISABLE"NEWLINENEWLINE class Redirectdrives:NEWLINE ENABLE = "ENABLE"NEWLINE DISABLE = "DISABLE"NEWLINENEWLINEclass rdpclientprofile_response(base_response) :NEWLINE def __init__(self, length=1) :NEWLINE self.rdpclientprofile = []NEWLINE self.errorcode = 0NEWLINE self.message = ""NEWLINE self.severity = ""NEWLINE self.sessionid = ""NEWLINE self.rdpclientprofile = [rdpclientprofile() for _ in range(length)]NEWLINENEWLINE
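# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated SDK file above): how the
# classmethods above are typically driven. The nitro_service import path and
# the address/credentials are assumptions about a standard nitro setup.
# ---------------------------------------------------------------------------
# from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
#
# client = nitro_service("192.0.2.10", "http")
# client.login("nsroot", "nsroot")
# profile = rdpclientprofile()
# profile.name = "rdp_profile_1"
# profile.redirectclipboard = rdpclientprofile.Redirectclipboard.ENABLE
# rdpclientprofile.add(client, profile)             # single-resource add
# print(rdpclientprofile.count(client))             # number of configured profiles
# rdpclientprofile.delete(client, "rdp_profile_1")  # delete accepts a name or a resource
# client.logout()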
from Node import Node
from shutil import rmtree
# from config_creator import create_file_structure
from sesamutils import sesam_logger
from os import mkdir
from git import Repo
import subprocess
from json import dumps as dump_json


class Gitter:
    def __init__(self, url, username, password_or_token, folder, branch):
        self.url = url
        self.username = username
        self.password_or_token = password_or_token
        self.folder = folder
        self.branch = branch

        self.LOGGER = sesam_logger('Git')

        self.repo = self.clone_repo()

    def clone_repo(self):
        self.try_to_delete_dir(self.folder)
        url = f'https://{self.username}:{self.password_or_token}@{self.url}'
        repo = Repo.clone_from(url, self.folder, branch=self.branch)
        return repo

    def push_if_diff(self, dry_run=False):
        if self.is_there_a_diff():
            if dry_run:
                self.LOGGER.info('Dry run! Skipping push to repo.')
            else:
                self.push()
                self.LOGGER.info('Successfully pushed to git repo!')
        else:
            self.LOGGER.info('No current diff! Skipping push to repo.')

    def is_there_a_diff(self):
        # subprocess is already imported at module level
        bash_command = 'git status'
        process = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE, cwd=self.repo.working_dir + '/node/')
        output, error = process.communicate()
        if output.endswith(b"working tree clean\n"):
            return False
        else:
            self.LOGGER.info(f'Git status result : "{output}"')
            return True

    def push(self):
        self.LOGGER.debug(f"Pushing to git repo {self.repo.remote('origin')}")
        self.repo.git.add([self.repo.working_dir])
        self.repo.index.commit(message='Update based on master node config')
        origin = self.repo.remote('origin')
        origin.push()

    def try_to_delete_dir(self, directory):
        try:
            self.LOGGER.debug(f'Deleting directory "{directory}"')
            # no ignore_errors here, so a missing directory is reported below
            rmtree(directory)
        except FileNotFoundError:
            self.LOGGER.info(f'Did not delete "{directory}" because it does not exist!')

    def try_to_make_dir(self, directory):
        try:
            self.LOGGER.debug(f'Creating directory "{directory}"')
            mkdir(directory)
        except FileExistsError:
            self.LOGGER.info(f'Did not create "{directory}" because it already exists!')

    def create_node_file_structure(self, node: Node, env):
        self.try_to_delete_dir(f'{self.repo.working_dir}/node')
        for p in [
            f'{self.repo.working_dir}/node/',
            f'{self.repo.working_dir}/node/pipes/',
            f'{self.repo.working_dir}/node/systems/',
            f'{self.repo.working_dir}/node/variables/'
        ]:
            self.try_to_make_dir(p)
        tmp_file = None
        for conf in node.conf:
            if conf['type'] == 'pipe':
                tmp_file = open(f'{self.repo.working_dir}/node/pipes/{conf["_id"]}.conf.json', 'w+')
            if 'system' in conf['type']:
                tmp_file = open(f'{self.repo.working_dir}/node/systems/{conf["_id"]}.conf.json', 'w+')
            if conf['type'] == 'metadata':
                tmp_file = open(f'{self.repo.working_dir}/node/node-metadata.conf.json', 'w+')
            tmp_file.write(dump_json(conf, indent=2))
        if node.upload_vars:
            tmp_file = open(f'{self.repo.working_dir}/node/variables/variables-{env}.json', 'w+')
            tmp_file.write(dump_json(node.upload_vars, indent=2))
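# Hedged usage sketch: wiring Gitter to a Node and doing a dry run. The URL,
# credentials, folder, and branch are placeholders, and Node construction is
# elided because it happens elsewhere in this project.
#
# gitter = Gitter(
#     url="github.com/example/sesam-node-config.git",  # scheme is added in clone_repo()
#     username="ci-bot",
#     password_or_token="<token>",
#     folder="/tmp/sesam-repo",
#     branch="master",
# )
# node = ...                                           # Node built from the master node config
# gitter.create_node_file_structure(node, env="prod")
# gitter.push_if_diff(dry_run=True)                    # logs the diff instead of pushing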
# Generated by Django 3.2.4 on 2021-06-18 02:41

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('description', models.TextField()),
                ('image', models.ImageField(upload_to='')),
                ('created', models.DateField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
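# Hedged sketch (not generated by Django): the models.py definition this
# initial migration corresponds to, reconstructed from the CreateModel fields.
#
# from django.db import models
#
# class Project(models.Model):
#     title = models.CharField(max_length=200)
#     description = models.TextField()
#     image = models.ImageField(upload_to='')
#     created = models.DateField(auto_now_add=True)
#     updated = models.DateTimeField(auto_now=True)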
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/8/9 11:00
# @Author : Baijb

"""
    Web server for demoProject (async IO for high-concurrency access, handled
    with a single thread plus coroutines).
    --- Route mapping (in web.Application) and view handling (jinja2).
    1. Create the database connection pool
    2. Create the web application object (set up middlewares -- similar to
       interceptors -- for logging and response handling)
    3. Create the jinja2 environment (view resolver)
    4. Register the business-logic layer (path-to-handler mapping)
    5. Register the static resource path
    6. Run the web application

    Request flow:
    1. The client sends a request
    2. The server receives it and finds the mapped handler
    3. The middlewares run in sequence
    4.
"""
import asyncio
import json
import logging
import os
import sys
import time
from datetime import datetime

from aiohttp import web
from jinja2 import Environment, FileSystemLoader

from www import orm
from www.config import configs
from www.coreweb import add_routes, add_static

logging.basicConfig(level=logging.INFO)


def init_jinja2(app, **kw):
    """
    Set up the view resolver (jinja2 Environment).
    :param app:
    :param kw:
    :return:
    """
    logging.info('init jinja2...')
    options = dict(
        autoescape=kw.get('autoescape', True),
        block_start_string=kw.get('block_start_string', '{%'),
        block_end_string=kw.get('block_end_string', '%}'),
        variable_start_string=kw.get('variable_start_string', '{{'),
        variable_end_string=kw.get('variable_end_string', '}}'),
        auto_reload=kw.get('auto_reload', True)
    )
    path = kw.get('path', None)
    if path is None:
        path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
    logging.info('set jinja2 template path: %s' % path)
    env = Environment(loader=FileSystemLoader(path), **options)
    filters = kw.get('filters', None)
    if filters is not None:
        for name, f in filters.items():
            env.filters[name] = f
    app['__templating__'] = env


async def data_factory(app, handler):
    """
    Parse the request parameters into a dict and store it on request.__data__.
    `response = await handler(request)` invokes the next handler in the chain.
    :param app:
    :param handler:
    :return:
    """

    async def parse_data(request):
        if request.method == 'POST':
            if request.content_type.startswith('application/json'):
                request.__data__ = await request.json()
                logging.info('request json: %s' % str(request.__data__))
            elif request.content_type.startswith('application/x-www-form-urlencoded'):
                request.__data__ = await request.post()
                logging.info('request form: %s' % str(request.__data__))
        return (await handler(request))

    return parse_data


async def logger_factory(app, handler):
    async def logger(request):
        logging.info('Request: %s %s' % (request.method, request.path))
        return (await handler(request))

    return logger


async def response_factory(app, handler):
    async def response(request):
        logging.info('Response handler...')
        r = await handler(request)
        if isinstance(r, web.StreamResponse):
            return r
        if isinstance(r, bytes):
            resp = web.Response(body=r)
            resp.content_type = 'application/octet-stream'
            return resp
        if isinstance(r, str):
            if r.startswith('redirect:'):
                return web.HTTPFound(r[9:])
            resp = web.Response(body=r.encode('utf-8'))
            resp.content_type = 'text/html;charset=utf-8'
            return resp
        if isinstance(r, dict):
            template = r.get('__template__')
            if template is None:
                resp = web.Response(
                    body=json.dumps(r, ensure_ascii=False, default=lambda o: o.__dict__).encode('utf-8'))
                resp.content_type = 'application/json;charset=utf-8'
                return resp
            else:
                resp = web.Response(body=app['__templating__'].get_template(template).render(**r).encode('utf-8'))
                resp.content_type = 'text/html;charset=utf-8'
                return resp
        if isinstance(r, int) and 100 <= r < 600:
            # web.Response takes keyword-only arguments
            return web.Response(status=r)
        if isinstance(r, tuple) and len(r) == 2:
            t, m = r
            if isinstance(t, int) and 100 <= t < 600:
                return web.Response(status=t, text=str(m))
        # default:
        resp = web.Response(body=str(r).encode('utf-8'))
        resp.content_type = 'text/plain;charset=utf-8'
        return resp

    return response


def datetime_filter(t):
    """
    Custom jinja2 filter; templates can call it directly to format output.
    :param t:
    :return:
    """
    delta = int(time.time() - t)
    if delta < 60:
        return u'1分钟前'
    if delta < 3600:
        return u'%s分钟前' % (delta // 60)
    if delta < 86400:
        return u'%s小时前' % (delta // 3600)
    if delta < 604800:
        return u'%s天前' % (delta // 86400)
    dt = datetime.fromtimestamp(t)
    return u'%s年%s月%s日' % (dt.year, dt.month, dt.day)


async def init(loop):
    await orm.create_pool(loop=loop, host=configs.db.host, port=configs.db.port, user=configs.db.user,
                          password=configs.db.password, db=configs.db.db)
    app = web.Application(loop=loop, middlewares=[
        logger_factory, data_factory, response_factory
    ])
    init_jinja2(app, filters=dict(datetime=datetime_filter))
    add_routes(app, *configs.web.handlers)
    add_static(app)
    runner = web.AppRunner(app)
    await runner.setup()
    port = 9000 if not configs.web.port else configs.web.port
    site = web.TCPSite(runner, '127.0.0.1', port)
    logging.info('server started at http://127.0.0.1:%s...' % port)
    await site.start()


# curPath = os.path.abspath(os.path.dirname(__file__))
# sys.path.append(os.path.split(curPath)[0])

# register modules


loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever()
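# Hedged sketch: a handler written against the response_factory conventions
# above. The `get` route decorator is assumed to come from www.coreweb (not
# shown here) and 'index.html' is a placeholder template name.
#
# from www.coreweb import get
#
# @get('/')
# async def index(request):
#     # A dict with '__template__' is rendered through jinja2; returning the
#     # string 'redirect:/signin' would produce an HTTP 302 instead.
#     return {'__template__': 'index.html', 'now': time.time()}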
"""NEWLINE :codeauthor: Megan Wilhite<mwilhite@saltstack.com>NEWLINE"""NEWLINENEWLINENEWLINEimport pytestNEWLINEimport salt.modules.mac_service as mac_serviceNEWLINEfrom salt.exceptions import CommandExecutionErrorNEWLINEfrom tests.support.mixins import LoaderModuleMockMixinNEWLINEfrom tests.support.mock import MagicMock, patchNEWLINEfrom tests.support.unit import TestCaseNEWLINENEWLINENEWLINEclass MacServiceTestCase(TestCase, LoaderModuleMockMixin):NEWLINE """NEWLINE TestCase for salt.modules.mac_service moduleNEWLINE """NEWLINENEWLINE def setup_loader_modules(self):NEWLINE return {mac_service: {"__context__": {}}}NEWLINENEWLINE def test_service_disabled_when_enabled(self):NEWLINE """NEWLINE test service.disabled when service is enabledNEWLINE """NEWLINE srv_name = "com.apple.atrun"NEWLINE cmd = 'disabled services = {\n\t"com.saltstack.salt.minion" => false\n\t"com.apple.atrun" => false\n{'NEWLINE domain_ret = MagicMock(return_value=("", ""))NEWLINE with patch.object(mac_service, "_get_domain_target", domain_ret):NEWLINE with patch.object(mac_service, "launchctl", MagicMock(return_value=cmd)):NEWLINE assert mac_service.disabled(srv_name) is FalseNEWLINENEWLINE def test_service_disabled_when_disabled(self):NEWLINE """NEWLINE test service.disabled when service is disabledNEWLINE """NEWLINE srv_name = "com.apple.atrun"NEWLINE cmd = 'disabled services = {\n\t"com.saltstack.salt.minion" => false\n\t"com.apple.atrun" => true\n{'NEWLINE domain_ret = MagicMock(return_value=("", ""))NEWLINE with patch.object(mac_service, "_get_domain_target", domain_ret):NEWLINE with patch.object(mac_service, "launchctl", MagicMock(return_value=cmd)):NEWLINE assert mac_service.disabled(srv_name) is TrueNEWLINENEWLINE def test_service_disabled_srvname_wrong(self):NEWLINE """NEWLINE test service.disabled when service is just slightly wrongNEWLINE """NEWLINE srv_names = ["com.apple.atru", "com", "apple"]NEWLINE cmd = 'disabled services = {\n\t"com.saltstack.salt.minion" => false\n\t"com.apple.atrun" => true\n}'NEWLINE domain_ret = MagicMock(return_value=("", ""))NEWLINE with patch.object(mac_service, "_get_domain_target", domain_ret):NEWLINE for name in srv_names:NEWLINE with patch.object(NEWLINE mac_service, "launchctl", MagicMock(return_value=cmd)NEWLINE ):NEWLINE assert mac_service.disabled(name) is FalseNEWLINENEWLINE def test_service_disabled_status_upper_case(self):NEWLINE """NEWLINE test service.disabled when disabled status is uppercaseNEWLINE """NEWLINE srv_name = "com.apple.atrun"NEWLINE cmd = 'disabled services = {\n\t"com.saltstack.salt.minion" => false\n\t"com.apple.atrun" => True\n{'NEWLINE domain_ret = MagicMock(return_value=("", ""))NEWLINE with patch.object(mac_service, "_get_domain_target", domain_ret):NEWLINE with patch.object(mac_service, "launchctl", MagicMock(return_value=cmd)):NEWLINE assert mac_service.disabled(srv_name) is TrueNEWLINENEWLINE def test_service_enabled_when_enabled(self):NEWLINE """NEWLINE test service.enabled when not disabledNEWLINE """NEWLINE mock_cmd = MagicMock(return_value=False)NEWLINE with patch.dict(mac_service.__salt__, {"service.disabled": mock_cmd}):NEWLINE assert mac_service.enabled("com.apple.atrun") is TrueNEWLINENEWLINE def test_service_enabled_when_disabled(self):NEWLINE """NEWLINE test service.enabled if service is disabledNEWLINE """NEWLINE mock_cmd = MagicMock(return_value=True)NEWLINE with patch.dict(mac_service.__salt__, {"service.disabled": mock_cmd}):NEWLINE assert mac_service.enabled("com.apple.atrun") is FalseNEWLINENEWLINE def 
test_service_loaded_when_true(self):NEWLINE """NEWLINE test service.loaded with a loaded service.NEWLINE """NEWLINE mock_cmd = MagicMock(return_value="some_service_string")NEWLINE with patch.dict(mac_service.__salt__, {"service.list": mock_cmd}):NEWLINE assert mac_service.loaded("com.apple.atrun") is TrueNEWLINENEWLINE def test_service_loaded_when_false(self):NEWLINE """NEWLINE test service.loaded with an unloaded service.NEWLINE """NEWLINE mock_cmd = MagicMock(side_effect=CommandExecutionError)NEWLINE with patch.dict(mac_service.__salt__, {"service.list": mock_cmd}):NEWLINE assert mac_service.loaded("com.apple.atrun") is FalseNEWLINENEWLINE def test_service_keep_alive_pathstate_file_rm(self):NEWLINE """NEWLINE test _always_running_service when keep_aliveNEWLINE has pathstate set in plist file and file doesn't existNEWLINE """NEWLINE srv_name = "com.apple.atrun"NEWLINE info = {NEWLINE "plist": {NEWLINE "EnableTransactions": True,NEWLINE "ProgramArguments": ["/usr/libexec/ntpd-wrapper"],NEWLINE "Label": "org.ntp.ntpd",NEWLINE "KeepAlive": {"PathState": {"/private/etc/ntp.conf": True}},NEWLINE }NEWLINE }NEWLINENEWLINE with patch.object(mac_service, "show", MagicMock(return_value=info)):NEWLINE with patch("os.path.exists", MagicMock(return_value=False)):NEWLINE assert mac_service._always_running_service(srv_name) is FalseNEWLINENEWLINE def test_service_keep_alive_empty(self):NEWLINE """NEWLINE test _always_running_service when keep_aliveNEWLINE is emptyNEWLINE """NEWLINE srv_name = "com.apple.atrun"NEWLINE info = {NEWLINE "plist": {NEWLINE "EnableTransactions": True,NEWLINE "ProgramArguments": ["/usr/libexec/ntpd-wrapper"],NEWLINE "Label": "org.ntp.ntpd",NEWLINE "KeepAlive": {},NEWLINE }NEWLINE }NEWLINENEWLINE with patch.object(mac_service, "show", MagicMock(return_value=info)):NEWLINE with patch("os.path.exists", MagicMock(return_value=False)):NEWLINE assert mac_service._always_running_service(srv_name) is FalseNEWLINENEWLINE def test_service_keep_alive_pathstate_false(self):NEWLINE """NEWLINE test _always_running_service when keep_aliveNEWLINE has pathstate set in plist file and file is falseNEWLINE """NEWLINE srv_name = "com.apple.atrun"NEWLINE info = {NEWLINE "plist": {NEWLINE "EnableTransactions": True,NEWLINE "ProgramArguments": ["/usr/libexec/ntpd-wrapper"],NEWLINE "Label": "org.ntp.ntpd",NEWLINE "KeepAlive": {"PathState": {"/private/etc/ntp.conf": False}},NEWLINE }NEWLINE }NEWLINENEWLINE with patch.object(mac_service, "show", MagicMock(return_value=info)):NEWLINE with patch("os.path.exists", MagicMock(return_value=False)):NEWLINE assert mac_service._always_running_service(srv_name) is TrueNEWLINENEWLINE def test_service_keep_alive_pathstate(self):NEWLINE """NEWLINE test _always_running_service when keep_aliveNEWLINE has pathstate set in plist fileNEWLINE """NEWLINE srv_name = "com.apple.atrun"NEWLINE info = {NEWLINE "plist": {NEWLINE "EnableTransactions": True,NEWLINE "ProgramArguments": ["/usr/libexec/ntpd-wrapper"],NEWLINE "Label": "org.ntp.ntpd",NEWLINE "KeepAlive": {"PathState": {"/private/etc/ntp.conf": True}},NEWLINE }NEWLINE }NEWLINENEWLINE with patch.object(mac_service, "show", MagicMock(return_value=info)):NEWLINE with patch("os.path.exists", MagicMock(return_value=True)):NEWLINE assert mac_service._always_running_service(srv_name) is TrueNEWLINENEWLINE def test_service_keep_alive(self):NEWLINE """NEWLINE test _always_running_service when keep_alive setNEWLINE """NEWLINE srv_name = "com.apple.atrun"NEWLINE info = {NEWLINE "plist": {NEWLINE "EnableTransactions": True,NEWLINE 
"ProgramArguments": ["/usr/libexec/ntpd-wrapper"],NEWLINE "Label": "org.ntp.ntpd",NEWLINE "KeepAlive": True,NEWLINE }NEWLINE }NEWLINENEWLINE with patch.object(mac_service, "show", MagicMock(return_value=info)):NEWLINE assert mac_service._always_running_service(srv_name) is TrueNEWLINENEWLINE def test_service_keep_alive_false(self):NEWLINE """NEWLINE test _always_running_service when keep_alive FalseNEWLINE """NEWLINE srv_name = "com.apple.atrun"NEWLINE info = {NEWLINE "plist": {NEWLINE "EnableTransactions": True,NEWLINE "ProgramArguments": ["/usr/libexec/ntpd-wrapper"],NEWLINE "Label": "org.ntp.ntpd",NEWLINE "KeepAlive": False,NEWLINE }NEWLINE }NEWLINENEWLINE with patch.object(mac_service, "show", MagicMock(return_value=info)):NEWLINE assert mac_service._always_running_service(srv_name) is FalseNEWLINENEWLINE def test_service_keep_alive_missing(self):NEWLINE """NEWLINE test _always_running_service when keep_alive not in dictNEWLINE """NEWLINE srv_name = "com.apple.atrun"NEWLINE info = {NEWLINE "plist": {NEWLINE "EnableTransactions": True,NEWLINE "ProgramArguments": ["/usr/libexec/ntpd-wrapper"],NEWLINE "Label": "org.ntp.ntpd",NEWLINE }NEWLINE }NEWLINENEWLINE with patch.object(mac_service, "show", MagicMock(return_value=info)):NEWLINE assert mac_service._always_running_service(srv_name) is FalseNEWLINENEWLINE def test_service_keep_alive_wrong_setting(self):NEWLINE """NEWLINE test _always_running_service when keep_aliveNEWLINE has pathstate set in plist fileNEWLINE """NEWLINE srv_name = "com.apple.atrun"NEWLINE info = {NEWLINE "plist": {NEWLINE "EnableTransactions": True,NEWLINE "ProgramArguments": ["/usr/libexec/ntpd-wrapper"],NEWLINE "Label": "org.ntp.ntpd",NEWLINE "KeepAlive": {"Doesnotexist": {"doesnt_exist": True}},NEWLINE }NEWLINE }NEWLINENEWLINE with patch.object(mac_service, "show", MagicMock(return_value=info)):NEWLINE assert mac_service._always_running_service(srv_name) is FalseNEWLINENEWLINE def test_service_name_change_salt_minion(self):NEWLINE srv_name = "salt-minion"NEWLINE info = {NEWLINE "com.saltstack.salt.minion": {NEWLINE "file_name": "com.saltstack.salt.minion.plist",NEWLINE "file_path": "/Library/LaunchDaemons/com.saltstack.salt.minion.plist",NEWLINE "plist": {NEWLINE "HardResourceLimits": {"NumberOfFiles": 100000},NEWLINE "KeepAlive": True,NEWLINE "Label": "com.saltstack.salt.minion",NEWLINE "ProgramArguments": ["/opt/salt/bin/start-salt-minion.sh"],NEWLINE "RunAtLoad": True,NEWLINE "SoftResourceLimits": {"NumberOfFiles": 100000},NEWLINE },NEWLINE }NEWLINE }NEWLINE with patch.dict(NEWLINE mac_service.__utils__,NEWLINE {"mac_utils.available_services": MagicMock(return_value=info)},NEWLINE ):NEWLINE assert (NEWLINE mac_service._get_service(srv_name) == info["com.saltstack.salt.minion"]NEWLINE )NEWLINENEWLINE def test_service_name_change_salt_master(self):NEWLINE srv_name = "salt-master"NEWLINE info = {NEWLINE "com.saltstack.salt.master": {NEWLINE "file_name": "com.saltstack.salt.master.plist",NEWLINE "file_path": "/Library/LaunchDaemons/com.saltstack.salt.master.plist",NEWLINE "plist": {NEWLINE "HardResourceLimits": {"NumberOfFiles": 100000},NEWLINE "KeepAlive": True,NEWLINE "Label": "com.saltstack.salt.master",NEWLINE "ProgramArguments": ["/opt/salt/bin/start-salt-master.sh"],NEWLINE "RunAtLoad": True,NEWLINE "SoftResourceLimits": {"NumberOfFiles": 100000},NEWLINE },NEWLINE }NEWLINE }NEWLINE with patch.dict(NEWLINE mac_service.__utils__,NEWLINE {"mac_utils.available_services": MagicMock(return_value=info)},NEWLINE ):NEWLINE assert (NEWLINE 
mac_service._get_service(srv_name) == info["com.saltstack.salt.master"]NEWLINE )NEWLINENEWLINE def test_service_name_change_salt_api(self):NEWLINE srv_name = "salt-api"NEWLINE info = {NEWLINE "com.saltstack.salt.api": {NEWLINE "file_name": "com.saltstack.salt.api.plist",NEWLINE "file_path": "/Library/LaunchDaemons/com.saltstack.salt.api.plist",NEWLINE "plist": {NEWLINE "HardResourceLimits": {"NumberOfFiles": 100000},NEWLINE "KeepAlive": True,NEWLINE "Label": "com.saltstack.salt.api",NEWLINE "ProgramArguments": ["/opt/salt/bin/start-salt-api.sh"],NEWLINE "RunAtLoad": True,NEWLINE "SoftResourceLimits": {"NumberOfFiles": 100000},NEWLINE },NEWLINE }NEWLINE }NEWLINE with patch.dict(NEWLINE mac_service.__utils__,NEWLINE {"mac_utils.available_services": MagicMock(return_value=info)},NEWLINE ):NEWLINE assert mac_service._get_service(srv_name) == info["com.saltstack.salt.api"]NEWLINENEWLINE def test_service_name_change_salt_syndic(self):NEWLINE srv_name = "salt-syndic"NEWLINE info = {NEWLINE "com.saltstack.salt.syndic": {NEWLINE "file_name": "com.saltstack.salt.syndic.plist",NEWLINE "file_path": "/Library/LaunchDaemons/com.saltstack.salt.syndic.plist",NEWLINE "plist": {NEWLINE "HardResourceLimits": {"NumberOfFiles": 100000},NEWLINE "KeepAlive": True,NEWLINE "Label": "com.saltstack.salt.syndic",NEWLINE "ProgramArguments": ["/opt/salt/bin/start-salt-syndic.sh"],NEWLINE "RunAtLoad": True,NEWLINE "SoftResourceLimits": {"NumberOfFiles": 100000},NEWLINE },NEWLINE }NEWLINE }NEWLINE with patch.dict(NEWLINE mac_service.__utils__,NEWLINE {"mac_utils.available_services": MagicMock(return_value=info)},NEWLINE ):NEWLINE assert (NEWLINE mac_service._get_service(srv_name) == info["com.saltstack.salt.syndic"]NEWLINE )NEWLINENEWLINE def test_service_restart_already_loaded(self):NEWLINE mock_cmd = MagicMock(return_value=True)NEWLINE salt_dict = {NEWLINE "service.loaded": mock_cmd,NEWLINE "service.stop": mock_cmd,NEWLINE "service.start": mock_cmd,NEWLINE }NEWLINE with patch.dict(mac_service.__salt__, salt_dict):NEWLINE assert mac_service.restart("com.salt") is TrueNEWLINENEWLINE def test_service_restart_not_loaded(self):NEWLINE salt_dict = {NEWLINE "service.loaded": MagicMock(return_value=False),NEWLINE "service.start": MagicMock(return_value=True),NEWLINE }NEWLINE with patch.dict(mac_service.__salt__, salt_dict):NEWLINE assert mac_service.restart("com.salt") is TrueNEWLINENEWLINE def test_service_restart_failed_stop(self):NEWLINE salt_dict = {NEWLINE "service.loaded": MagicMock(return_value=True),NEWLINE "service.stop": MagicMock(side_effect=CommandExecutionError),NEWLINE }NEWLINE with patch.dict(mac_service.__salt__, salt_dict):NEWLINE with pytest.raises(CommandExecutionError):NEWLINE assert mac_service.restart("com.salt")NEWLINENEWLINE def test_service_restart_failed_start(self):NEWLINE salt_dict = {NEWLINE "service.loaded": MagicMock(return_value=False),NEWLINE "service.start": MagicMock(side_effect=CommandExecutionError),NEWLINE }NEWLINE with patch.dict(mac_service.__salt__, salt_dict):NEWLINE with pytest.raises(CommandExecutionError):NEWLINE assert mac_service.restart("com.salt")NEWLINENEWLINE def test_service_status_no_service(self):NEWLINE """NEWLINE Test service status with no service foundNEWLINE """NEWLINE with patch.object(NEWLINE mac_service, "_get_service", MagicMock(side_effect=CommandExecutionError)NEWLINE ):NEWLINE assert mac_service.status("com.salt") is FalseNEWLINENEWLINE @patch.object(mac_service, "_launch_agent", lambda _: False)NEWLINE @patch.object(mac_service, "_get_service", lambda _: {"": 
""})NEWLINE @patch.object(mac_service, "_always_running_service", lambda _: True)NEWLINE def test_service_status_on_daemon_with_pid(self):NEWLINE """NEWLINE Test service status on dameon with PID.NEWLINE """NEWLINE mock_service_list = '{\n\t"LimitLoadToSessionType" = "System";\n\t"Label" = "com.salt";\n\t"OnDemand" = false;\n\t"LastExitStatus" = 0;\n\t"PID" = 218;\n\t"Program" = "/opt/salt";\n\t\t"--disable-keepalive";\n\t);\n};'NEWLINE salt_dict = {NEWLINE "service.list": MagicMock(return_value=mock_service_list),NEWLINE }NEWLINE with patch.dict(mac_service.__salt__, salt_dict):NEWLINE assert mac_service.status("com.salt") is TrueNEWLINENEWLINE @patch.object(mac_service, "_launch_agent", lambda _: True)NEWLINE @patch.object(mac_service, "_get_service", lambda _: {"": ""})NEWLINE @patch.object(mac_service, "_always_running_service", lambda _: True)NEWLINE def test_service_status_on_agent_with_pid(self):NEWLINE """NEWLINE Test service status on LaunchAgent with PID.NEWLINE """NEWLINE mock_service_list = '{\n\t"LimitLoadToSessionType" = "Aqua";\n\t"Label" = "com.salt";\n\t"OnDemand" = false;\n\t"LastExitStatus" = 19968;\n\t"PID" = 218;\n\t"Program" = "/opt/salt";\n\t"ProgramArguments" = (\n\t\t"/opt/salt";\n\t\t"--syslog";\n\t);\n};'NEWLINE salt_dict = {NEWLINE "service.list": MagicMock(return_value=mock_service_list),NEWLINE }NEWLINE utils_dict = {NEWLINE "mac_utils.console_user": MagicMock(return_value="spongebob"),NEWLINE }NEWLINE with patch.dict(mac_service.__salt__, salt_dict):NEWLINE with patch.dict(mac_service.__utils__, utils_dict):NEWLINE assert mac_service.status("com.salt") is TrueNEWLINENEWLINE @patch.object(mac_service, "_launch_agent", lambda _: True)NEWLINE @patch.object(mac_service, "_get_service", lambda _: {"": ""})NEWLINE @patch.object(mac_service, "_always_running_service", lambda _: True)NEWLINE def test_service_status_on_agent_with_no_pid_and_should_be_running(self):NEWLINE """NEWLINE Test service status on LaunchAgent with No PID and should be running.NEWLINE """NEWLINE mock_service_list = '{\n\t"LimitLoadToSessionType" = "Aqua";\n\t"Label" = "com.salt";\n\t"OnDemand" = false;\n\t"LastExitStatus" = 19968;\n\t"Program" = "/opt/salt";\n\t"ProgramArguments" = (\n\t\t"/opt/salt";\n\t\t"--syslog";\n\t);\n};'NEWLINE salt_dict = {NEWLINE "service.list": MagicMock(return_value=mock_service_list),NEWLINE }NEWLINE utils_dict = {NEWLINE "mac_utils.console_user": MagicMock(return_value="spongebob"),NEWLINE }NEWLINE with patch.dict(mac_service.__salt__, salt_dict):NEWLINE with patch.dict(mac_service.__utils__, utils_dict):NEWLINE assert mac_service.status("com.salt") is FalseNEWLINENEWLINE @patch.object(mac_service, "_launch_agent", lambda _: False)NEWLINE @patch.object(mac_service, "_get_service", lambda _: {"": ""})NEWLINE @patch.object(mac_service, "_always_running_service", lambda _: True)NEWLINE def test_service_status_on_daemon_with_no_pid_and_should_be_running(self):NEWLINE """NEWLINE Test service status on LaunchDaemon with no PID and anNEWLINE always running service that is loaded.NEWLINE """NEWLINE mock_service_list = '{\n\t"LimitLoadToSessionType" = "System";\n\t"Label" = "com.salt";\n\t"OnDemand" = false;\n\t"LastExitStatus" = 19968;\n\t"Program" = "/opt/salt.sh";\n\t"ProgramArguments" = (\n\t\t"/opt/salt.sh";\n\t\t"--disable-keepalive";\n\t);\n};'NEWLINE salt_dict = {NEWLINE "service.list": MagicMock(return_value=mock_service_list),NEWLINE }NEWLINE with patch.dict(mac_service.__salt__, salt_dict):NEWLINE assert mac_service.status("com.salt") is FalseNEWLINENEWLINE 
@patch.object(mac_service, "_launch_agent", lambda _: False)NEWLINE @patch.object(mac_service, "_get_service", lambda _: {"": ""})NEWLINE @patch.object(mac_service, "_always_running_service", lambda _: False)NEWLINE def test_service_status_on_daemon_with_no_pid_and_not_always_running(self):NEWLINE """NEWLINE Test service status on LaunchDaemon with no PID and not an alwaysNEWLINE running service.NEWLINE """NEWLINE mock_service_list = '{\n\t"LimitLoadToSessionType" = "System";\n\t"Label" = "com.salt";\n\t"OnDemand" = false;\n\t"LastExitStatus" = 19968;\n\t"Program" = "/opt/salt.sh";\n\t"ProgramArguments" = (\n\t\t"/opt/salt.sh";\n\t\t"--disable-keepalive";\n\t);\n};'NEWLINE salt_dict = {NEWLINE "service.list": MagicMock(return_value=mock_service_list),NEWLINE }NEWLINE with patch.dict(mac_service.__salt__, salt_dict):NEWLINE assert mac_service.status("com.salt") is TrueNEWLINENEWLINE @patch.object(mac_service, "_launch_agent", lambda _: False)NEWLINE @patch.object(mac_service, "_get_service", lambda _: {"": ""})NEWLINE @patch.object(mac_service, "_always_running_service", lambda _: False)NEWLINE def test_service_status_on_daemon_with_failing_list_check(self):NEWLINE """NEWLINE Test service status on LaunchDaemon with no PID on anNEWLINE always running service that is loaded.NEWLINE """NEWLINE mock_service_list = '{\n\t"LimitLoadToSessionType" = "System";\n\t"Label" = "com.salt";\n\t"OnDemand" = false;\n\t"LastExitStatus" = 19968;\n\t"Program" = "/opt/salt.sh";\n\t"ProgramArguments" = (\n\t\t"/opt/salt.sh";\n\t\t"--disable-keepalive";\n\t);\n};'NEWLINE salt_dict = {NEWLINE "service.list": MagicMock(side_effect=CommandExecutionError),NEWLINE }NEWLINE with patch.dict(mac_service.__salt__, salt_dict):NEWLINE assert mac_service.status("com.salt") is FalseNEWLINENEWLINE def test_get_service_on_service_dead(self):NEWLINE """NEWLINE Test service.dead changes.NEWLINE https://github.com/saltstack/salt/issues/57907NEWLINE """NEWLINE utils_dict = {NEWLINE "mac_utils.available_services": MagicMock(return_value={}),NEWLINE }NEWLINE context_dict = {NEWLINE "using_cached_services": True,NEWLINE "service.state": "dead",NEWLINE }NEWLINE name_in_service = MagicMock(side_effect=[{}, {"com.salt": True}])NEWLINE with patch.dict(mac_service.__utils__, utils_dict):NEWLINE with patch.object(mac_service, "_name_in_services", name_in_service):NEWLINE with patch.dict(mac_service.__context__, context_dict):NEWLINE with pytest.raises(CommandExecutionError):NEWLINE assert mac_service._get_service("com.salt")NEWLINE # find the service on a second go with no service.deadNEWLINE with patch.dict(mac_service.__context__, {}):NEWLINE assert mac_service._get_service("com.salt") == {"com.salt": True}NEWLINE
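# Hedged refactoring sketch (not part of the Salt test suite): the four
# test_service_name_change_salt_* cases above differ only in the daemon name.
# If this module were ported off unittest.TestCase (pytest parametrization
# does not work on TestCase methods), they could collapse into one test:
#
# @pytest.mark.parametrize("daemon", ["minion", "master", "api", "syndic"])
# def test_service_name_change(daemon):
#     label = "com.saltstack.salt." + daemon
#     info = {label: {"file_name": label + ".plist", "plist": {"Label": label}}}
#     with patch.dict(
#         mac_service.__utils__,
#         {"mac_utils.available_services": MagicMock(return_value=info)},
#     ):
#         assert mac_service._get_service("salt-" + daemon) == info[label]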
import sys
import numpy as np
from acd.util import tiling_2d as tiling
from acd.scores.cd import cd, cd_text
from skimage import measure  # for connected components
from math import ceil
from scipy.signal import convolve2d
from copy import deepcopy
from acd.scores import score_funcs


# score doesn't have to just be prediction for label
def refine_scores(scores, lab_num):
    return scores[:, lab_num]


# higher scores are more likely to be picked
def threshold_scores(scores, percentile_include, method):
    X = scores

    # pick more when more is already picked
    num_picked = np.sum(np.isnan(scores))
    if num_picked > scores.size / 3:
        percentile_include -= 15

    thresh = np.nanpercentile(X, percentile_include)
    # thresh = np.max(X) # pick only 1 pixel at a time
    im_thresh = np.logical_and(scores >= thresh, ~np.isnan(scores))
    # scores >= thresh #np.logical_and(scores >= thresh, scores != 0)

    # make sure we pick something
    while np.sum(im_thresh) == 0:
        percentile_include -= 4
        thresh = np.nanpercentile(X, percentile_include)
        # thresh = np.max(X) # pick only 1 pixel at a time
        im_thresh = np.logical_and(scores >= thresh, ~np.isnan(scores))
        # np.logical_and(scores >= thresh, scores != 0)
    return im_thresh


# if 3 sides of a pixel are selected, also select the pixel
filt = np.zeros((3, 3))
filt[:, 1] = 1  # middle column
filt[1, :] = 1  # middle row


def smooth_im_thresh(im_thresh_old, im_thresh):
    im = im_thresh_old + im_thresh
    im_count_neighbors = convolve2d(im, filt, mode='same')
    pixels_to_add = np.logical_and(np.logical_not(im), im_count_neighbors >= 3)
    return im + pixels_to_add


# establish correspondence between segs
def establish_correspondence(seg1, seg2):
    seg_out = np.zeros(seg1.shape, dtype='int64')
    new_counter = 0

    num_segs = int(np.max(seg2))
    remaining = list(range(1, 12))  # only have 10 colors though
    for i in range(1, num_segs + 1):
        seg = seg2 == i
        old_seg = seg1[seg]
        matches = np.unique(old_seg[old_seg != 0])
        num_matches = matches.size

        # new seg
        if num_matches == 0:
            new_counter -= 1
            seg_out[seg] = new_counter

        # 1 match
        elif num_matches == 1:
            seg_out[seg] = matches[0]
            remaining.remove(matches[0])

        # >1 matches (segs merged)
        else:
            seg_out[seg] = min(matches)
            remaining.remove(min(matches))

    # assign new segs
    while new_counter < 0:
        seg_out[seg_out == new_counter] = min(remaining)
        remaining.remove(min(remaining))
        new_counter += 1

    return seg_out  # seg2


# agglomerate - black out selected pixels from before and resweep over the entire image
def agglomerate(model, pred_ims, percentile_include, method, sweep_dim,
                im_orig, lab_num, num_iters=5, im_torch=None, model_type='mnist', device='cuda'):
    # set up shapes
    R = im_orig.shape[0]
    C = im_orig.shape[1]
    size_downsampled = (ceil(R / sweep_dim), ceil(C / sweep_dim))  # effectively downsampled

    # get scores
    tiles = tiling.gen_tiles(im_orig, fill=0, method=method, sweep_dim=sweep_dim)
    scores_orig_raw = score_funcs.get_scores_2d(model, method, ims=tiles, im_torch=im_torch,
                                                pred_ims=pred_ims, model_type=model_type, device=device)
    scores_track = np.copy(refine_scores(scores_orig_raw, lab_num)).reshape(
        size_downsampled)  # keep track of these scores

    # threshold im
    im_thresh = threshold_scores(scores_track, percentile_include, method)

    # initialize lists
    scores_list = [np.copy(scores_track)]
    im_thresh_list = [im_thresh]
    comps_list = []
    if not method == 'cd':
        comp_scores_raw_list = [{0: score_funcs.get_scores_2d(model, 'build_up',
                                                              ims=np.expand_dims(im_orig, 0),  # score for full image
                                                              im_torch=im_torch, pred_ims=pred_ims,
                                                              model_type=model_type, device=device)[0]}]
    else:
        comp_scores_raw_list = [{0: score_funcs.get_scores_2d(model, method,
                                                              ims=np.expand_dims(np.ones(im_orig.transpose().shape), 0),
                                                              # score for full image
                                                              im_torch=im_torch, pred_ims=pred_ims,
                                                              model_type=model_type, device=device)[0]}]
    comp_scores_raw_combined_list = []

    # iterate
    for step in range(num_iters):
        # if already selected all pixels then break
        if np.sum(im_thresh_list[-1]) == R * C:
            break

        # find connected components for regions
        comps = np.copy(measure.label(im_thresh_list[-1], background=0, connectivity=2))

        # establish correspondence
        if step > 0:
            comps_orig = np.copy(comps)
            try:
                comps = establish_correspondence(comps_list[-1], comps_orig)
            except Exception:  # fall back to the raw labeling if correspondence fails
                comps = comps_orig
        # plt.imshow(comps)
        # plt.show()

        comp_tiles = {}  # stores tiles corresponding to each tile
        if not method == 'cd':
            comps_combined_tile = np.zeros(shape=im_orig.shape)  # stores all comp tiles combined
        else:
            comps_combined_tile = np.zeros(shape=(R, C))  # stores all comp tiles combined
        comp_surround_tiles = {}  # stores tiles around comp_tiles
        comp_surround_idxs = {}

        # make tiles
        comp_nums = np.unique(comps)
        comp_nums = comp_nums[comp_nums > 0]  # remove 0
        for comp_num in comp_nums:
            if comp_num > 0:
                # make component tile
                comp_tile_downsampled = (comps == comp_num)
                comp_tiles[comp_num] = tiling.gen_tile_from_comp(im_orig, comp_tile_downsampled,
                                                                 sweep_dim, method)  # this is full size
                comp_tile_binary = tiling.gen_tile_from_comp(im_orig, comp_tile_downsampled,
                                                             sweep_dim, 'cd')  # this is full size
                # print('comps sizes', comps_combined_tile.shape, comp_tiles[comp_num].shape)
                comps_combined_tile += comp_tiles[comp_num]

                # generate tiles and corresponding idxs around component
                comp_surround_tiles[comp_num], comp_surround_idxs[comp_num] = \
                    tiling.gen_tiles_around_baseline(im_orig, comp_tile_binary, method=method, sweep_dim=sweep_dim)

        # predict for all tiles
        comp_scores_raw_dict = {}  # dictionary of {comp_num: comp_score}
        for comp_num in comp_nums:
            tiles = np.concatenate((np.expand_dims(comp_tiles[comp_num], 0),  # baseline tile at 0
                                    np.expand_dims(comps_combined_tile, 0),  # combined tile at 1
                                    comp_surround_tiles[comp_num]))  # all others afterwards
            scores_raw = score_funcs.get_scores_2d(model, method, ims=tiles, im_torch=im_torch,
                                                   pred_ims=pred_ims, model_type=model_type)

            # decipher scores
            score_comp = np.copy(refine_scores(scores_raw, lab_num)[0])
            scores_tiles = np.copy(refine_scores(scores_raw, lab_num)[2:])

            # store the predicted class scores
            comp_scores_raw_dict[comp_num] = np.copy(scores_raw[0])
            score_comps_raw_combined = np.copy(scores_raw[1])

            # update pixel scores
            tiles_idxs = comp_surround_idxs[comp_num]
            for i in range(len(scores_tiles)):
                (r, c) = tiles_idxs[i]
                scores_track[r, c] = np.max(scores_tiles[i] - score_comp)  # todo: subtract off previous comp / weight?

        # get class preds and thresholded image
        scores_track[im_thresh_list[-1]] = np.nan
        im_thresh = threshold_scores(scores_track, percentile_include, method)
        im_thresh_smoothed = smooth_im_thresh(im_thresh_list[-1], im_thresh)

        # add to lists
        scores_list.append(np.copy(scores_track))
        im_thresh_list.append(im_thresh_smoothed)
        comps_list.append(comps)
        comp_scores_raw_list.append(comp_scores_raw_dict)
        comp_scores_raw_combined_list.append(score_comps_raw_combined)

    # pad first image
    comps_list = [np.zeros(im_orig.shape)] + comps_list

    lists = {'scores_list': scores_list,  # float arrs of scores tracked over time (NaN for already picked)
             'im_thresh_list': im_thresh_list,  # boolean array of selected pixels over time
             'comps_list': comps_list,  # numpy arrs (each component is a different number, 0 for background)
             'comp_scores_raw_list': comp_scores_raw_list,  # dicts, each key is a number corresponding to a component
             'comp_scores_raw_combined_list': comp_scores_raw_combined_list,
             # arrs representing scores for all current comps combined
             'scores_orig_raw': scores_orig_raw,
             'num_before_final': len(im_thresh_list)}  # one arr with original scores of pixels
    lists = agglomerate_final(lists, model, pred_ims, percentile_include, method, sweep_dim,
                              im_orig, lab_num, num_iters=5, im_torch=im_torch, model_type=model_type)

    return lists


# agglomerate the final blobs
def agglomerate_final(lists, model, pred_ims, percentile_include, method, sweep_dim,
                      im_orig, lab_num, num_iters=5, im_torch=None, model_type='mnist'):
    # while multiple types of blobs
    while (np.unique(lists['comps_list'][-1]).size > 2):
        # for q in range(3):
        comps = np.copy(lists['comps_list'][-1])
        comp_scores_raw_dict = deepcopy(lists['comp_scores_raw_list'][-1])

        # todo: initially merge really small blobs with nearest big blobs
        # if q == 0:

        # make tiles by combining pairs in comps
        comp_tiles = {}  # stores tiles corresponding to each tile
        for comp_num in np.unique(comps):
            if comp_num > 0:
                # make component tile
                comp_tile_downsampled = (comps == comp_num)
                comp_tiles[comp_num] = tiling.gen_tile_from_comp(im_orig, comp_tile_downsampled,
                                                                 sweep_dim, method)  # this is full size

        # make combined tiles
        comp_tiles_comb = {}
        for comp_num1 in np.unique(comps):
            for comp_num2 in np.unique(comps):
                if 0 < comp_num1 < comp_num2:
                    comp_tiles_comb[(comp_num1, comp_num2)] = tiling.combine_tiles(comp_tiles[comp_num1],
                                                                                   comp_tiles[comp_num2], method)

        # predict for all tiles
        comp_max_score_diff = -1e10
        comp_max_key_pair = None
        comp_max_scores_raw = None
        for key in comp_tiles_comb.keys():
            # calculate scores
            tiles = 1.0 * np.expand_dims(comp_tiles_comb[key], 0)
            scores_raw = score_funcs.get_scores_2d(model, method, ims=tiles, im_torch=im_torch,
                                                   pred_ims=pred_ims, model_type=model_type)

            # refine scores for correct class - todo this doesn't work with refine_scores
            score_comp = np.copy(refine_scores(scores_raw, lab_num)[0])
            # score_orig = np.max(refine_scores(np.expand_dims(comp_scores_raw_dict[key[0]], 0), lab_num)[0],
            #                     refine_scores(np.expand_dims(comp_scores_raw_dict[key[1]], 0), lab_num)[0])
            score_orig = max(comp_scores_raw_dict[key[0]][lab_num], comp_scores_raw_dict[key[1]][lab_num])
            score_diff = score_comp - score_orig

            # find best score
            if score_diff > comp_max_score_diff:
                comp_max_score_diff = score_diff
                comp_max_key_pair = key
                comp_max_scores_raw = np.copy(scores_raw[0])  # store the predicted class scores

        # merge highest scoring blob pair
        comps[comps == comp_max_key_pair[1]] = comp_max_key_pair[0]

        # update highest scoring blob pair score
        comp_scores_raw_dict[comp_max_key_pair[0]] = comp_max_scores_raw
        comp_scores_raw_dict.pop(comp_max_key_pair[1])

        # add to lists
        lists['comps_list'].append(comps)
        lists['comp_scores_raw_list'].append(comp_scores_raw_dict)
        lists['scores_list'].append(lists['scores_list'][-1])
        lists['im_thresh_list'].append(lists['im_thresh_list'][-1])
        lists['comp_scores_raw_combined_list'].append(lists['comp_scores_raw_combined_list'][-1])

    return lists
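# Hedged usage sketch: driving agglomerate() end to end. The model, its
# prediction wrapper, and the input image are placeholders; only the argument
# shapes follow the signatures above.
#
# lists = agglomerate(
#     model,
#     pred_ims=pred_ims_fn,            # pred_ims_fn(model, ims) -> per-class scores
#     percentile_include=98,
#     method='cd',
#     sweep_dim=1,
#     im_orig=im,                      # 2D numpy array, e.g. a 28x28 digit
#     lab_num=label,
#     num_iters=5,
#     im_torch=im_tensor,
#     model_type='mnist',
#     device='cuda',
# )
# final_seg = lists['comps_list'][-1]  # final component map (0 = background)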
# pylint: disable=too-many-lines,redefined-outer-name
import enum
import os
import sys
import traceback
from collections import defaultdict
from enum import Enum
from queue import Queue
from typing import Any, Callable, Dict, Generator, Iterable, List, NoReturn, Optional, Tuple, Union
from urllib.parse import urlparse

import attr
import click
import hypothesis
import requests
import yaml

from .. import checks as checks_module
from .. import fixups as _fixups
from .. import runner, service
from .. import targets as targets_module
from ..constants import (
    DEFAULT_DATA_GENERATION_METHODS,
    DEFAULT_RESPONSE_TIMEOUT,
    DEFAULT_STATEFUL_RECURSION_LIMIT,
    HYPOTHESIS_IN_MEMORY_DATABASE_IDENTIFIER,
    CodeSampleStyle,
    DataGenerationMethod,
)
from ..exceptions import HTTPError, SchemaLoadingError
from ..fixups import ALL_FIXUPS
from ..hooks import GLOBAL_HOOK_DISPATCHER, HookContext, HookDispatcher, HookScope
from ..models import Case, CheckFunction
from ..runner import events, prepare_hypothesis_settings
from ..schemas import BaseSchema
from ..specs.graphql import loaders as gql_loaders
from ..specs.graphql.schemas import GraphQLSchema
from ..specs.openapi import loaders as oas_loaders
from ..stateful import Stateful
from ..targets import Target
from ..types import Filter, PathLike, RequestCert
from ..utils import GenericResponse, file_exists, get_requests_auth, import_app
from . import callbacks, cassettes, output
from .constants import DEFAULT_WORKERS, MAX_WORKERS, MIN_WORKERS
from .context import ExecutionContext, ServiceContext
from .debug import DebugOutputHandler
from .handlers import EventHandler
from .junitxml import JunitXMLHandler
from .options import CsvChoice, CsvEnumChoice, CustomHelpMessageChoice, NotSet, OptionalInt

try:
    from yaml import CSafeLoader as SafeLoader
except ImportError:
    # pylint: disable=unused-import
    from yaml import SafeLoader  # type: ignore


def _get_callable_names(items: Tuple[Callable, ...]) -> Tuple[str, ...]:
    return tuple(item.__name__ for item in items)


CONTEXT_SETTINGS = {"help_option_names": ["-h", "--help"]}

DEFAULT_CHECKS_NAMES = _get_callable_names(checks_module.DEFAULT_CHECKS)
ALL_CHECKS_NAMES = _get_callable_names(checks_module.ALL_CHECKS)
CHECKS_TYPE = CsvChoice((*ALL_CHECKS_NAMES, "all"))

DEFAULT_TARGETS_NAMES = _get_callable_names(targets_module.DEFAULT_TARGETS)
ALL_TARGETS_NAMES = _get_callable_names(targets_module.ALL_TARGETS)
TARGETS_TYPE = click.Choice((*ALL_TARGETS_NAMES, "all"))

DATA_GENERATION_METHOD_TYPE = click.Choice([item.name for item in DataGenerationMethod])

DEPRECATED_CASSETTE_PATH_OPTION_WARNING = (
    "Warning: Option `--store-network-log` is deprecated and will be removed in Schemathesis 4.0. "
    "Use `--cassette-path` instead."
)
CASSETTES_PATH_INVALID_USAGE_MESSAGE = "Can't use `--store-network-log` and `--cassette-path` simultaneously"


def register_target(function: Target) -> Target:
    """Register a new testing target for schemathesis CLI.

    :param function: A function that will be called to calculate a metric passed to ``hypothesis.target``.
    """
    targets_module.ALL_TARGETS += (function,)
    TARGETS_TYPE.choices += (function.__name__,)  # type: ignore
    return function


def register_check(function: CheckFunction) -> CheckFunction:
    """Register a new check for schemathesis CLI.

    :param function: A function to validate API responses.

    .. code-block:: python

        @schemathesis.register_check
        def new_check(response, case):
            # some awesome assertions!
            ...
    """
    checks_module.ALL_CHECKS += (function,)
    CHECKS_TYPE.choices += (function.__name__,)  # type: ignore
    return function


def reset_checks() -> None:
    """Reset the checks list to its default state."""
    # Useful in tests
    checks_module.ALL_CHECKS = checks_module.DEFAULT_CHECKS + checks_module.OPTIONAL_CHECKS
    CHECKS_TYPE.choices = _get_callable_names(checks_module.ALL_CHECKS) + ("all",)


def reset_targets() -> None:
    """Reset the targets list to its default state."""
    # Useful in tests
    targets_module.ALL_TARGETS = targets_module.DEFAULT_TARGETS + targets_module.OPTIONAL_TARGETS
    TARGETS_TYPE.choices = _get_callable_names(targets_module.ALL_TARGETS) + ("all",)


class DeprecatedOption(click.Option):
    def __init__(self, *args: Any, removed_in: str, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.removed_in = removed_in

    def handle_parse_result(self, ctx: click.Context, opts: Dict[str, Any], args: List[str]) -> Tuple[Any, List[str]]:
        if self.name in opts:
            opt_names = "/".join(f"`{name}`" for name in self.opts)
            verb = "is" if len(self.opts) == 1 else "are"
            click.secho(
                f"\nWARNING: {opt_names} {verb} deprecated and will be removed in Schemathesis {self.removed_in}\n",
                fg="yellow",
            )
        return super().handle_parse_result(ctx, opts, args)


@click.group(context_settings=CONTEXT_SETTINGS)
@click.option("--pre-run", help="A module to execute before running the tests.", type=str)
@click.version_option()
def schemathesis(pre_run: Optional[str] = None) -> None:
    """Command line tool for testing your web application built with Open API / GraphQL specifications."""
    if pre_run:
        load_hook(pre_run)


class ParameterGroup(enum.Enum):
    filtering = "Filtering", "These options define what parts of the API will be tested."
    validation = "Validation", "Options responsible for how responses & schemas will be checked."
    hypothesis = "Hypothesis", "Configuration of the underlying Hypothesis engine."
    generic = "Generic", None


class CommandWithCustomHelp(click.Command):
    def format_options(self, ctx: click.Context, formatter: click.HelpFormatter) -> None:
        # Group options first
        groups = defaultdict(list)
        for param in self.get_params(ctx):
            rv = param.get_help_record(ctx)
            if rv is not None:
                if isinstance(param, GroupedOption):
                    group = param.group
                else:
                    group = ParameterGroup.generic
                groups[group].append(rv)
        # Then display groups separately with optional description
        for group in ParameterGroup:
            opts = groups[group]
            group_name, description = group.value
            with formatter.section(f"{group_name} options"):
                if description:
                    formatter.write_paragraph()
                    formatter.write_text(description)
                    formatter.write_paragraph()
                formatter.write_dl(opts)


class GroupedOption(click.Option):
    def __init__(self, *args: Any, group: ParameterGroup, **kwargs: Any):
        super().__init__(*args, **kwargs)
        self.group = group


with_request_tls_verify = click.option(
    "--request-tls-verify",
    help="Controls whether Schemathesis verifies the server's TLS certificate. "
    "You can also pass the path to a CA_BUNDLE file for private certs.",
    type=str,
    default="true",
    show_default=True,
    callback=callbacks.convert_request_tls_verify,
)
with_request_cert = click.option(
    "--request-cert",
    help="File path of unencrypted client certificate for authentication. "
    "The certificate can be bundled with a private key (e.g. PEM) or the private "
    "key can be provided with the --request-cert-key argument.",
    type=click.Path(exists=True),
    default=None,
    show_default=False,
)
with_request_cert_key = click.option(
    "--request-cert-key",
    help="File path of the private key of the client certificate.",
    type=click.Path(exists=True),
    default=None,
    show_default=False,
    callback=callbacks.validate_request_cert_key,
)
with_hosts_file = click.option(
    "--hosts-file",
    help="Path to a file to store the Schemathesis.io auth configuration.",
    type=click.Path(dir_okay=False, writable=True),
    default=service.DEFAULT_HOSTS_PATH,
    envvar=service.HOSTS_PATH_ENV_VAR,
)


@schemathesis.command(short_help="Perform schemathesis test.", cls=CommandWithCustomHelp)
@click.argument("schema", type=str)
@click.argument("api_slug", type=str, required=False)
@click.option(
    "--checks",
    "-c",
    multiple=True,
    help="List of checks to run.",
    type=CHECKS_TYPE,
    default=DEFAULT_CHECKS_NAMES,
    cls=GroupedOption,
    group=ParameterGroup.validation,
    callback=callbacks.convert_checks,
    show_default=True,
)
@click.option(
    "--data-generation-method",
    "-D",
    "data_generation_methods",
    help="Defines how Schemathesis generates data for tests.",
    type=DATA_GENERATION_METHOD_TYPE,
    default=DataGenerationMethod.default(),
    callback=callbacks.convert_data_generation_method,
    show_default=True,
)
@click.option(
    "--max-response-time",
    help="A custom check that will fail if the response time is greater than the specified one in milliseconds.",
    type=click.IntRange(min=1),
    cls=GroupedOption,
    group=ParameterGroup.validation,
)
@click.option(
    "--target",
    "-t",
    "targets",
    multiple=True,
    help="Targets for input generation.",
    type=TARGETS_TYPE,
    default=DEFAULT_TARGETS_NAMES,
    show_default=True,
)
@click.option(
    "-x",
    "--exitfirst",
    "exit_first",
    is_flag=True,
    default=False,
    help="Exit instantly on first error or failed test.",
    show_default=True,
)
@click.option(
    "--dry-run",
    "dry_run",
    is_flag=True,
    default=False,
    help="Disable sending data to the application and checking responses. "
    "Helpful to verify whether data is generated at all.",
)
@click.option(
    "--auth", "-a", help="Server user and password. Example: USER:PASSWORD", type=str, callback=callbacks.validate_auth
)
@click.option(
    "--auth-type",
    "-A",
    type=click.Choice(["basic", "digest"], case_sensitive=False),
    default="basic",
    help="The authentication mechanism to be used. Defaults to 'basic'.",
    show_default=True,
)
@click.option(
    "--header",
    "-H",
    "headers",
    help=r"Custom header that will be used in all requests to the server. Example: Authorization: Bearer\ 123",
    multiple=True,
    type=str,
    callback=callbacks.validate_headers,
)
@click.option(
    "--endpoint",
    "-E",
    "endpoints",
    type=str,
    multiple=True,
    help=r"Filter schemathesis tests by API operation path pattern. Example: users/\d+",
    callback=callbacks.validate_regex,
    cls=GroupedOption,
    group=ParameterGroup.filtering,
)
@click.option(
    "--method",
    "-M",
    "methods",
    type=str,
    multiple=True,
    help="Filter schemathesis tests by HTTP method.",
    callback=callbacks.validate_regex,
    cls=GroupedOption,
    group=ParameterGroup.filtering,
)
@click.option(
    "--tag",
    "-T",
    "tags",
    type=str,
    multiple=True,
    help="Filter schemathesis tests by schema tag pattern.",
    callback=callbacks.validate_regex,
    cls=GroupedOption,
    group=ParameterGroup.filtering,
)
@click.option(
    "--operation-id",
    "-O",
    "operation_ids",
    type=str,
    multiple=True,
    help="Filter schemathesis tests by operationId pattern.",
    callback=callbacks.validate_regex,
    cls=GroupedOption,
    group=ParameterGroup.filtering,
)
@click.option(
    "--workers",
    "-w",
    "workers_num",
    help="Number of workers to run tests.",
    type=CustomHelpMessageChoice(
        ["auto"] + list(map(str, range(MIN_WORKERS, MAX_WORKERS + 1))),
        choices_repr=f"[auto|{MIN_WORKERS}-{MAX_WORKERS}]",
    ),
    default=str(DEFAULT_WORKERS),
    show_default=True,
    callback=callbacks.convert_workers,
)
@click.option(
    "--base-url",
    "-b",
    help="Base URL address of the API, required for SCHEMA if specified by file.",
    type=str,
    callback=callbacks.validate_base_url,
)
@click.option("--app", help="WSGI/ASGI application to test.", type=str, callback=callbacks.validate_app)
@click.option(
    "--request-timeout",
    help="Timeout in milliseconds for network requests during the test run.",
    type=click.IntRange(1),
    default=DEFAULT_RESPONSE_TIMEOUT,
)
@with_request_tls_verify
@with_request_cert
@with_request_cert_key
@click.option(
    "--validate-schema",
    help="Enable or disable validation of input schema.",
    type=bool,
    default=False,
    show_default=True,
    cls=GroupedOption,
    group=ParameterGroup.validation,
)
@click.option(
    "--skip-deprecated-operations",
    help="Skip testing of deprecated API operations.",
    is_flag=True,
    is_eager=True,
    default=False,
    show_default=True,
    cls=GroupedOption,
    group=ParameterGroup.filtering,
)
@click.option(
    "--junit-xml", help="Create junit-xml style report file at given path.", type=click.File("w", encoding="utf-8")
)
@click.option(
    "--debug-output-file",
    help="Save debug output as JSON lines in the given file.",
    type=click.File("w", encoding="utf-8"),
)
@click.option(
    "--show-errors-tracebacks",
    help="Show full tracebacks for internal errors.",
    is_flag=True,
    is_eager=True,
    default=False,
    show_default=True,
)
@click.option(
    "--code-sample-style",
    help="Controls the style of code samples for failure reproduction.",
    type=click.Choice([item.name for item in CodeSampleStyle]),
    default=CodeSampleStyle.default().name,
    callback=callbacks.convert_code_sample_style,
)
@click.option(
    "--cassette-path",
    help="Save test results as a VCR-compatible cassette.",
    type=click.File("w", encoding="utf-8"),
    is_eager=True,
)
@click.option(
    "--cassette-preserve-exact-body-bytes",
    help="Encode payloads in cassettes as base64.",
    is_flag=True,
    callback=callbacks.validate_preserve_exact_body_bytes,
)
@click.option(
    "--store-network-log",
    help="[DEPRECATED] Store requests and responses into a file.",
    type=click.File("w", encoding="utf-8"),
)
@click.option(
    "--fixups",
    help="Install specified compatibility fixups.",
    multiple=True,
    type=click.Choice(list(ALL_FIXUPS) + ["all"]),
)
@click.option(
    "--stateful",
    help="Utilize stateful testing capabilities.",
    type=click.Choice([item.name for item in Stateful]),
    default=Stateful.links.name,
    callback=callbacks.convert_stateful,
)
@click.option(
    "--stateful-recursion-limit",
    help="Limit recursion depth for stateful testing.",
    default=DEFAULT_STATEFUL_RECURSION_LIMIT,
    show_default=True,
    type=click.IntRange(1, 100),
    cls=DeprecatedOption,
    removed_in="4.0",
)
@click.option(
    "--force-schema-version",
    help="Force Schemathesis to parse the input schema with the specified spec version.",
    type=click.Choice(["20", "30"]),
)
@click.option(
    "--hypothesis-database",
    help="A way to store found examples in Hypothesis' database. "
    "You can either disable it completely with `none`, "
    f"do not persist bugs between test runs with `{HYPOTHESIS_IN_MEMORY_DATABASE_IDENTIFIER}` "
    "or use an arbitrary path to store examples as files.",
    type=str,
    cls=GroupedOption,
    group=ParameterGroup.hypothesis,
)
@click.option(
    "--hypothesis-deadline",
    help="Duration in milliseconds that each individual example with a test is not allowed to exceed.",
    # max value to avoid overflow.
It is the maximum amount of days in millisecondsNEWLINE type=OptionalInt(1, 999999999 * 24 * 3600 * 1000),NEWLINE cls=GroupedOption,NEWLINE group=ParameterGroup.hypothesis,NEWLINE)NEWLINE@click.option(NEWLINE "--hypothesis-derandomize",NEWLINE help="Use Hypothesis's deterministic mode.",NEWLINE is_flag=True,NEWLINE default=None,NEWLINE show_default=True,NEWLINE cls=GroupedOption,NEWLINE group=ParameterGroup.hypothesis,NEWLINE)NEWLINE@click.option(NEWLINE "--hypothesis-max-examples",NEWLINE help="Maximum number of generated examples per each method/path combination.",NEWLINE type=click.IntRange(1),NEWLINE cls=GroupedOption,NEWLINE group=ParameterGroup.hypothesis,NEWLINE)NEWLINE@click.option(NEWLINE "--hypothesis-phases",NEWLINE help="Control which phases should be run.",NEWLINE type=CsvEnumChoice(hypothesis.Phase),NEWLINE cls=GroupedOption,NEWLINE group=ParameterGroup.hypothesis,NEWLINE)NEWLINE@click.option(NEWLINE "--hypothesis-report-multiple-bugs",NEWLINE help="Raise only the exception with the smallest minimal example.",NEWLINE type=bool,NEWLINE cls=GroupedOption,NEWLINE group=ParameterGroup.hypothesis,NEWLINE)NEWLINE@click.option(NEWLINE "--hypothesis-seed",NEWLINE help="Set a seed to use for all Hypothesis tests.",NEWLINE type=int,NEWLINE cls=GroupedOption,NEWLINE group=ParameterGroup.hypothesis,NEWLINE)NEWLINE@click.option(NEWLINE "--hypothesis-suppress-health-check",NEWLINE help="Comma-separated list of health checks to disable.",NEWLINE type=CsvEnumChoice(hypothesis.HealthCheck),NEWLINE cls=GroupedOption,NEWLINE group=ParameterGroup.hypothesis,NEWLINE)NEWLINE@click.option(NEWLINE "--hypothesis-verbosity",NEWLINE help="Verbosity level of Hypothesis messages.",NEWLINE type=click.Choice([item.name for item in hypothesis.Verbosity]),NEWLINE callback=callbacks.convert_verbosity,NEWLINE cls=GroupedOption,NEWLINE group=ParameterGroup.hypothesis,NEWLINE)NEWLINE@click.option("--no-color", help="Disable ANSI color escape codes.", type=bool, is_flag=True)NEWLINE@click.option(NEWLINE "--schemathesis-io-token",NEWLINE help="Schemathesis.io authentication token.",NEWLINE type=str,NEWLINE envvar=service.TOKEN_ENV_VAR,NEWLINE)NEWLINE@click.option(NEWLINE "--schemathesis-io-url",NEWLINE help="Schemathesis.io base URL.",NEWLINE default=service.DEFAULT_URL,NEWLINE type=str,NEWLINE envvar=service.URL_ENV_VAR,NEWLINE)NEWLINE@with_hosts_fileNEWLINE@click.option("--verbosity", "-v", help="Reduce verbosity of error output.", count=True)NEWLINE@click.pass_contextNEWLINEdef run(NEWLINE ctx: click.Context,NEWLINE schema: str,NEWLINE api_slug: Optional[str],NEWLINE auth: Optional[Tuple[str, str]],NEWLINE auth_type: str,NEWLINE headers: Dict[str, str],NEWLINE checks: Iterable[str] = DEFAULT_CHECKS_NAMES,NEWLINE data_generation_methods: Tuple[DataGenerationMethod, ...] 
= DEFAULT_DATA_GENERATION_METHODS,NEWLINE max_response_time: Optional[int] = None,NEWLINE targets: Iterable[str] = DEFAULT_TARGETS_NAMES,NEWLINE exit_first: bool = False,NEWLINE dry_run: bool = False,NEWLINE endpoints: Optional[Filter] = None,NEWLINE methods: Optional[Filter] = None,NEWLINE tags: Optional[Filter] = None,NEWLINE operation_ids: Optional[Filter] = None,NEWLINE workers_num: int = DEFAULT_WORKERS,NEWLINE base_url: Optional[str] = None,NEWLINE app: Optional[str] = None,NEWLINE request_timeout: Optional[int] = None,NEWLINE request_tls_verify: bool = True,NEWLINE request_cert: Optional[str] = None,NEWLINE request_cert_key: Optional[str] = None,NEWLINE validate_schema: bool = True,NEWLINE skip_deprecated_operations: bool = False,NEWLINE junit_xml: Optional[click.utils.LazyFile] = None,NEWLINE debug_output_file: Optional[click.utils.LazyFile] = None,NEWLINE show_errors_tracebacks: bool = False,NEWLINE code_sample_style: CodeSampleStyle = CodeSampleStyle.default(),NEWLINE cassette_path: Optional[click.utils.LazyFile] = None,NEWLINE cassette_preserve_exact_body_bytes: bool = False,NEWLINE store_network_log: Optional[click.utils.LazyFile] = None,NEWLINE fixups: Tuple[str] = (), # type: ignoreNEWLINE stateful: Optional[Stateful] = None,NEWLINE stateful_recursion_limit: int = DEFAULT_STATEFUL_RECURSION_LIMIT,NEWLINE force_schema_version: Optional[str] = None,NEWLINE hypothesis_database: Optional[str] = None,NEWLINE hypothesis_deadline: Optional[Union[int, NotSet]] = None,NEWLINE hypothesis_derandomize: Optional[bool] = None,NEWLINE hypothesis_max_examples: Optional[int] = None,NEWLINE hypothesis_phases: Optional[List[hypothesis.Phase]] = None,NEWLINE hypothesis_report_multiple_bugs: Optional[bool] = None,NEWLINE hypothesis_suppress_health_check: Optional[List[hypothesis.HealthCheck]] = None,NEWLINE hypothesis_seed: Optional[int] = None,NEWLINE hypothesis_verbosity: Optional[hypothesis.Verbosity] = None,NEWLINE verbosity: int = 0,NEWLINE no_color: bool = False,NEWLINE schemathesis_io_token: Optional[str] = None,NEWLINE schemathesis_io_url: str = service.DEFAULT_URL,NEWLINE hosts_file: PathLike = service.DEFAULT_HOSTS_PATH,NEWLINE) -> None:NEWLINE """Perform schemathesis test against an API specified by SCHEMA.NEWLINENEWLINE SCHEMA must be a valid URL or file path pointing to an Open API / GraphQL specification.NEWLINENEWLINE API_SLUG is an API identifier to upload data to Schemathesis.io.NEWLINE """NEWLINE # pylint: disable=too-many-localsNEWLINE maybe_disable_color(ctx, no_color)NEWLINE check_auth(auth, headers)NEWLINE selected_targets = tuple(target for target in targets_module.ALL_TARGETS if target.__name__ in targets)NEWLINENEWLINE if store_network_log and cassette_path:NEWLINE error_message(CASSETTES_PATH_INVALID_USAGE_MESSAGE)NEWLINE sys.exit(1)NEWLINE if store_network_log is not None:NEWLINE click.secho(DEPRECATED_CASSETTE_PATH_OPTION_WARNING, fg="yellow")NEWLINE cassette_path = store_network_logNEWLINENEWLINE schemathesis_io_hostname = urlparse(schemathesis_io_url).netlocNEWLINE token = schemathesis_io_token or service.hosts.get_token(hostname=schemathesis_io_hostname, hosts_file=hosts_file)NEWLINE schema_kind = callbacks.parse_schema_kind(schema, app)NEWLINE callbacks.validate_schema(schema, schema_kind, base_url=base_url, dry_run=dry_run, app=app, api_slug=api_slug)NEWLINE client = NoneNEWLINE test_run = NoneNEWLINE if api_slug is not None or schema_kind == callbacks.SchemaInputKind.SLUG:NEWLINE if token is None:NEWLINE hostname = (NEWLINE "Schemathesis.io" if 
schemathesis_io_hostname == service.DEFAULT_HOSTNAME else schemathesis_io_hostnameNEWLINE )NEWLINE raise click.UsageError(NEWLINE "\n\n"NEWLINE f"You are trying to upload data to {hostname}, but your CLI appears to be not authenticated.\n\n"NEWLINE "To authenticate, grab your token from `app.schemathesis.io` and run `st auth login <TOKEN>`\n"NEWLINE "Alternatively, you can pass the token explicitly via the `--schemathesis-io-token` option / "NEWLINE f"`{service.TOKEN_ENV_VAR}` environment variable\n\n"NEWLINE "See https://schemathesis.readthedocs.io/en/stable/service.html for more details"NEWLINE )NEWLINE client = service.ServiceClient(base_url=schemathesis_io_url, token=token)NEWLINE try:NEWLINE test_run = client.create_test_run(schema)NEWLINE if schema_kind == callbacks.SchemaInputKind.SLUG:NEWLINE # Replace config values with ones loaded from the serviceNEWLINE schema = test_run.config.locationNEWLINE base_url = base_url or test_run.config.base_urlNEWLINE except requests.HTTPError as exc:NEWLINE handle_service_error(exc)NEWLINENEWLINE if "all" in checks:NEWLINE selected_checks = checks_module.ALL_CHECKSNEWLINE else:NEWLINE selected_checks = tuple(check for check in checks_module.ALL_CHECKS if check.__name__ in checks)NEWLINENEWLINE if fixups:NEWLINE if "all" in fixups:NEWLINE _fixups.install()NEWLINE else:NEWLINE _fixups.install(fixups)NEWLINE hypothesis_settings = prepare_hypothesis_settings(NEWLINE database=hypothesis_database,NEWLINE deadline=hypothesis_deadline,NEWLINE derandomize=hypothesis_derandomize,NEWLINE max_examples=hypothesis_max_examples,NEWLINE phases=hypothesis_phases,NEWLINE report_multiple_bugs=hypothesis_report_multiple_bugs,NEWLINE suppress_health_check=hypothesis_suppress_health_check,NEWLINE verbosity=hypothesis_verbosity,NEWLINE )NEWLINE event_stream = into_event_stream(NEWLINE schema,NEWLINE app=app,NEWLINE base_url=base_url,NEWLINE validate_schema=validate_schema,NEWLINE skip_deprecated_operations=skip_deprecated_operations,NEWLINE data_generation_methods=data_generation_methods,NEWLINE force_schema_version=force_schema_version,NEWLINE request_tls_verify=request_tls_verify,NEWLINE request_cert=prepare_request_cert(request_cert, request_cert_key),NEWLINE auth=auth,NEWLINE auth_type=auth_type,NEWLINE headers=headers,NEWLINE endpoint=endpoints or None,NEWLINE method=methods or None,NEWLINE tag=tags or None,NEWLINE operation_id=operation_ids or None,NEWLINE request_timeout=request_timeout,NEWLINE seed=hypothesis_seed,NEWLINE exit_first=exit_first,NEWLINE dry_run=dry_run,NEWLINE store_interactions=cassette_path is not None,NEWLINE checks=selected_checks,NEWLINE max_response_time=max_response_time,NEWLINE targets=selected_targets,NEWLINE workers_num=workers_num,NEWLINE stateful=stateful,NEWLINE stateful_recursion_limit=stateful_recursion_limit,NEWLINE hypothesis_settings=hypothesis_settings,NEWLINE )NEWLINE execute(NEWLINE event_stream,NEWLINE hypothesis_settings,NEWLINE workers_num,NEWLINE show_errors_tracebacks,NEWLINE validate_schema,NEWLINE cassette_path,NEWLINE cassette_preserve_exact_body_bytes,NEWLINE junit_xml,NEWLINE verbosity,NEWLINE code_sample_style,NEWLINE debug_output_file,NEWLINE schemathesis_io_url,NEWLINE client,NEWLINE test_run,NEWLINE )NEWLINENEWLINENEWLINEdef prepare_request_cert(cert: Optional[str], key: Optional[str]) -> Optional[RequestCert]:NEWLINE if cert is not None and key is not None:NEWLINE return cert, keyNEWLINE return certNEWLINENEWLINENEWLINE@attr.s(slots=True)NEWLINEclass LoaderConfig:NEWLINE """Container for API loader 
parameters.NEWLINENEWLINE The main goal is to avoid too many parameters in function signatures.NEWLINE """NEWLINENEWLINE schema_location: str = attr.ib() # pragma: no mutateNEWLINE app: Any = attr.ib() # pragma: no mutateNEWLINE base_url: Optional[str] = attr.ib() # pragma: no mutateNEWLINE validate_schema: bool = attr.ib() # pragma: no mutateNEWLINE skip_deprecated_operations: bool = attr.ib() # pragma: no mutateNEWLINE data_generation_methods: Tuple[DataGenerationMethod, ...] = attr.ib() # pragma: no mutateNEWLINE force_schema_version: Optional[str] = attr.ib() # pragma: no mutateNEWLINE request_tls_verify: Union[bool, str] = attr.ib() # pragma: no mutateNEWLINE request_cert: Optional[RequestCert] = attr.ib() # pragma: no mutateNEWLINE # Network request parametersNEWLINE auth: Optional[Tuple[str, str]] = attr.ib() # pragma: no mutateNEWLINE auth_type: Optional[str] = attr.ib() # pragma: no mutateNEWLINE headers: Optional[Dict[str, str]] = attr.ib() # pragma: no mutateNEWLINE # Schema filtersNEWLINE endpoint: Optional[Filter] = attr.ib() # pragma: no mutateNEWLINE method: Optional[Filter] = attr.ib() # pragma: no mutateNEWLINE tag: Optional[Filter] = attr.ib() # pragma: no mutateNEWLINE operation_id: Optional[Filter] = attr.ib() # pragma: no mutateNEWLINENEWLINENEWLINEdef into_event_stream(NEWLINE schema_location: str,NEWLINE *,NEWLINE app: Any,NEWLINE base_url: Optional[str],NEWLINE validate_schema: bool,NEWLINE skip_deprecated_operations: bool,NEWLINE data_generation_methods: Tuple[DataGenerationMethod, ...],NEWLINE force_schema_version: Optional[str],NEWLINE request_tls_verify: Union[bool, str],NEWLINE request_cert: Optional[RequestCert],NEWLINE # Network request parametersNEWLINE auth: Optional[Tuple[str, str]],NEWLINE auth_type: Optional[str],NEWLINE headers: Optional[Dict[str, str]],NEWLINE request_timeout: Optional[int],NEWLINE # Schema filtersNEWLINE endpoint: Optional[Filter],NEWLINE method: Optional[Filter],NEWLINE tag: Optional[Filter],NEWLINE operation_id: Optional[Filter],NEWLINE # Runtime behaviorNEWLINE checks: Iterable[CheckFunction],NEWLINE max_response_time: Optional[int],NEWLINE targets: Iterable[Target],NEWLINE workers_num: int,NEWLINE hypothesis_settings: Optional[hypothesis.settings],NEWLINE seed: Optional[int],NEWLINE exit_first: bool,NEWLINE dry_run: bool,NEWLINE store_interactions: bool,NEWLINE stateful: Optional[Stateful],NEWLINE stateful_recursion_limit: int,NEWLINE) -> Generator[events.ExecutionEvent, None, None]:NEWLINE try:NEWLINE if app is not None:NEWLINE app = import_app(app)NEWLINE config = LoaderConfig(NEWLINE schema_location=schema_location,NEWLINE app=app,NEWLINE base_url=base_url,NEWLINE validate_schema=validate_schema,NEWLINE skip_deprecated_operations=skip_deprecated_operations,NEWLINE data_generation_methods=data_generation_methods,NEWLINE force_schema_version=force_schema_version,NEWLINE request_tls_verify=request_tls_verify,NEWLINE request_cert=request_cert,NEWLINE auth=auth,NEWLINE auth_type=auth_type,NEWLINE headers=headers,NEWLINE endpoint=endpoint or None,NEWLINE method=method or None,NEWLINE tag=tag or None,NEWLINE operation_id=operation_id or None,NEWLINE )NEWLINE loaded_schema = load_schema(config)NEWLINE yield from runner.from_schema(NEWLINE loaded_schema,NEWLINE auth=auth,NEWLINE auth_type=auth_type,NEWLINE headers=headers,NEWLINE request_timeout=request_timeout,NEWLINE request_tls_verify=request_tls_verify,NEWLINE request_cert=request_cert,NEWLINE seed=seed,NEWLINE exit_first=exit_first,NEWLINE dry_run=dry_run,NEWLINE 
store_interactions=store_interactions,NEWLINE checks=checks,NEWLINE max_response_time=max_response_time,NEWLINE targets=targets,NEWLINE workers_num=workers_num,NEWLINE stateful=stateful,NEWLINE stateful_recursion_limit=stateful_recursion_limit,NEWLINE hypothesis_settings=hypothesis_settings,NEWLINE ).execute()NEWLINE except Exception as exc:NEWLINE yield events.InternalError.from_exc(exc)NEWLINENEWLINENEWLINEdef load_schema(config: LoaderConfig) -> BaseSchema:NEWLINE """Automatically load API schema."""NEWLINE first: Callable[[LoaderConfig], BaseSchema]NEWLINE second: Callable[[LoaderConfig], BaseSchema]NEWLINE if is_probably_graphql(config.schema_location):NEWLINE # Try GraphQL first, then fallback to Open APINEWLINE first, second = (_load_graphql_schema, _load_openapi_schema)NEWLINE else:NEWLINE # Try Open API first, then fallback to GraphQLNEWLINE first, second = (_load_openapi_schema, _load_graphql_schema)NEWLINE return _try_load_schema(config, first, second)NEWLINENEWLINENEWLINEdef _try_load_schema(NEWLINE config: LoaderConfig, first: Callable[[LoaderConfig], BaseSchema], second: Callable[[LoaderConfig], BaseSchema]NEWLINE) -> BaseSchema:NEWLINE try:NEWLINE return first(config)NEWLINE except (HTTPError, SchemaLoadingError) as exc:NEWLINE try:NEWLINE return second(config)NEWLINE except (HTTPError, SchemaLoadingError):NEWLINE # Raise the first loader's errorNEWLINE raise exc # pylint: disable=raise-missing-fromNEWLINENEWLINENEWLINEdef _load_graphql_schema(config: LoaderConfig) -> GraphQLSchema:NEWLINE loader = detect_loader(config.schema_location, config.app, is_openapi=False)NEWLINE kwargs = get_graphql_loader_kwargs(loader, config)NEWLINE return loader(config.schema_location, **kwargs)NEWLINENEWLINENEWLINEdef _load_openapi_schema(config: LoaderConfig) -> BaseSchema:NEWLINE loader = detect_loader(config.schema_location, config.app, is_openapi=True)NEWLINE kwargs = get_loader_kwargs(loader, config)NEWLINE return loader(config.schema_location, **kwargs)NEWLINENEWLINENEWLINEdef detect_loader(schema_location: str, app: Any, is_openapi: bool) -> Callable:NEWLINE """Detect API schema loader."""NEWLINE if file_exists(schema_location):NEWLINE # If there is an existing file with the given name,NEWLINE # then it is likely that the user wants to load API schema from thereNEWLINE return oas_loaders.from_path if is_openapi else gql_loaders.from_path # type: ignoreNEWLINE if app is not None and not urlparse(schema_location).netloc:NEWLINE # App is passed & location is relativeNEWLINE return oas_loaders.get_loader_for_app(app) if is_openapi else gql_loaders.get_loader_for_app(app)NEWLINE # Default behaviorNEWLINE return oas_loaders.from_uri if is_openapi else gql_loaders.from_url # type: ignoreNEWLINENEWLINENEWLINEdef get_loader_kwargs(loader: Callable, config: LoaderConfig) -> Dict[str, Any]:NEWLINE """Detect the proper set of parameters for a loader."""NEWLINE # These kwargs are shared by all loadersNEWLINE kwargs = {NEWLINE "app": config.app,NEWLINE "base_url": config.base_url,NEWLINE "method": config.method,NEWLINE "endpoint": config.endpoint,NEWLINE "tag": config.tag,NEWLINE "operation_id": config.operation_id,NEWLINE "skip_deprecated_operations": config.skip_deprecated_operations,NEWLINE "validate_schema": config.validate_schema,NEWLINE "force_schema_version": config.force_schema_version,NEWLINE "data_generation_methods": config.data_generation_methods,NEWLINE }NEWLINE if loader is not oas_loaders.from_path:NEWLINE kwargs["headers"] = config.headersNEWLINE if loader in (oas_loaders.from_uri, 
oas_loaders.from_aiohttp):NEWLINE _add_requests_kwargs(kwargs, config)NEWLINE return kwargsNEWLINENEWLINENEWLINEdef get_graphql_loader_kwargs(NEWLINE loader: Callable,NEWLINE config: LoaderConfig,NEWLINE) -> Dict[str, Any]:NEWLINE """Detect the proper set of parameters for a loader."""NEWLINE # These kwargs are shared by all loadersNEWLINE kwargs = {NEWLINE "app": config.app,NEWLINE "base_url": config.base_url,NEWLINE "data_generation_methods": config.data_generation_methods,NEWLINE }NEWLINE if loader is not gql_loaders.from_path:NEWLINE kwargs["headers"] = config.headersNEWLINE if loader is gql_loaders.from_url:NEWLINE _add_requests_kwargs(kwargs, config)NEWLINE return kwargsNEWLINENEWLINENEWLINEdef _add_requests_kwargs(kwargs: Dict[str, Any], config: LoaderConfig) -> None:NEWLINE kwargs["verify"] = config.request_tls_verifyNEWLINE if config.request_cert is not None:NEWLINE kwargs["cert"] = config.request_certNEWLINE if config.auth is not None:NEWLINE kwargs["auth"] = get_requests_auth(config.auth, config.auth_type)NEWLINENEWLINENEWLINEdef is_probably_graphql(location: str) -> bool:NEWLINE """Detect whether it is likely that the given location is a GraphQL endpoint."""NEWLINE return location.endswith(("/graphql", "/graphql/"))NEWLINENEWLINENEWLINEdef check_auth(auth: Optional[Tuple[str, str]], headers: Dict[str, str]) -> None:NEWLINE if auth is not None and "authorization" in {header.lower() for header in headers}:NEWLINE raise click.BadParameter("Passing `--auth` together with `--header` that sets `Authorization` is not allowed.")NEWLINENEWLINENEWLINEdef get_output_handler(workers_num: int) -> EventHandler:NEWLINE if workers_num > 1:NEWLINE output_style = OutputStyle.shortNEWLINE else:NEWLINE output_style = OutputStyle.defaultNEWLINE return output_style.value()NEWLINENEWLINENEWLINEdef load_hook(module_name: str) -> None:NEWLINE """Load the given hook by importing it."""NEWLINE try:NEWLINE sys.path.append(os.getcwd()) # fix ModuleNotFoundError module in cwdNEWLINE __import__(module_name)NEWLINE except Exception as exc:NEWLINE click.secho("An exception happened during the hook loading:\n", fg="red")NEWLINE message = traceback.format_exc()NEWLINE click.secho(message, fg="red")NEWLINE raise click.Abort() from excNEWLINENEWLINENEWLINEclass OutputStyle(Enum):NEWLINE """Provide different output styles."""NEWLINENEWLINE default = output.default.DefaultOutputStyleHandlerNEWLINE short = output.short.ShortOutputStyleHandlerNEWLINENEWLINENEWLINEdef execute(NEWLINE event_stream: Generator[events.ExecutionEvent, None, None],NEWLINE hypothesis_settings: hypothesis.settings,NEWLINE workers_num: int,NEWLINE show_errors_tracebacks: bool,NEWLINE validate_schema: bool,NEWLINE cassette_path: Optional[click.utils.LazyFile],NEWLINE cassette_preserve_exact_body_bytes: bool,NEWLINE junit_xml: Optional[click.utils.LazyFile],NEWLINE verbosity: int,NEWLINE code_sample_style: CodeSampleStyle,NEWLINE debug_output_file: Optional[click.utils.LazyFile],NEWLINE schemathesis_io_url: str,NEWLINE client: Optional[service.ServiceClient],NEWLINE test_run: Optional[service.TestRun],NEWLINE) -> None:NEWLINE """Execute a prepared runner by drawing events from it and passing to a proper handler."""NEWLINE handlers: List[EventHandler] = []NEWLINE service_context = NoneNEWLINE if client is not None and test_run is not None:NEWLINE service_queue: Queue = Queue()NEWLINE service_context = ServiceContext(url=schemathesis_io_url, queue=service_queue)NEWLINE reporter = service.ServiceReporter(client=client, test_run=test_run, 
out_queue=service_queue)NEWLINE handlers.append(reporter)NEWLINE if junit_xml is not None:NEWLINE handlers.append(JunitXMLHandler(junit_xml))NEWLINE if debug_output_file is not None:NEWLINE handlers.append(DebugOutputHandler(debug_output_file))NEWLINE if cassette_path is not None:NEWLINE # This handler should be first to have logs writing completed when the output handler will display statisticNEWLINE handlers.append(NEWLINE cassettes.CassetteWriter(cassette_path, preserve_exact_body_bytes=cassette_preserve_exact_body_bytes)NEWLINE )NEWLINE handlers.append(get_output_handler(workers_num))NEWLINE execution_context = ExecutionContext(NEWLINE hypothesis_settings=hypothesis_settings,NEWLINE workers_num=workers_num,NEWLINE show_errors_tracebacks=show_errors_tracebacks,NEWLINE validate_schema=validate_schema,NEWLINE cassette_path=cassette_path.name if cassette_path is not None else None,NEWLINE junit_xml_file=junit_xml.name if junit_xml is not None else None,NEWLINE verbosity=verbosity,NEWLINE code_sample_style=code_sample_style,NEWLINE service=service_context,NEWLINE )NEWLINENEWLINE def shutdown() -> None:NEWLINE for _handler in handlers:NEWLINE _handler.shutdown()NEWLINENEWLINE GLOBAL_HOOK_DISPATCHER.dispatch("after_init_cli_run_handlers", HookContext(), handlers, execution_context)NEWLINE event = NoneNEWLINE try:NEWLINE for event in event_stream:NEWLINE for handler in handlers:NEWLINE handler.handle_event(execution_context, event)NEWLINE except Exception as exc:NEWLINE if isinstance(exc, click.Abort):NEWLINE # To avoid showing "Aborted!" message, which is the default behavior in ClickNEWLINE sys.exit(1)NEWLINE raiseNEWLINE finally:NEWLINE shutdown()NEWLINE if event is not None and event.is_terminal:NEWLINE exit_code = get_exit_code(event)NEWLINE sys.exit(exit_code)NEWLINE # Event stream did not finish with a terminal event. Only possible if the handler is brokenNEWLINE click.secho("Unexpected error", fg="red")NEWLINE sys.exit(1)NEWLINENEWLINENEWLINEdef handle_service_error(exc: requests.HTTPError) -> NoReturn:NEWLINE if exc.response.status_code == 404:NEWLINE error_message("API_SLUG not found!")NEWLINE else:NEWLINE output.default.display_service_error(service.Error(exc))NEWLINE sys.exit(1)NEWLINENEWLINENEWLINEdef get_exit_code(event: events.ExecutionEvent) -> int:NEWLINE if isinstance(event, events.Finished):NEWLINE if event.has_failures or event.has_errors:NEWLINE return 1NEWLINE return 0NEWLINE # Practically not possible. 
May occur only if the output handler is broken - in this case we still will have theNEWLINE # right exit code.NEWLINE return 1NEWLINENEWLINENEWLINE@schemathesis.command(short_help="Replay requests from a saved cassette.")NEWLINE@click.argument("cassette_path", type=click.Path(exists=True))NEWLINE@click.option("--id", "id_", help="ID of interaction to replay.", type=str)NEWLINE@click.option("--status", help="Status of interactions to replay.", type=str)NEWLINE@click.option("--uri", help="A regexp that filters interactions by their request URI.", type=str)NEWLINE@click.option("--method", help="A regexp that filters interactions by their request method.", type=str)NEWLINE@click.option("--no-color", help="Disable ANSI color escape codes.", type=bool, is_flag=True)NEWLINE@with_request_tls_verifyNEWLINE@with_request_certNEWLINE@with_request_cert_keyNEWLINE@click.pass_contextNEWLINEdef replay(NEWLINE ctx: click.Context,NEWLINE cassette_path: str,NEWLINE id_: Optional[str],NEWLINE status: Optional[str] = None,NEWLINE uri: Optional[str] = None,NEWLINE method: Optional[str] = None,NEWLINE no_color: bool = False,NEWLINE request_tls_verify: bool = True,NEWLINE request_cert: Optional[str] = None,NEWLINE request_cert_key: Optional[str] = None,NEWLINE) -> None:NEWLINE """Replay a cassette.NEWLINENEWLINE Cassettes in VCR-compatible format can be replayed.NEWLINE For example, ones that are recorded with ``store-network-log`` option of `st run` command.NEWLINE """NEWLINE maybe_disable_color(ctx, no_color)NEWLINE click.secho(f"{bold('Replaying cassette')}: {cassette_path}")NEWLINE with open(cassette_path, "rb") as fd:NEWLINE cassette = yaml.load(fd, Loader=SafeLoader)NEWLINE click.secho(f"{bold('Total interactions')}: {len(cassette['http_interactions'])}\n")NEWLINE for replayed in cassettes.replay(NEWLINE cassette,NEWLINE id_=id_,NEWLINE status=status,NEWLINE uri=uri,NEWLINE method=method,NEWLINE request_tls_verify=request_tls_verify,NEWLINE request_cert=prepare_request_cert(request_cert, request_cert_key),NEWLINE ):NEWLINE click.secho(f" {bold('ID')} : {replayed.interaction['id']}")NEWLINE click.secho(f" {bold('URI')} : {replayed.interaction['request']['uri']}")NEWLINE click.secho(f" {bold('Old status code')} : {replayed.interaction['response']['status']['code']}")NEWLINE click.secho(f" {bold('New status code')} : {replayed.response.status_code}\n")NEWLINENEWLINENEWLINE@schemathesis.group(short_help="Authenticate Schemathesis.io.")NEWLINEdef auth() -> None:NEWLINE passNEWLINENEWLINENEWLINE@auth.command(short_help="Authenticate with a Schemathesis.io host.")NEWLINE@click.argument("token", type=str, envvar=service.TOKEN_ENV_VAR)NEWLINE@click.option(NEWLINE "--hostname",NEWLINE help="The hostname of the Schemathesis.io instance to authenticate with",NEWLINE type=str,NEWLINE default=service.DEFAULT_HOSTNAME,NEWLINE envvar=service.HOSTNAME_ENV_VAR,NEWLINE)NEWLINE@click.option(NEWLINE "--protocol",NEWLINE type=click.Choice(["https", "http"]),NEWLINE default=service.DEFAULT_PROTOCOL,NEWLINE envvar=service.PROTOCOL_ENV_VAR,NEWLINE)NEWLINE@with_request_tls_verifyNEWLINE@with_hosts_fileNEWLINEdef login(token: str, hostname: str, hosts_file: str, protocol: str, request_tls_verify: bool = True) -> None:NEWLINE """Authenticate with a Schemathesis.io host.NEWLINENEWLINE Example:NEWLINE st auth login MY_TOKENNEWLINENEWLINE """NEWLINE try:NEWLINE username = service.auth.login(token, hostname, protocol, request_tls_verify)NEWLINE service.hosts.store(token, hostname, hosts_file)NEWLINE success_message(f"Logged in into 
{hostname} as " + bold(username))NEWLINE except requests.HTTPError as exc:NEWLINE detail = exc.response.json()["detail"]NEWLINE error_message(f"Failed to login into {hostname}: " + bold(detail))NEWLINE sys.exit(1)NEWLINENEWLINENEWLINE@auth.command(short_help="Remove authentication for a Schemathesis.io host.")NEWLINE@click.option(NEWLINE "--hostname",NEWLINE help="The hostname of the Schemathesis.io instance to authenticate with",NEWLINE type=str,NEWLINE default=service.DEFAULT_HOSTNAME,NEWLINE envvar=service.HOSTNAME_ENV_VAR,NEWLINE)NEWLINE@with_hosts_fileNEWLINEdef logout(hostname: str, hosts_file: str) -> None:NEWLINE """Remove authentication for a Schemathesis.io host."""NEWLINE result = service.hosts.remove(hostname, hosts_file)NEWLINE if result == service.hosts.RemoveAuth.success:NEWLINE success_message(f"Logged out of {hostname} account")NEWLINE else:NEWLINE if result == service.hosts.RemoveAuth.no_match:NEWLINE warning_message(f"Not logged in to {hostname}")NEWLINE if result == service.hosts.RemoveAuth.no_hosts:NEWLINE warning_message("Not logged in to any hosts")NEWLINE if result == service.hosts.RemoveAuth.error:NEWLINE error_message(f"Failed to read the hosts file. Try to remove {hosts_file}")NEWLINE sys.exit(1)NEWLINENEWLINENEWLINEdef success_message(message: str) -> None:NEWLINE click.secho(click.style("✔️", fg="green") + f" {message}")NEWLINENEWLINENEWLINEdef warning_message(message: str) -> None:NEWLINE click.secho(click.style("🟡️", fg="yellow") + f" {message}")NEWLINENEWLINENEWLINEdef error_message(message: str) -> None:NEWLINE click.secho(f"❌ {message}")NEWLINENEWLINENEWLINEdef bold(message: str) -> str:NEWLINE return click.style(message, bold=True)NEWLINENEWLINENEWLINEdef maybe_disable_color(ctx: click.Context, no_color: bool) -> None:NEWLINE if no_color or "NO_COLOR" in os.environ:NEWLINE ctx.color = FalseNEWLINENEWLINENEWLINE@HookDispatcher.register_spec([HookScope.GLOBAL])NEWLINEdef after_init_cli_run_handlers(NEWLINE context: HookContext, handlers: List[EventHandler], execution_context: ExecutionContextNEWLINE) -> None:NEWLINE """Called after CLI hooks are initialized.NEWLINENEWLINE Might be used to add extra event handlers.NEWLINE """NEWLINENEWLINENEWLINE@HookDispatcher.register_spec([HookScope.GLOBAL])NEWLINEdef before_call(context: HookContext, case: Case) -> None:NEWLINE """Called before every network call in CLI tests.NEWLINENEWLINE Use cases:NEWLINE - Modification of `case`. For example, adding some pre-determined value to its query string.NEWLINE - LoggingNEWLINE """NEWLINENEWLINENEWLINE@HookDispatcher.register_spec([HookScope.GLOBAL])NEWLINEdef after_call(context: HookContext, case: Case, response: GenericResponse) -> None:NEWLINE """Called after every network call in CLI tests.NEWLINENEWLINE Note that you need to modify the response in-place.NEWLINENEWLINE Use cases:NEWLINE - Response post-processing, like modifying its payload.NEWLINE - LoggingNEWLINE """NEWLINENEWLINENEWLINE@HookDispatcher.register_spec([HookScope.GLOBAL])NEWLINEdef process_call_kwargs(context: HookContext, case: Case, kwargs: Dict[str, Any]) -> None:NEWLINE """Called before every network call in CLI tests.NEWLINENEWLINE Aims to modify the argument passed to `case.call` / `case.call_wsgi` / `case.call_asgi`.NEWLINE Note that you need to modify `kwargs` in-place.NEWLINE """NEWLINE
import os
import time
from pathlib import Path
import logging

import casperlabs_client
from casperlabs_client.abi import ABI

BASE_PATH = Path(os.path.dirname(os.path.abspath(__file__))).parent
ERC20_WASM = f"{BASE_PATH}/execution-engine/target/wasm32-unknown-unknown/release/erc20_smart_contract.wasm"


# At the beginning of a serialized version of Rust's Vec<u8>, the first 4 bytes represent the size of the vector.
#
# Balances are 33-byte arrays where:
# - the first byte is "01";
# - the rest is the 32 bytes of the account's public key.
#
# Allowances are 64-byte arrays where:
# - the first 32 bytes are the token owner's public key;
# - the second 32 bytes are the token spender's public key.
#
# The decimal version of "21 00 00 00" is 33.
# The decimal version of "40 00 00 00" is 64.
BALANCE_KEY_SIZE_HEX = "21000000"
ALLOWANCE_KEY_SIZE_HEX = "40000000"
BALANCE_BYTE = "01"
PAYMENT_AMOUNT = 10 ** 7


class Node:
    def __init__(
        self,
        host,
        port=casperlabs_client.DEFAULT_PORT,
        port_internal=casperlabs_client.DEFAULT_INTERNAL_PORT,
    ):
        self.host = host
        self.port = port
        self.port_internal = port_internal
        self.client = casperlabs_client.CasperLabsClient(
            host=self.host, port=port, port_internal=port_internal
        )


class Agent:
    """
    An account that will be used to call contracts.
    """

    def __init__(self, name):
        self.name = name
        logging.debug(f"Agent {str(self)}")

    def __str__(self):
        return f"{self.name}: {self.public_key_hex}"

    def on(self, node):
        """
        Bind agent to a node.
        """
        return BoundAgent(self, node)

    @property
    def private_key(self):
        return f"{BASE_PATH}/hack/docker/keys/{self.name}/account-private.pem"

    @property
    def public_key(self):
        return f"{BASE_PATH}/hack/docker/keys/{self.name}/account-public.pem"

    @property
    def public_key_hex(self):
        with open(f"{BASE_PATH}/hack/docker/keys/{self.name}/account-id-hex") as f:
            return f.read().strip()


class BoundAgent:
    """
    An agent that is bound to a node.
    Can be used to call a contract or issue a query.
    """

    def __init__(self, agent, node):
        self.agent = agent
        self.node = node

    def call_contract(self, method, wait_for_processed=True):
        deploy_hash = method(self)
        if wait_for_processed:
            self.wait_for_deploy_processed(deploy_hash)
        return deploy_hash

    def query(self, method):
        return method(self)

    def transfer_clx(self, recipient_public_hex, amount, wait_for_processed=False):
        deploy_hash = self.node.client.transfer(
            recipient_public_hex,
            amount,
            payment_amount=PAYMENT_AMOUNT,
            from_addr=self.agent.public_key_hex,
            private_key=self.agent.private_key,
        )
        if wait_for_processed:
            self.wait_for_deploy_processed(deploy_hash)
        return deploy_hash

    def wait_for_deploy_processed(self, deploy_hash, on_error_raise=True):
        result = None
        while True:
            result = self.node.client.showDeploy(deploy_hash)
            if result.status.state != 1:  # no longer PENDING (1), i.e. PROCESSED (2) or failed
                break
            # still PENDING; poll again
            time.sleep(0.1)
        if on_error_raise:
            last_processing_result = result.processing_results[0]
            if last_processing_result.is_error:
                raise Exception(
                    f"Deploy {deploy_hash} execution error: {last_processing_result.error_message}"
                )


class SmartContract:
    """
    Python interface for calling smart contracts.
    """

    def __init__(self, file_name, methods):
        """
        :param file_name: Path to WASM file with smart contract.
        :param methods: Dictionary mapping contract methods to
                        their signatures: names and types of
                        their parameters. See ERC20 for an example.
        """
        self.file_name = file_name
        self.methods = methods

    def contract_hash_by_name(self, bound_agent, deployer, contract_name, block_hash):
        response = bound_agent.node.client.queryState(
            block_hash, key=deployer, path=contract_name, keyType="address"
        )
        return response.key.hash.hash

    def __getattr__(self, name):
        return self.method(name)

    def abi_encode_args(self, method_name, parameters, kwargs):
        args = [ABI.string_value("method", method_name)] + [
            parameters[p](p, kwargs[p]) for p in parameters
        ]
        return ABI.args(args)

    def method(self, name):
        """
        Returns a function representing a smart contract's method.

        The function returned can be called with keyword arguments matching
        the smart contract's parameters, and it will return a function that
        accepts a BoundAgent and actually calls the smart contract on a node.

        :param name: name of the smart contract's method
        """
        if name not in self.methods:
            raise Exception(f"unknown method {name}")

        def callable_method(**kwargs):
            parameters = self.methods[name]
            if set(kwargs.keys()) != set(parameters.keys()):
                raise Exception(
                    f"Arguments ({kwargs.keys()}) don't match parameters ({parameters.keys()}) of method {name}"
                )
            arguments = self.abi_encode_args(name, parameters, kwargs)
            arguments_string = f"{name}({','.join(f'{p}={type(kwargs[p]) == bytes and kwargs[p].hex() or kwargs[p]}' for p in parameters)})"

            def deploy(bound_agent, **session_reference):
                kwargs = dict(
                    public_key=bound_agent.agent.public_key,
                    private_key=bound_agent.agent.private_key,
                    payment_amount=PAYMENT_AMOUNT,
                    session_args=arguments,
                )
                if session_reference:
                    kwargs.update(session_reference)
                else:
                    kwargs["session"] = self.file_name
                logging.debug(f"Call {arguments_string}")
                # TODO: deploy will soon return just the deploy_hash only
                _, deploy_hash = bound_agent.node.client.deploy(**kwargs)
                deploy_hash = deploy_hash.hex()
                return deploy_hash

            return deploy

        return callable_method


class DeployedERC20:
    """
    Interface to an already deployed ERC20 smart contract.
    """

    def __init__(self, erc20, token_hash, proxy_hash):
        """
        This constructor is not to be used directly; use
        DeployedERC20.create instead.
        """
        self.erc20 = erc20
        self.token_hash = token_hash
        self.proxy_hash = proxy_hash

    @classmethod
    def create(cls, deployer: BoundAgent, token_name: str):
        """
        Returns a DeployedERC20 object that provides an interface to
        a deployed ERC20 smart contract.
        """
        erc20 = ERC20(token_name)
        block_hash = last_block_hash(deployer.node)
        return DeployedERC20(
            erc20,
            erc20.token_hash(deployer, deployer.agent.public_key_hex, block_hash),
            erc20.proxy_hash(deployer, deployer.agent.public_key_hex, block_hash),
        )

    def balance(self, account_public_hex):
        """
        Returns a function that can be passed a bound agent to return
        the amount of ERC20 tokens deposited in the given account.
        """

        def execute(bound_agent):
            key = f"{self.token_hash.hex()}:{BALANCE_KEY_SIZE_HEX}{BALANCE_BYTE}{account_public_hex}"
            block_hash_hex = last_block_hash(bound_agent.node)
            response = bound_agent.node.client.queryState(
                block_hash_hex, key=key, path="", keyType="local"
            )
            return int(response.big_int.value)

        return execute

    def transfer(self, sender_private_key, recipient_public_key_hex, amount):
        """
        Returns a function that can be passed a bound agent to transfer the
        given amount of ERC20 tokens from sender to recipient.
        """

        def execute(bound_agent):
            return self.erc20.method("transfer")(
                erc20=self.token_hash,
                recipient=bytes.fromhex(recipient_public_key_hex),
                amount=amount,
            )(bound_agent, private_key=sender_private_key, session_hash=self.proxy_hash)

        return execute


class ERC20(SmartContract):
    methods = {
        "deploy": {"token_name": ABI.string_value, "initial_balance": ABI.big_int},
        "transfer": {
            "erc20": ABI.bytes_value,
            "recipient": ABI.bytes_value,
            "amount": ABI.big_int,
        },
        "approve": {
            "erc20": ABI.bytes_value,
            "recipient": ABI.bytes_value,
            "amount": ABI.big_int,
        },
        "transfer_from": {
            "erc20": ABI.bytes_value,
            "owner": ABI.bytes_value,
            "recipient": ABI.bytes_value,
            "amount": ABI.big_int,
        },
    }

    def __init__(self, token_name):
        super().__init__(ERC20_WASM, ERC20.methods)
        self.token_name = token_name
        self.proxy_name = "erc20_proxy"

    def abi_encode_args(self, method_name, parameters, kwargs):
        # When using the proxy, make sure that token_hash ('erc20') is the first argument
        args = (
            [parameters[p](p, kwargs[p]) for p in parameters if p == "erc20"]
            + [ABI.string_value("method", method_name)]
            + [parameters[p](p, kwargs[p]) for p in parameters if p != "erc20"]
        )
        return ABI.args(args)

    def proxy_hash(self, bound_agent, deployer_public_hex, block_hash):
        return self.contract_hash_by_name(
            bound_agent, deployer_public_hex, self.proxy_name, block_hash
        )

    def token_hash(self, bound_agent, deployer_public_hex, block_hash):
        return self.contract_hash_by_name(
            bound_agent, deployer_public_hex, self.token_name, block_hash
        )

    def deploy(self, initial_balance=None):
        def execute(bound_agent):
            deploy_hash = self.method("deploy")(
                token_name=self.token_name, initial_balance=initial_balance
            )(bound_agent)
            return deploy_hash

        return execute


def last_block_hash(node):
    return next(node.client.showBlocks(1)).summary.block_hash.hex()
import networkx
import re
from konlpy.tag import Mecab, Okt
import math
import pandas as pd
from tqdm import tqdm


# TextRank summarization
class TextRank:
    def __init__(self, **kargs):
        self.graph = None
        self.window = kargs.get('window', 5)
        self.coef = kargs.get('coef', 1.0)
        self.threshold = kargs.get('threshold', 0.005)
        self.dictCount = {}
        self.dictBiCount = {}
        self.dictNear = {}
        self.nTotal = 0

    def clean_text(self, texts):
        law = re.sub(r'\【이유\】', '', texts)  # remove the 【이유】 ("Reasons") header at the start
        law = re.sub(r'\【이 유\】', '', law)  # remove the spaced variant of the header
        law = re.sub(r'[@%\\*=()/~#&\+á?\xc3\xa1\-\|\:\;\!\-\,\_\~\$\'\"\[\]]', '', law)  # remove punctuation
        law = re.sub(r'\d\.', '', law)  # remove numbers followed by a period
        law = re.sub(r'\d+', '', law)  # remove numbers
        law = re.sub(r'[①②③④⑤⑥⑦]', '', law)  # remove circled numbers
        return law

    def loadSents(self, sentenceIter, tokenizer=None):
        # NOTE: the original default was `tokenizer=Okt()`, but an Okt instance is not
        # callable, so that path could never work; pass a callable tokenizer (e.g. a
        # lambda over Okt().pos), or None to fall back to the regex splitter below.
        def similarity(a, b):
            n = len(a.intersection(b))
            return n / float(len(a) + len(b) - n) / (math.log(len(a) + 1) * math.log(len(b) + 1))

        if not tokenizer:
            rgxSplitter = re.compile('[\\s.,:;-?!()"\']+')
        sentSet = []
        for sent in filter(None, sentenceIter):
            if type(sent) == str:
                if tokenizer:
                    s = set(filter(None, tokenizer(sent)))
                else:
                    s = set(filter(None, rgxSplitter.split(sent)))
            else:
                s = set(sent)
            # Fewer than 2 tokens means the sentence has at most one element tagged
            # NNG/NNP/VV/VA, so it is skipped.
            if len(s) < 2:
                continue
            self.dictCount[len(self.dictCount)] = sent
            sentSet.append(s)
        # sentSet holds each sentence's tokens, e.g. {('아버지', 'NNG'), ('식당', 'NNG')}

        # Compute similarity for every pair of sentences and store it in dictBiCount
        for i in range(len(self.dictCount)):
            for j in range(i + 1, len(self.dictCount)):
                s = similarity(sentSet[i], sentSet[j])
                if s < self.threshold:
                    continue
                self.dictBiCount[i, j] = s

    def build(self):
        self.graph = networkx.Graph()
        self.graph.add_nodes_from(self.dictCount.keys())
        for (a, b), n in self.dictBiCount.items():
            self.graph.add_edge(a, b, weight=n * self.coef + (1 - self.coef))

    def rank(self):
        return networkx.pagerank(self.graph, weight='weight')

    def summarize(self, ratio=0.333):
        r = self.rank()
        ks = sorted(r, key=r.get, reverse=True)
        score = int(len(r) * ratio)

        # Number of sentences in the summary: cap at 3; if the ratio yields
        # fewer than 3, fall back to all sentences.
        if score < 3:
            score = len(r)
        else:
            score = 3

        ks = ks[:score]
        return ' '.join(map(lambda k: self.dictCount[k], sorted(ks)))

    def law_to_list(self, data):
        clean_law = self.clean_text(data)
        line_law = clean_law.split('.')
        df_line = pd.DataFrame(line_law)
        df_line.columns = ['original']
        df_line['length'] = df_line['original'].apply(lambda x: len(x))
        df_line.drop(df_line.loc[df_line['length'] <= 1].index, inplace=True)
        df_line.reset_index(drop=True, inplace=True)
        return df_line

    def predict(self, data_path):
        data = pd.read_csv(data_path)
        summary = []
        tagger = Okt()
        data = data.iloc[:10, :]  # limit to the first 10 documents (debug limit from the original)
        for i in tqdm(range(0, len(data))):
            # Reset per-document state
            self.dictCount = {}
            self.dictBiCount = {}
            self.dictNear = {}
            self.nTotal = 0

            text = data['article_original'][i]
            l_list = self.law_to_list(text)
            stopword = set([('있', 'VV'), ('하', 'VV'), ('되', 'VV')])
            # Keep nouns, verbs and adjectives (both Mecab- and Okt-style tags).
            # NOTE: the original listed 'verb' in lowercase, which never matches
            # Okt's 'Verb' tag; fixed here.
            self.loadSents(l_list['original'],
                           lambda sent: filter(
                               lambda x: x not in stopword and x[1] in (
                                   'NNG', 'NNP', 'VV', 'VA', 'Noun', 'Verb', 'Adjective'),
                               tagger.pos(sent)))
            self.build()
            self.rank()
            final = self.summarize(0.3)
            # If the summary came back empty, retry with a larger ratio.
            rate = 0.3
            while final == '' and rate <= 1:
                final = self.summarize(rate)
                rate += 0.2
            summary.append(final)
        data['textrank_sum'] = summary
        return data


if __name__ == '__main__':
    # NOTE: the original example called an undefined `tr.fit`; `law_to_list`
    # (defined above) is what produces the sentence DataFrame used here.
    # The CSV path is illustrative.
    df = pd.read_csv('legal_cases.csv')
    tr = TextRank()
    data = tr.law_to_list(df.iloc[10, 1])['original']
    tagger = Okt()
    stopword = set([('있', 'VV'), ('하', 'VV'), ('되', 'VV')])
    tr.loadSents(data,
                 lambda sent: filter(
                     lambda x: x not in stopword and x[1] in ('NNG', 'NNP', 'VV', 'VA', 'Noun', 'Verb', 'Adjective'),
                     tagger.pos(sent)))  # keep nouns, verbs, adjectives

    tr.build()
    ranks = tr.rank()
    print(tr.summarize(0.3))
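# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition): a small convenience wrapper around
# `TextRank.predict` above. The CSV path and the `article_original` column
# name are assumptions taken from `predict`'s own expectations.
# ---------------------------------------------------------------------------
def summarize_csv(path: str) -> pd.DataFrame:
    # Build a summarizer with the default window/coef/threshold and return the
    # input frame with an added `textrank_sum` column.
    tr = TextRank(window=5, coef=1.0, threshold=0.005)
    return tr.predict(path)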
'''define the config file for voc and resnet50os8'''
from .base_cfg import *


# modify dataset config
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG['train'].update(
    {
        'type': 'voc',
        'set': 'trainaug',
        'rootdir': 'data/VOCdevkit/VOC2012',
    }
)
DATASET_CFG['test'].update(
    {
        'type': 'voc',
        'rootdir': 'data/VOCdevkit/VOC2012',
    }
)
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
    {
        'max_epochs': 60,
    }
)
# modify losses config
LOSSES_CFG = LOSSES_CFG.copy()
# modify model config
MODEL_CFG = MODEL_CFG.copy()
MODEL_CFG.update(
    {
        'num_classes': 21,
        'backbone': {
            'type': 'resnet50',
            'series': 'resnet',
            'pretrained': True,
            'outstride': 8,
            'use_stem': True,
            'selected_indices': (2, 3),
        },
    }
)
# modify inference config
INFERENCE_CFG = INFERENCE_CFG.copy()
# modify common config
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
    {
        'backupdir': 'apcnet_resnet50os8_voc_train',
        'logfilepath': 'apcnet_resnet50os8_voc_train/train.log',
    }
)
COMMON_CFG['test'].update(
    {
        'backupdir': 'apcnet_resnet50os8_voc_test',
        'logfilepath': 'apcnet_resnet50os8_voc_test/test.log',
        'resultsavepath': 'apcnet_resnet50os8_voc_test/apcnet_resnet50os8_voc_results.pkl'
    }
)
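# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition): quick consistency checks a training
# entry point could run over the dictionaries above. The specific assertions
# are assumptions derived from the values set in this file (21 = 20 VOC
# classes + background; output stride 8 per the filename).
# ---------------------------------------------------------------------------
def _sanity_check_cfg() -> None:
    assert MODEL_CFG['num_classes'] == 21
    assert MODEL_CFG['backbone']['outstride'] == 8
    assert DATASET_CFG['train']['type'] == DATASET_CFG['test']['type'] == 'voc'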
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render
from django.http import JsonResponse
from backend.models import users, interviewer, interviewee, hr, play, interview, position, apply
from django.views.decorators.csrf import csrf_protect
from django.db.models import Q
from datetime import datetime
import time
import smtplib
from email.mime.text import MIMEText
from email.header import Header
from PIL import Image
from email.utils import formataddr
import urllib.request
import random
import os


@csrf_exempt
# Create a new user (POST) / get user info (GET)
def user(request):
    result = {'verdict': 'ok', 'message': 'successful!'}
    if request.method == 'POST':
        print(request.POST)
        username = request.POST['username']
        password = request.POST['password']
        email = request.POST['email']
        username = str(username)
        password = str(password)
        email = str(email)
        result['email'] = email
        result['password'] = password
        result['username'] = username
        # return JsonResponse(result)
        userinfo = users.objects.filter(Q(email=email) | Q(username=username))
        print(userinfo)
        if userinfo:
            result['verdict'] = 'error'
            result['message'] = 'The email or username already exists!'
        else:
            user = users(username=username, password=password, email=email)

            iner = interviewer.objects.create()
            inee = interviewee.objects.create()
            ihr = hr.objects.create()
            user.save()
            print(iner.er_id)
            play.objects.create(user=user, er_id=iner, ee_id=inee, hr_id=ihr)

        return JsonResponse(result)
    else:
        username = request.session.get('username', '')
        userinfo = users.objects.filter(username=username)
        if userinfo:
            result['username'] = username
            result['email'] = str(list(userinfo.values('email'))[0]['email'])
            result['role'] = str(request.session["role"])
            # result['avatar'] = '/media/' + str(list(userinfo.values('avatar'))[0]['avatar'])
        else:
            result['verdict'] = 'error'
            result['message'] = 'Please log in first!'
        return JsonResponse(result)


# Log in
@csrf_exempt
def login(request):
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        role = request.POST['role']
        role = int(role)
        result = {'verdict': 'ok', 'message': 'successful'}
        userinfo = users.objects.filter(username=username, password=password)
        if userinfo:
            request.session["username"] = username
            print(request.session["username"])
            if role == 0:
                request.session["role"] = 0
            elif role == 1:
                request.session["role"] = 1
            elif role == 2:
                request.session["role"] = 2
            else:
                result['verdict'] = 'error'
                result['message'] = 'Please select your role!'
        else:
            print("login error!")
            result['verdict'] = 'error'
            result['message'] = 'The Username or Password is not correct.'
        return JsonResponse(result)


# Log out
def logout(request):
    del request.session["username"]
    result = {'verdict': 'ok', 'message': 'successful'}
    return render(request, "login.html")


'''
POST: schedule an interview
Parameters:
    ee_id
    time
    er_id
    pos_id
Returns:
    verdict
    message

ee_id + pos_id -> apply_id
er_id + time + ee_id + apply_id => interview

GET: fetch the interview times
Returns:
    interviews:
    [ { "job_title": "Google SDE", "date": [y, mo, d] },
      { "job_title": "Amazon SDE", "date": [y, mo, d] } ]

user -> apply -> interview
'''


@csrf_exempt
def interview_time(request):
    result = {'verdict': 'ok', 'message': 'successful!'}
    if request.method == 'POST':
        time = request.POST['time']
        interviewer_id = request.POST['er_id']
        interviewee_id = request.POST['ee_id']
        position_id = request.POST['pos_id']
        interviewee_id = int(interviewee_id)
        interviewer_id = int(interviewer_id)
        position_id = int(position_id)

        iinterviewer = interviewer.objects.get(er_id=interviewer_id)
        iinterviewee = interviewee.objects.get(ee_id=interviewee_id)
        iposition = position.objects.get(position_id=position_id)
        iapply = apply.objects.get(ee_id=iinterviewee, position_id=iposition)

        interview.objects.create(er_id=iinterviewer, ee_id=iinterviewee, apply_id=iapply, date=time)
        return JsonResponse(result)

    if request.method == 'GET':
        interviews = []
        username = request.session.get('username', '')
        userinfo = users.objects.get(username=username)
        iplay = play.objects.get(user=userinfo)
        print(iplay.user.username)
        iapply = apply.objects.filter(ee_id=iplay.ee_id)
        for iiapply in iapply:
            iinterview = interview.objects.filter(apply_id=iiapply)
            for iiinterview in iinterview:
                ainterview = {}
                ainterview["job_title"] = iiinterview.apply_id.position_id.job
                ainterview["date"] = iiinterview.date
                interviews.append(ainterview)
        result['interviews'] = interviews
        return JsonResponse(result)


@csrf_exempt
# Post a new job opening
def release_job(request):
    result = {'verdict': 'ok', 'message': 'successful'}
    if request.method == 'POST':
        job = request.POST['job']
        job_description = request.POST['job_description']
        excepted_salary = request.POST['excepted_salary']
        location = request.POST['location']

        result['job'] = job
        result['job_description'] = job_description
        result['excepted_salary'] = excepted_salary
        result['location'] = location

        username = request.session.get('username', '')
        userinfo = users.objects.get(username=username)
        if userinfo:
            print(userinfo.email)
            iplay = play.objects.get(user=userinfo)
            print(iplay.hr_id)
            position.objects.create(job=job, location=location, excepted_salary=excepted_salary,
                                    job_description=job_description, hr=iplay.hr_id)
        else:
            result['verdict'] = 'fail'
            result['message'] = "The hr doesn't exist!"
    return JsonResponse(result)


# Return the interview status
def get_interview_status(request):
    result = {'verdict': 'ok', 'message': 'successful'}
    if request.method == 'GET':
        interviews = []
        username = request.session.get('username', '')
        userinfo = users.objects.get(username=username)
        iplay = play.objects.get(user=userinfo)
        print(iplay.user.username)
        iapply = apply.objects.filter(ee_id=iplay.ee_id)
        for iiapply in iapply:
            iinterview = interview.objects.filter(apply_id=iiapply)
            for iiinterview in iinterview:
                ainterview = {}
                ainterview["job_title"] = iiinterview.apply_id.position_id.job
                ainterview["status"] = iiinterview.status
                interviews.append(ainterview)
        result['interviews'] = interviews
    return JsonResponse(result)


@csrf_exempt
# Apply for a job
def apply_job(request):
    if request.method == "POST":
        username = request.session.get('username', '')
        pos_id = request.POST['pos_id']
        print(username)
        result = {'verdict': 'error', 'message': 'No resume!'}
        resume = request.FILES.get("resume", None)  # the uploaded file; defaults to None if absent
        if not resume:
            return JsonResponse(result)

        userinfo = users.objects.get(username=username)

        if userinfo:
            x = str(random.randint(1, 20000000))
            resume_path = os.path.join("media", username + x + resume.name)

            iplay = play.objects.get(user=userinfo)
            ipos = position.objects.get(position_id=int(pos_id))
            apply.objects.create(resume_path=resume_path, ee_id=iplay.ee_id, position_id=ipos)

            destination = open(resume_path, 'wb+')  # open the target file for binary writing
            for chunk in resume.chunks():  # write the upload to disk in chunks
                destination.write(chunk)
            destination.close()

    return render(request, "apply_job.html")


@csrf_exempt
# Get the path of a resume
def get_resume_url(request):
    result = {'verdict': 'ok', 'message': 'successful'}
    if request.method == "POST":
        iinterviewee = request.POST['ee_id']
        iposition = request.POST['pos_id']
        interviewee_obj = interviewee.objects.get(ee_id=iinterviewee)
        position_obj = position.objects.get(position_id=iposition)
        iapply = apply.objects.get(ee_id=interviewee_obj, position_id=position_obj)
        result["resume_url"] = iapply.resume_path
        # print(result["resume_url"])
    return JsonResponse(result)


# Get information about all job openings
def get_job_information(request):
    result = {'verdict': 'ok', 'message': 'successful'}
    job_list = []
    if request.method == "GET":
        positions = position.objects.all()
        for i in positions:
            job_list.append(i.becomedict())
        result["job_list"] = job_list
    return JsonResponse(result)


@csrf_exempt
# Status information for all applicants
def applicants_list(request):
    result = {'verdict': 'ok', 'message': 'successful'}
    if request.method == 'POST':
        result["page"] = "1"
        jishuqi = 0
        rows = []
        jishuqi = apply.objects.all().count()
        applys = apply.objects.all()
        for app in applys:
            row = {}
            row["applicant_id"] = app.ee_id.ee_id
            row["job_id"] = app.position_id.position_id
            iplay = play.objects.get(ee_id=app.ee_id)
            row["interviewer"] = iplay.user.username  # applicant's name
            row["status"] = app.status

            iinterview = interview.objects.get(apply_id=app)
            iplay = play.objects.get(er_id=iinterview.er_id)
            row["name"] = iplay.user.username  # interviewer's name
            row["date"] = iinterview.date  # interview date
            rows.append(row)

        result["total"] = jishuqi
        result["records"] = jishuqi
        result["rows"] = rows
    return JsonResponse(result)
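# --- Illustrative sketch, not part of the original views module ---
# Exercising the login view above with Django's test client. The field names
# match the view; the '/login' URL is an assumption, since urls.py is not shown.
#
# from django.test import Client
# client = Client()
# resp = client.post('/login', {'username': 'alice', 'password': 'pw', 'role': '0'})
# assert resp.json()['verdict'] in ('ok', 'error')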
class RadixSort:
    def __init__(self, arr):
        self.arr = arr

    def _count_sort(self, exp):
        # Stable counting sort on the digit selected by `exp`.
        n = len(self.arr)
        sorted_arr = [0] * n
        count_arr = [0] * 10
        for i in range(n):
            index = self.arr[i] // exp
            count_arr[index % 10] += 1
        for i in range(1, 10):
            count_arr[i] += count_arr[i - 1]
        for i in range(n - 1, -1, -1):
            index = self.arr[i] // exp
            sorted_arr[count_arr[index % 10] - 1] = self.arr[i]
            count_arr[index % 10] -= 1
        self.arr = sorted_arr

    def _radix_sort(self):
        maximum = max(self.arr)
        exp = 1
        # Integer division is required here: with Python 3's true division
        # (`maximum / exp`) the condition never reaches 0 and the loop spins forever.
        while maximum // exp > 0:
            self._count_sort(exp)
            exp *= 10

    def result(self):
        self._radix_sort()
        return self.arr
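# Quick usage check for RadixSort above (illustrative addition, not in the
# original file); the expected output follows the standard radix-sort example.
if __name__ == "__main__":
    print(RadixSort([170, 45, 75, 90, 802, 24, 2, 66]).result())
    # -> [2, 24, 45, 66, 75, 90, 170, 802]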
_base_ = './mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))
import socket


def Server(host="127.0.0.1", porta=8585):
    """
    -> TCP server
    :param host: IP address for the server
    :param porta: communication port
    :return: None
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    msg = f"[+]You are connected to {host}."
    s.bind((host, porta))
    s.listen(1)
    while True:
        c, e = s.accept()
        print("Connected to", e)
        c.send(msg.encode('utf-8'))
        c.close()

# Server()
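# A matching client sketch (an assumption; not part of the original file) that
# connects to Server() above and prints the greeting the server sends before
# closing the connection.
def Client(host="127.0.0.1", porta=8585):
    c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    c.connect((host, porta))
    print(c.recv(1024).decode('utf-8'))  # e.g. "[+]You are connected to 127.0.0.1."
    c.close()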
"""NEWLINEGiven a Singly Linked-List, implement a method to insert a node at a specific position.NEWLINEIf the given position is greater than the list size, simply insert the node at the end.NEWLINENEWLINEExample:NEWLINEGiven 1->2->3,NEWLINENEWLINEinsert_at_pos(data,position) :NEWLINEinsert_at_pos(4,2) ==> 1->4->2->3NEWLINENEWLINE*position=2 means 2nd node in the listNEWLINE"""NEWLINENEWLINENEWLINEclass SinglyLinkedList:NEWLINE # constructorNEWLINE def __init__(self):NEWLINE self.head = NoneNEWLINENEWLINE # method for setting the head of the Linked ListNEWLINE def setHead(self, head):NEWLINE self.head = headNEWLINENEWLINE # Method for inserting a new node at the start of a Linked ListNEWLINE def insert_at_pos(self, data, pos):NEWLINE new_node = Node()NEWLINE new_node.setData(data)NEWLINE if not self.head or pos == 1:NEWLINE new_node.setNext(self.head)NEWLINE self.setHead(new_node)NEWLINE returnNEWLINENEWLINE current = self.headNEWLINE i = 1NEWLINE while current.getNext() != None:NEWLINE if i == pos - 1:NEWLINE new_node.setNext(current.getNext())NEWLINE current.setNext(new_node)NEWLINE returnNEWLINE else:NEWLINE i += 1NEWLINE current = current.getNext()NEWLINE current.setNext(new_node)
import os
import re
import shutil
import subprocess
import json

from core.python import help_text
from core.python.python_terraform import IsNotFlagged
from core.python.utils import PatchedTerraform as Terraform
from core.python.utils import (
    check_own_ip_address,
    create_dir_if_nonexistent,
    create_or_update_yaml_file,
    dirs_at_location,
    display_terraform_step_error,
    extract_cgid_from_dir_name,
    find_scenario_dir,
    find_scenario_instance_dir,
    generate_cgid,
    generate_cgid_using_username,
    ip_address_or_range_is_valid,
    load_and_validate_whitelist,
    load_data_from_yaml_file,
    normalize_scenario_name,
)


class CloudGoat:
    def __init__(self, base_dir):
        self.base_dir = base_dir
        self.config_path = os.path.join(self.base_dir, "config.yml")
        self.scenarios_dir = os.path.join(base_dir, "scenarios")
        self.scenario_names = dirs_at_location(self.scenarios_dir, names_only=True)
        self.whitelist_path = os.path.join(base_dir, "whitelist.txt")

        self.aws_region = "us-east-1"
        self.cloudgoat_commands = ["config", "create", "destroy", "list", "help"]
        self.non_scenario_instance_dirs = [
            ".git",
            "__pycache__",
            "core",
            "scenarios",
            "trash",
        ]

    def parse_and_execute_command(self, parsed_args):
        command = parsed_args.command
        profile = parsed_args.profile
        # Initialized up front so "create" with an explicit --profile flag does
        # not hit a NameError when the config.yml branch below is skipped.
        user_name = None

        # Display help text. Putting this first makes validation simpler.
        if len(command) == 0 \
                or command[0] in ["help", "-h", "--help"] \
                or (len(command) >= 2 and command[-1] == "help"):
            return self.display_cloudgoat_help(command)

        # Validation
        if len(command) == 1:
            if command[0] == "config":
                print(
                    f'The {command[0]} currently must be used with "whitelist",'
                    f' "profile", or "help".'
                )
                return
            elif command[0] == "create":
                print(
                    f"The {command[0]} command must be used with either a scenario name"
                    f' or "help".'
                    f"\nAll scenarios:\n  " + "\n  ".join(self.scenario_names)
                )
                return
            elif command[0] == "destroy":
                print(
                    f"The {command[0]} command must be used with a scenario name,"
                    f' "all", or "help".'
                    f"\nAll scenarios:\n  " + "\n  ".join(self.scenario_names)
                )
                return
            elif command[0] == "list":
                print(
                    f"The {command[0]} command must be used with a scenario name,"
                    f' "all", "deployed", "undeployed", or "help".'
                    f"\nAll scenarios:\n  " + "\n  ".join(self.scenario_names)
                )
                return

        if command[0] in ("create", "destroy", "list"):
            if command[1].lower() in self.cloudgoat_commands:
                print(f"CloudGoat scenarios cannot be named after CloudGoat commands.")
                return
            if command[1] in self.non_scenario_instance_dirs:
                print(
                    f'The name "{command[1]}" is reserved for CloudGoat and may not be'
                    f" used with the {command[0]} command."
                )
                return

        if command[0] in ("create", "destroy"):
            if not profile:
                if os.path.exists(self.config_path):
                    profile = load_data_from_yaml_file(
                        self.config_path, "default-profile"
                    )
                    user_name = load_data_from_yaml_file(
                        self.config_path, "user-name"
                    )
                if not profile:
                    print(
                        f"The {command[0]} command requires the use of the --profile"
                        f" flag, or a default profile defined in the config.yml file"
                        f' (try "config profile").'
                    )
                    return
                else:
                    print(f'Using default profile "{profile}" from config.yml...')

        # Execution
        if command[0] == "config":
            if command[1] == "whitelist" or command[1] == "whitelist.txt":
                return self.configure_or_check_whitelist(
                    auto=parsed_args.auto, print_values=True
                )
            elif command[1] == "profile":
                return self.configure_or_check_default_profile()
            elif command[1] == "argcomplete":
                return self.configure_argcomplete()

        elif command[0] == "create":
            return self.create_scenario(command[1], profile, user_name)

        elif command[0] == "destroy":
            if command[1] == "all":
                return self.destroy_all_scenarios(profile)
            else:
                return self.destroy_scenario(command[1], profile)

        elif command[0] == "list":
            if command[1] == "all":
                return self.list_all_scenarios()
            elif command[1] == "deployed":
                return self.list_deployed_scenario_instances()
            elif command[1] == "undeployed":
                return self.list_undeployed_scenarios()
            else:
                return self.list_scenario_instance(command[1])

        print(f'Unrecognized command. Try "cloudgoat.py help"')
        return

    def display_cloudgoat_help(self, command):
        if not command or len(command) == 1:
            return print(help_text.CLOUDGOAT)

        # Makes "help foo" equivalent to "foo help".
        command.remove("help")

        if command[0] == "config":
            if len(command) > 1 and command[1] == "argcomplete":
                return print(help_text.CONFIG_ARGCOMPLETE)
            else:
                return print(help_text.CONFIG)
        elif command[0] == "create":
            return print(help_text.CREATE)
        elif command[0] == "destroy":
            return print(help_text.DESTROY)
        elif command[0] == "list":
            return print(help_text.LIST)
        elif command[0] == "help":
            if all([word == "help" for word in command]):
                joined_help_texts = " ".join(["help text for" for word in command])
                return print(f"Displays {joined_help_texts} CloudGoat.")
        else:
            scenario_name = normalize_scenario_name(command[0])
            scenario_dir_path = find_scenario_dir(self.scenarios_dir, scenario_name)
            if scenario_dir_path:
                scenario_help_text = load_data_from_yaml_file(
                    os.path.join(scenario_dir_path, "manifest.yml"), "help"
                ).strip()
                return print(
                    f"[cloudgoat scenario: {scenario_name}]\n{scenario_help_text}"
                )

        return print(
            f'Unrecognized command or scenario name. Try "cloudgoat.py help" or'
            f' "cloudgoat.py list all"'
        )

    def configure_argcomplete(self):
        print(help_text.CONFIG_ARGCOMPLETE)

    def configure_or_check_default_profile(self):
        if not os.path.exists(self.config_path):
            create_config_file_now = input(
                f"No configuration file was found at {self.config_path}"
                f"\nWould you like to create this file with a default profile name now?"
                f" [y/n]: "
            )
            default_profile = None
        else:
            print(f"A configuration file exists at {self.config_path}")
            default_profile = load_data_from_yaml_file(
                self.config_path, "default-profile"
            )
            user_name = load_data_from_yaml_file(
                self.config_path, "user-name"
            )
            if default_profile:
                print(f'It specifies a default profile name of "{default_profile}".')
                print(f'And user name of "{user_name}".')
            else:
                print(f"It does not contain a default profile name.")
            create_config_file_now = input(
                f"Would you like to specify a new default profile name for the"
                f" configuration file now? [y/n]: "
            )

        if not create_config_file_now.strip().lower().startswith("y"):
            return

        while True:
            default_profile = input(
                f"Enter the name of your default AWS profile: "
            ).strip()

            user_name = input(
                f"Enter your last name: "
            ).strip()

            if default_profile:
                create_or_update_yaml_file(
                    self.config_path, {"default-profile": default_profile}
                )

                create_or_update_yaml_file(
                    self.config_path, {"user-name": user_name}
                )

                print(f'A default profile name of "{default_profile}" has been saved.')
                print(f'A user name of "{user_name}" has been saved.')
                break
            else:
                print(f"Enter your default profile's name, or hit ctrl-c to exit.")
                continue

        return

    def configure_or_check_whitelist(self, auto=False, print_values=False):
        if auto:
            message = (
                f"CloudGoat can automatically make a network request, using "
                f"https://ifconfig.co to find your IP address, and then overwrite the"
                f" contents of the whitelist file with the result."
                f"\nWould you like to continue? [y/n]: "
            )

            if os.path.exists(self.whitelist_path):
                confirm_auto_configure = input(
                    f"A whitelist.txt file was found at {self.whitelist_path}\n\n{message}"
                )
            else:
                confirm_auto_configure = input(
                    f"No whitelist.txt file was found at {self.whitelist_path}\n\n{message}"
                )

            if confirm_auto_configure.strip().lower().startswith("y"):
                ip_address = check_own_ip_address()

                if ip_address is None:
                    print(f"\n[cloudgoat] Unknown error: Unable to retrieve IP address.\n")
                    return None

                ip_address = f"{ip_address}/32"

                if ip_address_or_range_is_valid(ip_address):
                    with open(self.whitelist_path, "w") as whitelist_file:
                        whitelist_file.write(ip_address)

                    print(f"\nwhitelist.txt created with IP address {ip_address}")

                    return load_and_validate_whitelist(self.whitelist_path)

                else:
                    print(
                        f"\n[cloudgoat] Unknown error: Did not receive a valid IP"
                        f" address. Received this instead:\n{ip_address}\n"
                    )
                    return None

            else:
                print(f"Automatic whitelist.txt configuration cancelled.")
                return None

        elif not os.path.exists(self.whitelist_path):
            create_whitelist_now = input(
                f"No IP address whitelist was found at {self.whitelist_path}"
                f"\nCloudGoat requires a whitelist.txt file to exist before the"
                f' "create" command can be used.'
                f"\nWould you like to make one now? [y/n]: "
            )

            if not create_whitelist_now.strip().lower().startswith("y"):
                return None

            while True:
                ip_address = input(
                    f"\nEnter a valid IP address, optionally with CIDR notation: "
                ).strip()

                if not re.findall(r".*\/(\d+)", ip_address):
                    ip_address = ip_address.split("/")[0] + "/32"

                if ip_address_or_range_is_valid(ip_address):
                    with open(self.whitelist_path, "w") as whitelist_file:
                        whitelist_file.write(ip_address)

                    print(f"\nwhitelist.txt created with IP address {ip_address}")

                    return load_and_validate_whitelist(self.whitelist_path)

                else:
                    print(f"\nInvalid IP address.")
                    continue

        else:
            print(f"Loading whitelist.txt...")
            whitelist = load_and_validate_whitelist(self.whitelist_path)
            if whitelist:
                print(
                    f"A whitelist.txt file was found that contains at least one valid"
                    f" IP address or range."
                )
                if print_values:
                    print(f"Whitelisted IP addresses:\n  " + "\n  ".join(whitelist))
            return whitelist

    def create_scenario(self, scenario_name_or_path, profile, user_name):
        scenario_name = normalize_scenario_name(scenario_name_or_path)
        scenario_dir = os.path.join(self.scenarios_dir, scenario_name)

        if not scenario_dir or not scenario_name or not os.path.exists(scenario_dir):
            if not scenario_name:
                return print(
                    f"No recognized scenario name was entered. Did you mean one of"
                    f" these?\n  " + f"\n  ".join(self.scenario_names)
                )
            else:
                return print(
                    f"No scenario named {scenario_name} exists in the scenarios"
                    f" directory. Did you mean one of these?"
                    f"\n  " + f"\n  ".join(self.scenario_names)
                )

        if not os.path.exists(self.whitelist_path):
            cg_whitelist = self.configure_or_check_whitelist(auto=True)
        else:
            cg_whitelist = self.configure_or_check_whitelist()

        if not cg_whitelist:
            print(
                f"A valid whitelist.txt file must exist in the {self.base_dir}"
                f' directory before "create" may be used.'
            )
            return

        # Create a scenario-instance folder in the project root directory.
        # This command should fail with an explanatory error message if a
        # scenario-instance of the same root name (i.e. without the CGID) already
        # exists.
        instance_path = find_scenario_instance_dir(self.base_dir, scenario_name)
        if instance_path is not None:
            print(
                f"\n*************************************************************************************************\n"
                f"Updating previously deployed {scenario_name} scenario. \n\n"
                f"To recreate this scenario from scratch instead, run `./cloudgoat destroy {scenario_name}` first."
                f"\n*************************************************************************************************\n"
            )
        else:
            cgid = generate_cgid_using_username(user_name)
            instance_path = os.path.join(
                self.base_dir, f"{scenario_name}_{cgid}"
            )

            # Copy all the terraform files from the "/scenarios/scenario-name" folder
            # to the scenario-instance folder.
            source_dir_contents = os.path.join(scenario_dir, ".")
            shutil.copytree(source_dir_contents, instance_path)

            if os.path.exists(os.path.join(instance_path, "start.sh")):
                print(f"\nNow running {scenario_name}'s start.sh...")
                start_script_process = subprocess.Popen(
                    ["sh", "start.sh"], cwd=instance_path
                )
                start_script_process.wait()
            else:
                pass

        terraform = Terraform(
            working_dir=os.path.join(instance_path, "terraform")
        )

        init_retcode, init_stdout, init_stderr = terraform.init(
            capture_output=False, no_color=IsNotFlagged
        )
        if init_retcode != 0:
            display_terraform_step_error(
                "terraform init", init_retcode, init_stdout, init_stderr
            )
            return
        else:
            print(f"\n[cloudgoat] terraform init completed with no error code.")
        cgid = instance_path.split('/')[-1].split('_')[-1]
        plan_retcode, plan_stdout, plan_stderr = terraform.plan(
            capture_output=False,
            var={
                "cgid": cgid,
                "cg_whitelist": cg_whitelist,
                "profile": profile,
                "region": self.aws_region,
            },
            no_color=IsNotFlagged,
        )
        # For some reason, `python-terraform`'s `terraform init` returns "2" even
        # when it appears to succeed. For that reason, it will temporarily permit
        # retcode 2.
        if plan_retcode not in (0, 2):
            display_terraform_step_error(
                "terraform plan", plan_retcode, plan_stdout, plan_stderr
            )
            return
        else:
            print(f"\n[cloudgoat] terraform plan completed with no error code.")

        apply_retcode, apply_stdout, apply_stderr = terraform.apply(
            capture_output=False,
            var={
                "cgid": cgid,
                "cg_whitelist": cg_whitelist,
                "profile": profile,
                "region": self.aws_region,
            },
            skip_plan=True,
            no_color=IsNotFlagged,
        )
        if apply_retcode != 0:
            display_terraform_step_error(
                "terraform apply", apply_retcode, apply_stdout, apply_stderr
            )
            return
        else:
            print(f"\n[cloudgoat] terraform apply completed with no error code.")

        # python-terraform uses the '-json' flag by default.
        # The documentation for `output` suggests using output_cmd to receive the
        # library's standard threeple return value.
        # Can't use capture_output here because we need to write stdout to a file.
        output_retcode, output_stdout, output_stderr = terraform.output_cmd('--json')

        if output_retcode != 0:
            display_terraform_step_error(
                "terraform output", output_retcode, output_stdout, output_stderr
            )
            return
        else:
            print(f"\n[cloudgoat] terraform output completed with no error code.")

        # Within this output will be values that begin with "cloudgoat_output".
        # Each line of console output which contains this tag will be written into
        # a text file named "start.txt" in the scenario-instance folder.
        start_file_path = os.path.join(instance_path, "start.txt")
        with open(start_file_path, "w") as start_file:
            output = json.loads(output_stdout)
            for k, v in output.items():
                l = f"{k} = {v['value']}"
                print(l)
                start_file.write(l + '\n')

        print(f"\n[cloudgoat] Output file written to:\n\n  {start_file_path}\n")

    def destroy_all_scenarios(self, profile):
        # Information gathering.
        extant_scenario_instance_names_and_paths = list()
        for scenario_name in self.scenario_names:
            scenario_instance_dir_path = find_scenario_instance_dir(
                self.base_dir, scenario_name
            )

            if scenario_instance_dir_path is None:
                continue
            else:
                extant_scenario_instance_names_and_paths.append(
                    (scenario_name, scenario_instance_dir_path)
                )
                print(f"Scenario instance for {scenario_name} found.")

        if not extant_scenario_instance_names_and_paths:
            print(f"\n  No scenario instance directories exist.\n")
            return
        else:
            print(
                f"\n  {len(extant_scenario_instance_names_and_paths)} scenario"
                f" instance directories found."
            )

        # Iteration.
        success_count, failure_count, skipped_count = 0, 0, 0

        for scenario_name, instance_path in extant_scenario_instance_names_and_paths:
            print(f"\n--------------------------------\n")

            # Confirmation.
            delete_permission = input(f'Destroy "{scenario_name}"? [y/n]: ')

            # Guard against empty input before indexing the first character.
            if not delete_permission.strip() or not delete_permission.strip()[0].lower() == "y":
                skipped_count += 1
                print(f"\nSkipped destruction of {scenario_name}.\n")
                continue

            # Terraform execution.
            terraform_directory = os.path.join(instance_path, "terraform")

            if os.path.exists(os.path.join(terraform_directory, "terraform.tfstate")):
                terraform = Terraform(working_dir=terraform_directory)

                cgid = extract_cgid_from_dir_name(os.path.basename(instance_path))

                destroy_retcode, destroy_stdout, destroy_stderr = terraform.destroy(
                    capture_output=False,
                    var={
                        "cgid": cgid,
                        "cg_whitelist": list(),
                        "profile": profile,
                        "region": self.aws_region,
                    },
                    no_color=IsNotFlagged,
                )
                if destroy_retcode != 0:
                    display_terraform_step_error(
                        "terraform destroy",
                        destroy_retcode,
                        destroy_stdout,
                        destroy_stderr,
                    )
                    failure_count += 1
                    # Subsequent destroys should not be skipped when one fails.
                    continue
                else:
                    print(
                        f"\n[cloudgoat] terraform destroy completed with no error code."
                    )
            else:
                print(
                    f"\nNo terraform.tfstate file was found in the scenario instance's"
                    f' terraform directory, so "terraform destroy" will not be run.'
                )

            # Scenario instance directory trashing.
            trash_dir = create_dir_if_nonexistent(self.base_dir, "trash")

            trashed_instance_path = os.path.join(
                trash_dir, os.path.basename(instance_path)
            )

            shutil.move(instance_path, trashed_instance_path)

            success_count += 1

            print(
                f"\nSuccessfully destroyed {scenario_name}."
                f"\nScenario instance files have been moved to {trashed_instance_path}"
            )

        # Iteration summary.
        print(
            f"\nDestruction complete."
            f"\n  {success_count} scenarios successfully destroyed"
            f"\n  {failure_count} destroys failed"
            f"\n  {skipped_count} skipped\n"
        )

        return

    def destroy_scenario(self, scenario_name_or_path, profile, confirmed=False):
        # Information gathering.
        scenario_name = normalize_scenario_name(scenario_name_or_path)
        scenario_instance_dir_path = find_scenario_instance_dir(
            self.base_dir, scenario_name
        )

        if scenario_instance_dir_path is None:
            print(
                f'[cloudgoat] Error: No scenario instance for "{scenario_name}" found.'
                f" Try: cloudgoat.py list deployed"
            )
            return

        instance_name = os.path.basename(scenario_instance_dir_path)

        # Confirmation.
        if not confirmed:
            delete_permission = input(f'Destroy "{instance_name}"? [y/n]: ').strip()
            if not delete_permission or not delete_permission[0].lower() == "y":
                print(f"\nCancelled destruction of {instance_name}.\n")
                return

        # Terraform execution.
        terraform_directory = os.path.join(scenario_instance_dir_path, "terraform")

        if os.path.exists(os.path.join(terraform_directory, "terraform.tfstate")):
            terraform = Terraform(working_dir=terraform_directory)

            cgid = extract_cgid_from_dir_name(
                os.path.basename(scenario_instance_dir_path)
            )

            destroy_retcode, destroy_stdout, destroy_stderr = terraform.destroy(
                capture_output=False,
                var={
                    "cgid": cgid,
                    "cg_whitelist": list(),
                    "profile": profile,
                    "region": self.aws_region,
                },
                no_color=IsNotFlagged,
            )
            if destroy_retcode != 0:
                display_terraform_step_error(
                    "terraform destroy", destroy_retcode, destroy_stdout, destroy_stderr
                )
                return
            else:
                print("\n[cloudgoat] terraform destroy completed with no error code.")
        else:
            print(
                f"\nNo terraform.tfstate file was found in the scenario instance's"
                f' terraform directory, so "terraform destroy" will not be run.'
            )

        # Scenario instance directory trashing.
        trash_dir = create_dir_if_nonexistent(self.base_dir, "trash")

        trashed_instance_path = os.path.join(
            trash_dir, os.path.basename(scenario_instance_dir_path)
        )

        shutil.move(scenario_instance_dir_path, trashed_instance_path)

        print(
            f"\nSuccessfully destroyed {instance_name}."
            f"\nScenario instance files have been moved to {trashed_instance_path}"
        )

        return

    def list_all_scenarios(self):
        undeployed_scenarios = list()
        deployed_scenario_instance_paths = list()

        for scenario_name in self.scenario_names:
            scenario_instance_dir_path = find_scenario_instance_dir(
                self.base_dir, scenario_name
            )
            if scenario_instance_dir_path:
                deployed_scenario_instance_paths.append(scenario_instance_dir_path)

            else:
                undeployed_scenarios.append(scenario_name)

        print(
            f"\n  Deployed scenario instances: {len(deployed_scenario_instance_paths)}"
        )

        for scenario_instance_dir_path in deployed_scenario_instance_paths:
            directory_name = os.path.basename(scenario_instance_dir_path)
            scenario_name, cgid = directory_name.split("_cgid")
            print(
                f"\n    {scenario_name}"
                f"\n      CGID: {'cgid' + cgid}"
                f"\n      Path: {scenario_instance_dir_path}"
            )

        print(f"\n  Undeployed scenarios: {len(undeployed_scenarios)}")

        # Visual spacing.
        if undeployed_scenarios:
            print(f"")

        for scenario_name in undeployed_scenarios:
            print(f"    {scenario_name}")

        print(f"")

    def list_deployed_scenario_instances(self):
        deployed_scenario_instances = list()
        for scenario_name in self.scenario_names:
            scenario_instance_dir_path = find_scenario_instance_dir(
                self.base_dir, scenario_name
            )

            if scenario_instance_dir_path is None:
                continue
            else:
                deployed_scenario_instances.append(scenario_instance_dir_path)

        if not deployed_scenario_instances:
            print(
                f'\n  No scenario instance directories exist. Try "list undeployed" or'
                f' "list all"\n'
            )
            return
        else:
            print(
                f"\n  Deployed scenario instances: {len(deployed_scenario_instances)}"
            )

        for scenario_instance_dir_path in deployed_scenario_instances:
            directory_name = os.path.basename(scenario_instance_dir_path)
            scenario_name, cgid = directory_name.split("_cgid")

            print(
                f"\n    {scenario_name}"
                f"\n      CGID: {'cgid' + cgid}"
                f"\n      Path: {scenario_instance_dir_path}"
            )

        print("")

    def list_undeployed_scenarios(self):
        undeployed_scenarios = list()
        for scenario_name in self.scenario_names:
            if not find_scenario_instance_dir(self.base_dir, scenario_name):
                undeployed_scenarios.append(scenario_name)

        if undeployed_scenarios:
            return print(
                f"\n  Undeployed scenarios: {len(undeployed_scenarios)}\n\n    "
                + f"\n    ".join(undeployed_scenarios)
                + f"\n"
            )
        else:
            return print(
                f'\n  All scenarios have been deployed. Try "list deployed" or "list'
                f' all"\n'
            )

    def list_scenario_instance(self, scenario_name_or_path):
        scenario_name = normalize_scenario_name(scenario_name_or_path)
        scenario_instance_dir_path = find_scenario_instance_dir(
            self.base_dir, scenario_name
        )

        if scenario_instance_dir_path is None:
            print(
                f'[cloudgoat] Error: No scenario instance for "{scenario_name}" found.'
                f" Try: cloudgoat.py list deployed"
            )
            return

        terraform = Terraform(
            working_dir=os.path.join(scenario_instance_dir_path, "terraform")
        )

        show_retcode, show_stdout, show_stderr = terraform.show(
            capture_output=False, no_color=IsNotFlagged
        )
        if show_retcode != 0:
            display_terraform_step_error(
                "terraform show", show_retcode, show_stdout, show_stderr
            )
            return
        else:
            print(f"\n[cloudgoat] terraform show completed with no error code.")

        return
# -*- coding: utf-8 -*-
"""
    'accounts' resource and schema settings.

    :copyright: (c) 2014 by Nicola Iarocci and CIR2000.
    :license: BSD, see LICENSE for more details.
"""
from common import base_schema, required_string

_schema = {
    'u': required_string,   # username
    'p': required_string,   # password
    't': required_string,   # token
    'r': {                  # role
        'type': 'list',
        'allowed': ['admin', 'app', 'user'],
        'required': True,
    }
}

definition = {
    'url': 'accounts',
    'item_title': 'account',
    # only admins and apps are allowed to consume this endpoint.
    'allowed_roles': ['admin', 'app'],
    'cache_control': '',
    'cache_expires': 0,
    'additional_lookup': {
        'url': r'regex("[\w]+")',   # to be unique
        'field': 'u'
    },
    'schema': _schema,
}
definition.update(base_schema)
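# --- Hedged usage sketch, not part of the original module ---
# An Eve application would typically mount this resource by placing
# `definition` in its DOMAIN settings; the database name below is an
# assumption, and a running MongoDB instance is required.
#
# from eve import Eve
# app = Eve(settings={'DOMAIN': {'accounts': definition},
#                     'MONGO_DBNAME': 'example'})
# app.run()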
import os
import pandas as pd

from plotly import tools
import plotly.graph_objs as go
import plotly.offline as offline
from matplotlib import colors

offline.init_notebook_mode()

color = {
    "acaes": "brown",
    "cavern-acaes": "brown",
    "gas-ocgt": "gray",
    "gas-ccgt": "lightgray",
    "solar-pv": "gold",
    "wind-onshore": "skyblue",
    "wind-offshore": "darkblue",
    "biomass-st": "olivedrab",
    "battery": "lightsalmon",
    "electricity": "lightsalmon",
    "hydro-ror": "aqua",
    "hydro-phs": "darkred",
    "hydro-reservoir": "magenta",
    "hydrogen-storage": "skyblue",
    "biomass": "olivedrab",
    "uranium": "yellow",
    "hydro": "aqua",
    "wind": "skyblue",
    "solar": "gold",
    "gas": "lightgray",
    "lignite": "chocolate",
    "coal": "dimgrey",
    "waste": "yellowgreen",
    "oil": "black",
    "import": "pink",
    "storage": "green",
    "other": "red",
    "mixed": "saddlebrown",
    "mixed-st": "darkcyan",
}

color_dict = {name: colors.to_hex(color) for name, color in color.items()}


def merit_order_plot(scenario, prices, storages):
    prices = prices[scenario]
    prices = prices.sort_values(by=["shadow_price"])

    storages = storages[scenario]
    storages = storages.sort_values(by=["shadow_price"])

    prices["colors"] = [color_dict.get(c, "black") for c in prices.carrier]
    # text = [str(t)+' '+str(n) for t in prices.index for n in prices.name]

    fig = tools.make_subplots(rows=2, cols=1)

    data = []

    data.append(
        go.Bar(
            y=prices.shadow_price,
            # text = text,
            opacity=1,
            name="shadow_price",
            showlegend=False,
            width=1.05,
            marker=dict(color=prices.colors),
        )
    )

    fig.append_trace(data[0], 1, 1)

    # just for legend to work
    for c in prices.carrier.unique():
        if c == "NONE":
            fig.append_trace(
                go.Bar(
                    y=[0],
                    # text = text,
                    name="NONE",
                    marker=dict(color=color_dict.get(c, "black")),
                ),
                1,
                1,
            )
        else:
            fig.append_trace(
                go.Bar(
                    y=[0],
                    # text = text,
                    name=c.title(),
                    marker=dict(color=color_dict.get(c, "black")),
                ),
                1,
                1,
            )

    storage_dispatch_ordered = go.Bar(
        y=storages["storage_dispatch"],
        # text = text,
        name="Storage Dispatch",
        width=1.04,
        opacity=1,
        marker=dict(color="magenta"),
    )

    fig.append_trace(storage_dispatch_ordered, 2, 1)

    fig["layout"].update(
        title="Ordered prices and storage dispatch in DE " + scenario,
        yaxis1=dict(title="Shadow price in € / MWh"),
        yaxis2=dict(title="Storage dispatch in MWh"),
        xaxis2=dict(title="Hours of the year"),
        showlegend=True,
        # legend=dict(x=0, y=-0),
        bargap=0,
    )

    return fig
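# --- Illustrative call, not part of the original file ---
# Minimal DataFrames containing only the columns merit_order_plot() actually
# reads ('shadow_price', 'carrier', 'storage_dispatch'), keyed by a made-up
# scenario name.
if __name__ == "__main__":
    prices = {"base": pd.DataFrame({
        "shadow_price": [30.0, 55.0, 12.0],
        "carrier": ["wind", "gas", "solar"],
    })}
    storages = {"base": pd.DataFrame({
        "shadow_price": [30.0, 55.0, 12.0],
        "storage_dispatch": [0.0, 80.0, 20.0],
    })}
    offline.plot(merit_order_plot("base", prices, storages))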
# Import models
from ..models import InputComputeGmx, OutputComputeGmx
from cmselemental.util.decorators import classproperty

# Import components
from mmic_cmd.components import CmdComponent
from mmic.components.blueprints import GenericComponent

from typing import Dict, Any, List, Tuple, Optional
from pathlib import Path
import os
import shutil
import tempfile
import ntpath


__all__ = ["ComputeGmxComponent"]


class ComputeGmxComponent(GenericComponent):
    @classproperty
    def input(cls):
        return InputComputeGmx

    @classproperty
    def output(cls):
        return OutputComputeGmx

    @classproperty
    def version(cls) -> str:
        """Finds program, extracts version, returns normalized version string.
        Returns
        -------
        str
            Return a valid, safe python version string.
        """
        return ""

    def execute(
        self,
        inputs: InputComputeGmx,
        extra_outfiles: Optional[List[str]] = None,
        extra_commands: Optional[List[str]] = None,
        scratch_name: Optional[str] = None,
        timeout: Optional[int] = None,
    ) -> Tuple[bool, OutputComputeGmx]:

        # Call gmx pdb2gmx, mdrun, etc. here
        if isinstance(inputs, dict):
            inputs = self.input(**inputs)

        proc_input, mdp_file, gro_file, top_file = (
            inputs.proc_input,
            inputs.mdp_file,
            inputs.molecule,
            inputs.forcefield,
        )  # The parameters here are all str

        tpr_file = tempfile.NamedTemporaryFile(suffix=".tpr").name  # , delete=False)

        input_model = {
            "proc_input": proc_input,
            "mdp_file": mdp_file,
            "gro_file": gro_file,
            "top_file": top_file,
            "tpr_file": tpr_file,
        }

        clean_files, cmd_input_grompp = self.build_input_grompp(input_model)
        rvalue = CmdComponent.compute(cmd_input_grompp)
        grompp_scratch_dir = [str(rvalue.scratch_directory)]
        self.cleanup(clean_files)  # Delete the mdp and top files in the working dir
        self.cleanup([inputs.scratch_dir])

        input_model = {"proc_input": proc_input, "tpr_file": tpr_file}
        cmd_input_mdrun = self.build_input_mdrun(input_model)
        rvalue = CmdComponent.compute(cmd_input_mdrun)
        self.cleanup([tpr_file, gro_file])
        self.cleanup(grompp_scratch_dir)

        return True, self.parse_output(rvalue.dict(), proc_input)

    @staticmethod
    def cleanup(remove: List[str]):
        for item in remove:
            if os.path.isdir(item):
                shutil.rmtree(item)
            elif os.path.isfile(item):
                os.remove(item)

    def build_input_grompp(
        self,
        inputs: Dict[str, Any],
        config: Optional["TaskConfig"] = None,
        template: Optional[str] = None,
    ) -> Tuple[List[str], Dict[str, Any]]:
        """
        Build the input for grompp
        """
        assert inputs["proc_input"].engine == "gmx", "Engine must be gmx (Gromacs)!"

        env = os.environ.copy()

        if config:
            env["MKL_NUM_THREADS"] = str(config.ncores)
            env["OMP_NUM_THREADS"] = str(config.ncores)

        scratch_directory = config.scratch_directory if config else None

        tpr_file = inputs["tpr_file"]

        clean_files = []
        clean_files.append(inputs["mdp_file"])
        clean_files.append(inputs["top_file"])

        cmd = [
            inputs["proc_input"].engine,
            "grompp",
            "-f",
            inputs["mdp_file"],
            "-c",
            inputs["gro_file"],
            "-p",
            inputs["top_file"],
            "-o",
            tpr_file,
            "-maxwarn",
            "-1",
        ]
        outfiles = [tpr_file]

        return (
            clean_files,
            {
                "command": cmd,
                "as_binary": [tpr_file],
                "infiles": [inputs["mdp_file"], inputs["gro_file"], inputs["top_file"]],
                "outfiles": outfiles,
                "outfiles_track": outfiles,
                "scratch_directory": scratch_directory,
                "environment": env,
                "scratch_messy": True,
            },
        )

    def build_input_mdrun(
        self,
        inputs: Dict[str, Any],
        config: Optional["TaskConfig"] = None,
        template: Optional[str] = None,
    ) -> Dict[str, Any]:

        env = os.environ.copy()

        if config:
            env["MKL_NUM_THREADS"] = str(config.ncores)
            env["OMP_NUM_THREADS"] = str(config.ncores)

        scratch_directory = config.scratch_directory if config else None

        log_file = tempfile.NamedTemporaryFile(suffix=".log").name
        trr_file = tempfile.NamedTemporaryFile(suffix=".trr").name
        edr_file = tempfile.NamedTemporaryFile(suffix=".edr").name
        gro_file = tempfile.NamedTemporaryFile(suffix=".gro").name

        tpr_file = inputs["tpr_file"]
        tpr_fname = ntpath.basename(tpr_file)

        cmd = [
            inputs["proc_input"].engine,  # Should this be gmx_mpi?
            "mdrun",
            "-s",
            tpr_file,
            "-o",
            trr_file,
            "-c",
            gro_file,
            "-e",
            edr_file,
            "-g",
            log_file,
        ]

        outfiles = [trr_file, gro_file, edr_file, log_file]

        # For extra args
        if inputs["proc_input"].keywords:
            for key, val in inputs["proc_input"].keywords.items():
                if val:
                    cmd.extend([key, val])
                else:
                    cmd.extend([key])

        return {
            "command": cmd,
            "as_binary": [
                tpr_fname,
                trr_file,
                edr_file,
            ],  # For outfiles, mmic_cmd does not use ntpath.basename to obtain the base name,
            # so trr and edr do not need to be passed through ntpath.basename.
            "infiles": [tpr_file],
            "outfiles": outfiles,
            "outfiles_track": outfiles,
            "scratch_directory": scratch_directory,
            "environment": env,
            "scratch_messy": True,
        }

    def parse_output(
        self, output: Dict[str, str], inputs: Dict[str, Any]
    ) -> OutputComputeGmx:
        # stdout = output["stdout"]
        # stderr = output["stderr"]
        outfiles = output["outfiles"]
        scratch_dir = str(output["scratch_directory"])

        traj, conf, energy, log = outfiles.keys()

        self.cleanup([energy, log])  # Not sure if edr and log should be deleted,
        # but this is the last chance to delete them

        return self.output(
            proc_input=inputs, molecule=conf, trajectory=traj, scratch_dir=scratch_dir
        )
"""NEWLINESprite Collect CoinsNEWLINENEWLINESimple program to show basic sprite usage.NEWLINENEWLINEArtwork from http://kenney.nlNEWLINE"""NEWLINEimport randomNEWLINEimport arcadeNEWLINENEWLINESPRITE_SCALING = 0.5NEWLINENEWLINESCREEN_WIDTH = 800NEWLINESCREEN_HEIGHT = 600NEWLINENEWLINEwindow = NoneNEWLINENEWLINENEWLINEclass MyApplication(arcade.Window):NEWLINE """ Main application class. """NEWLINENEWLINE def setup(self):NEWLINE """ Set up the game and initialize the variables. """NEWLINENEWLINE # Sprite listsNEWLINE self.all_sprites_list = arcade.SpriteList()NEWLINE self.coin_list = arcade.SpriteList()NEWLINENEWLINE # Set up the playerNEWLINE self.score = 0NEWLINE self.player_sprite = arcade.Sprite("images/character.png",NEWLINE SPRITE_SCALING)NEWLINE self.player_sprite.center_x = 50NEWLINE self.player_sprite.center_y = 50NEWLINE self.all_sprites_list.append(self.player_sprite)NEWLINENEWLINE for i in range(50):NEWLINENEWLINE # Create the coin instanceNEWLINE coin = arcade.Sprite("images/coin_01.png", SPRITE_SCALING / 3)NEWLINENEWLINE # Position the coinNEWLINE coin.center_x = random.randrange(SCREEN_WIDTH)NEWLINE coin.center_y = random.randrange(SCREEN_HEIGHT)NEWLINENEWLINE # Add the coin to the listsNEWLINE self.all_sprites_list.append(coin)NEWLINE self.coin_list.append(coin)NEWLINENEWLINE # Don't show the mouse cursorNEWLINE self.set_mouse_visible(False)NEWLINENEWLINE # Set the background colorNEWLINE arcade.set_background_color(arcade.color.AMAZON)NEWLINENEWLINE def on_draw(self):NEWLINE """NEWLINE Render the screen.NEWLINE """NEWLINENEWLINE # This command has to happen before we start drawingNEWLINE arcade.start_render()NEWLINENEWLINE # Draw all the sprites.NEWLINE self.all_sprites_list.draw()NEWLINENEWLINE # Put the text on the screen.NEWLINE output = "Score: {}".format(self.score)NEWLINE arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)NEWLINENEWLINE def on_mouse_motion(self, x, y, dx, dy):NEWLINE """NEWLINE Called whenever the mouse moves.NEWLINE """NEWLINE self.player_sprite.center_x = xNEWLINE self.player_sprite.center_y = yNEWLINENEWLINE def animate(self, delta_time):NEWLINE """ Movement and game logic """NEWLINENEWLINE # Call update on all sprites (The sprites don't do much in thisNEWLINE # example though.)NEWLINE self.all_sprites_list.update()NEWLINENEWLINE # Generate a list of all sprites that collided with the player.NEWLINE hit_list = \NEWLINE arcade.check_for_collision_with_list(self.player_sprite,NEWLINE self.coin_list)NEWLINENEWLINE # Loop through each colliding sprite, remove it, and add to the score.NEWLINE for coin in hit_list:NEWLINE coin.kill()NEWLINE self.score += 1NEWLINENEWLINENEWLINEwindow = MyApplication(SCREEN_WIDTH, SCREEN_HEIGHT)NEWLINEwindow.setup()NEWLINENEWLINEarcade.run()NEWLINE
import numpy as np
import unittest

import chainer
from chainer import optimizers
from chainer import testing
from chainer.testing import attr

from chainercv.links.model.ssd import GradientScaling


class SimpleLink(chainer.Link):

    def __init__(self, w, g):
        super(SimpleLink, self).__init__()
        with self.init_scope():
            self.param = chainer.Parameter(w)
            self.param.grad = g


class TestGradientScaling(unittest.TestCase):

    def setUp(self):
        self.target = SimpleLink(
            np.arange(6, dtype=np.float32).reshape((2, 3)),
            np.arange(3, -3, -1, dtype=np.float32).reshape((2, 3)))

    def check_gradient_scaling(self):
        w = self.target.param.array
        g = self.target.param.grad

        rate = 0.2
        expect = w - g * rate

        opt = optimizers.SGD(lr=1)
        opt.setup(self.target)
        opt.add_hook(GradientScaling(rate))
        opt.update()

        testing.assert_allclose(expect, w)

    def test_gradient_scaling_cpu(self):
        self.check_gradient_scaling()

    @attr.gpu
    def test_gradient_scaling_gpu(self):
        self.target.to_gpu()
        self.check_gradient_scaling()


testing.run_module(__name__, __file__)
import osNEWLINEimport sysNEWLINEimport pickleNEWLINEimport copyNEWLINEimport warningsNEWLINEimport platformNEWLINEimport textwrapNEWLINEimport globNEWLINEfrom os.path import joinNEWLINENEWLINEfrom numpy.distutils import logNEWLINEfrom distutils.dep_util import newerNEWLINEfrom sysconfig import get_config_varNEWLINEfrom numpy.compat import npy_load_moduleNEWLINEfrom setup_common import * # noqa: F403NEWLINENEWLINE# Set to True to enable relaxed strides checking. This (mostly) meansNEWLINE# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.NEWLINENPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")NEWLINENEWLINE# Put NPY_RELAXED_STRIDES_DEBUG=1 in the environment if you want numpy to use aNEWLINE# bogus value for affected strides in order to help smoke out bad stride usageNEWLINE# when relaxed stride checking is enabled.NEWLINENPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', "0") != "0")NEWLINENPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKINGNEWLINENEWLINE# Set NPY_DISABLE_SVML=1 in the environment to disable the vendored SVMLNEWLINE# library. This option only has significance on a Linux x86_64 host and is mostNEWLINE# useful to avoid improperly requiring SVML when cross compiling.NEWLINENPY_DISABLE_SVML = (os.environ.get('NPY_DISABLE_SVML', "0") == "1")NEWLINENEWLINE# XXX: ugly, we use a class to avoid calling twice some expensive functions inNEWLINE# config.h/numpyconfig.h. I don't see a better way because distutils forceNEWLINE# config.h generation inside an Extension class, and as such sharingNEWLINE# configuration information between extensions is not easy.NEWLINE# Using a pickled-based memoize does not work because config_cmd is an instanceNEWLINE# method, which cPickle does not like.NEWLINE#NEWLINE# Use pickle in all cases, as cPickle is gone in python3 and the differenceNEWLINE# in time is only in build. -- Charles Harris, 2013-03-30NEWLINENEWLINEclass CallOnceOnly:NEWLINE def __init__(self):NEWLINE self._check_types = NoneNEWLINE self._check_ieee_macros = NoneNEWLINE self._check_complex = NoneNEWLINENEWLINE def check_types(self, *a, **kw):NEWLINE if self._check_types is None:NEWLINE out = check_types(*a, **kw)NEWLINE self._check_types = pickle.dumps(out)NEWLINE else:NEWLINE out = copy.deepcopy(pickle.loads(self._check_types))NEWLINE return outNEWLINENEWLINE def check_ieee_macros(self, *a, **kw):NEWLINE if self._check_ieee_macros is None:NEWLINE out = check_ieee_macros(*a, **kw)NEWLINE self._check_ieee_macros = pickle.dumps(out)NEWLINE else:NEWLINE out = copy.deepcopy(pickle.loads(self._check_ieee_macros))NEWLINE return outNEWLINENEWLINE def check_complex(self, *a, **kw):NEWLINE if self._check_complex is None:NEWLINE out = check_complex(*a, **kw)NEWLINE self._check_complex = pickle.dumps(out)NEWLINE else:NEWLINE out = copy.deepcopy(pickle.loads(self._check_complex))NEWLINE return outNEWLINENEWLINEdef can_link_svml():NEWLINE """SVML library is supported only on x86_64 architecture and currentlyNEWLINE only on linuxNEWLINE """NEWLINE if NPY_DISABLE_SVML:NEWLINE return FalseNEWLINE machine = platform.machine()NEWLINE system = platform.system()NEWLINE return "x86_64" in machine and system == "Linux"NEWLINENEWLINEdef check_svml_submodule(svmlpath):NEWLINE if not os.path.exists(svmlpath + "/README.md"):NEWLINE raise RuntimeError("Missing `SVML` submodule! 
Run `git submodule "NEWLINE "update --init` to fix this.")NEWLINE return TrueNEWLINENEWLINEdef pythonlib_dir():NEWLINE """return path where libpython* is."""NEWLINE if sys.platform == 'win32':NEWLINE return os.path.join(sys.prefix, "libs")NEWLINE else:NEWLINE return get_config_var('LIBDIR')NEWLINENEWLINEdef is_npy_no_signal():NEWLINE """Return True if the NPY_NO_SIGNAL symbol must be defined in configurationNEWLINE header."""NEWLINE return sys.platform == 'win32'NEWLINENEWLINEdef is_npy_no_smp():NEWLINE """Return True if the NPY_NO_SMP symbol must be defined in publicNEWLINE header (when SMP support cannot be reliably enabled)."""NEWLINE # Perhaps a fancier check is in order here.NEWLINE # so that threads are only enabled if thereNEWLINE # are actually multiple CPUS? -- butNEWLINE # threaded code can be nice even on a singleNEWLINE # CPU so that long-calculating code doesn'tNEWLINE # block.NEWLINE return 'NPY_NOSMP' in os.environNEWLINENEWLINEdef win32_checks(deflist):NEWLINE from numpy.distutils.misc_util import get_build_architectureNEWLINE a = get_build_architecture()NEWLINENEWLINE # Distutils hack on AMD64 on windowsNEWLINE print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %NEWLINE (a, os.name, sys.platform))NEWLINE if a == 'AMD64':NEWLINE deflist.append('DISTUTILS_USE_SDK')NEWLINENEWLINE # On win32, force long double format string to be 'g', notNEWLINE # 'Lg', since the MS runtime does not support long double whoseNEWLINE # size is > sizeof(double)NEWLINE if a == "Intel" or a == "AMD64":NEWLINE deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')NEWLINENEWLINEdef check_math_capabilities(config, ext, moredefs, mathlibs):NEWLINE def check_func(func_name):NEWLINE return config.check_func(func_name, libraries=mathlibs,NEWLINE decl=True, call=True)NEWLINENEWLINE def check_funcs_once(funcs_name):NEWLINE decl = dict([(f, True) for f in funcs_name])NEWLINE st = config.check_funcs_once(funcs_name, libraries=mathlibs,NEWLINE decl=decl, call=decl)NEWLINE if st:NEWLINE moredefs.extend([(fname2def(f), 1) for f in funcs_name])NEWLINE return stNEWLINENEWLINE def check_funcs(funcs_name):NEWLINE # Use check_funcs_once first, and if it does not work, test func perNEWLINE # func. Return success only if all the functions are availableNEWLINE if not check_funcs_once(funcs_name):NEWLINE # Global check failed, check func per funcNEWLINE for f in funcs_name:NEWLINE if check_func(f):NEWLINE moredefs.append((fname2def(f), 1))NEWLINE return 0NEWLINE else:NEWLINE return 1NEWLINENEWLINE #use_msvc = config.check_decl("_MSC_VER")NEWLINENEWLINE if not check_funcs_once(MANDATORY_FUNCS):NEWLINE raise SystemError("One of the required function to build numpy is not"NEWLINE " available (the list is %s)." % str(MANDATORY_FUNCS))NEWLINENEWLINE # Standard functions which may not be available and for which we have aNEWLINE # replacement implementation. Note that some of these are C99 functions.NEWLINENEWLINE # XXX: hack to circumvent cpp pollution from python: python put itsNEWLINE # config.h in the public namespace, so we have a clash for the commonNEWLINE # functions we test. 
We remove every function tested by python'sNEWLINE # autoconf, hoping their own tests are correctNEWLINE for f in OPTIONAL_STDFUNCS_MAYBE:NEWLINE if config.check_decl(fname2def(f),NEWLINE headers=["Python.h", "math.h"]):NEWLINE OPTIONAL_STDFUNCS.remove(f)NEWLINENEWLINE check_funcs(OPTIONAL_STDFUNCS)NEWLINENEWLINE for h in OPTIONAL_HEADERS:NEWLINE if config.check_func("", decl=False, call=False, headers=[h]):NEWLINE h = h.replace(".", "_").replace(os.path.sep, "_")NEWLINE moredefs.append((fname2def(h), 1))NEWLINENEWLINE for tup in OPTIONAL_INTRINSICS:NEWLINE headers = NoneNEWLINE if len(tup) == 2:NEWLINE f, args, m = tup[0], tup[1], fname2def(tup[0])NEWLINE elif len(tup) == 3:NEWLINE f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[0])NEWLINE else:NEWLINE f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[3])NEWLINE if config.check_func(f, decl=False, call=True, call_args=args,NEWLINE headers=headers):NEWLINE moredefs.append((m, 1))NEWLINENEWLINE for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:NEWLINE if config.check_gcc_function_attribute(dec, fn):NEWLINE moredefs.append((fname2def(fn), 1))NEWLINE if fn == 'attribute_target_avx512f':NEWLINE # GH-14787: Work around GCC<8.4 bug when compiling with AVX512NEWLINE # support on Windows-based platformsNEWLINE if (sys.platform in ('win32', 'cygwin') andNEWLINE config.check_compiler_gcc() andNEWLINE not config.check_gcc_version_at_least(8, 4)):NEWLINE ext.extra_compile_args.extend(NEWLINE ['-ffixed-xmm%s' % n for n in range(16, 32)])NEWLINENEWLINE for dec, fn, code, header in OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS:NEWLINE if config.check_gcc_function_attribute_with_intrinsics(dec, fn, code,NEWLINE header):NEWLINE moredefs.append((fname2def(fn), 1))NEWLINENEWLINE for fn in OPTIONAL_VARIABLE_ATTRIBUTES:NEWLINE if config.check_gcc_variable_attribute(fn):NEWLINE m = fn.replace("(", "_").replace(")", "_")NEWLINE moredefs.append((fname2def(m), 1))NEWLINENEWLINE # C99 functions: float and long double versionsNEWLINE check_funcs(C99_FUNCS_SINGLE)NEWLINE check_funcs(C99_FUNCS_EXTENDED)NEWLINENEWLINEdef check_complex(config, mathlibs):NEWLINE priv = []NEWLINE pub = []NEWLINENEWLINE try:NEWLINE if os.uname()[0] == "Interix":NEWLINE warnings.warn("Disabling broken complex support. See #1365", stacklevel=2)NEWLINE return priv, pubNEWLINE except Exception:NEWLINE # os.uname is not available on all platforms; a blanket except is ugly but safe here.NEWLINE 
passNEWLINENEWLINE # Check for complex supportNEWLINE st = config.check_header('complex.h')NEWLINE if st:NEWLINE priv.append(('HAVE_COMPLEX_H', 1))NEWLINE pub.append(('NPY_USE_C99_COMPLEX', 1))NEWLINENEWLINE for t in C99_COMPLEX_TYPES:NEWLINE st = config.check_type(t, headers=["complex.h"])NEWLINE if st:NEWLINE pub.append(('NPY_HAVE_%s' % type2def(t), 1))NEWLINENEWLINE def check_prec(prec):NEWLINE flist = [f + prec for f in C99_COMPLEX_FUNCS]NEWLINE decl = dict([(f, True) for f in flist])NEWLINE if not config.check_funcs_once(flist, call=decl, decl=decl,NEWLINE libraries=mathlibs):NEWLINE for f in flist:NEWLINE if config.check_func(f, call=True, decl=True,NEWLINE libraries=mathlibs):NEWLINE priv.append((fname2def(f), 1))NEWLINE else:NEWLINE priv.extend([(fname2def(f), 1) for f in flist])NEWLINENEWLINE check_prec('')NEWLINE check_prec('f')NEWLINE check_prec('l')NEWLINENEWLINE return priv, pubNEWLINENEWLINEdef check_ieee_macros(config):NEWLINE priv = []NEWLINE pub = []NEWLINENEWLINE macros = []NEWLINENEWLINE def _add_decl(f):NEWLINE priv.append(fname2def("decl_%s" % f))NEWLINE pub.append('NPY_%s' % fname2def("decl_%s" % f))NEWLINENEWLINE # XXX: hack to circumvent cpp pollution from python: python puts itsNEWLINE # config.h in the public namespace, so we have a clash for the commonNEWLINE # functions we test. We remove every function tested by python'sNEWLINE # autoconf, hoping their own tests are correctNEWLINE _macros = ["isnan", "isinf", "signbit", "isfinite"]NEWLINE for f in _macros:NEWLINE py_symbol = fname2def("decl_%s" % f)NEWLINE already_declared = config.check_decl(py_symbol,NEWLINE headers=["Python.h", "math.h"])NEWLINE if already_declared:NEWLINE if config.check_macro_true(py_symbol,NEWLINE headers=["Python.h", "math.h"]):NEWLINE pub.append('NPY_%s' % fname2def("decl_%s" % f))NEWLINE else:NEWLINE macros.append(f)NEWLINE # Normally, isnan and isinf are macros (C99), but some platforms only haveNEWLINE # the func, or both func and macro versions. Check for macro only, and defineNEWLINE # replacement ones if not found.NEWLINE # Note: including Python.h is necessary because it modifies some math.hNEWLINE # definitionsNEWLINE for f in macros:NEWLINE st = config.check_decl(f, headers=["Python.h", "math.h"])NEWLINE if st:NEWLINE _add_decl(f)NEWLINENEWLINE return priv, pubNEWLINENEWLINEdef check_types(config_cmd, ext, build_dir):NEWLINE private_defines = []NEWLINE public_defines = []NEWLINENEWLINE # Expected size (in number of bytes) for each type. This is anNEWLINE # optimization: those are only hints, and an exhaustive search for the sizeNEWLINE # is done if the hints are wrong.NEWLINE expected = {'short': [2], 'int': [4], 'long': [8, 4],NEWLINE 'float': [4], 'double': [8], 'long double': [16, 12, 8],NEWLINE 'Py_intptr_t': [8, 4], 'PY_LONG_LONG': [8], 'long long': [8],NEWLINE 'off_t': [8, 4]}NEWLINENEWLINE # Check we have the python header (-dev* packages on Linux)NEWLINE result = config_cmd.check_header('Python.h')NEWLINE if not result:NEWLINE python = 'python'NEWLINE if '__pypy__' in sys.builtin_module_names:NEWLINE python = 'pypy'NEWLINE raise SystemError(NEWLINE "Cannot compile 'Python.h'. 
Perhaps you need to "NEWLINE "install {0}-dev|{0}-devel.".format(python))NEWLINE res = config_cmd.check_header("endian.h")NEWLINE if res:NEWLINE private_defines.append(('HAVE_ENDIAN_H', 1))NEWLINE public_defines.append(('NPY_HAVE_ENDIAN_H', 1))NEWLINE res = config_cmd.check_header("sys/endian.h")NEWLINE if res:NEWLINE private_defines.append(('HAVE_SYS_ENDIAN_H', 1))NEWLINE public_defines.append(('NPY_HAVE_SYS_ENDIAN_H', 1))NEWLINENEWLINE # Check basic types sizesNEWLINE for type in ('short', 'int', 'long'):NEWLINE res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers=["Python.h"])NEWLINE if res:NEWLINE public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))NEWLINE else:NEWLINE res = config_cmd.check_type_size(type, expected=expected[type])NEWLINE if res >= 0:NEWLINE public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))NEWLINE else:NEWLINE raise SystemError("Checking sizeof (%s) failed !" % type)NEWLINENEWLINE for type in ('float', 'double', 'long double'):NEWLINE already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),NEWLINE headers=["Python.h"])NEWLINE res = config_cmd.check_type_size(type, expected=expected[type])NEWLINE if res >= 0:NEWLINE public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))NEWLINE if not already_declared and not type == 'long double':NEWLINE private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))NEWLINE else:NEWLINE raise SystemError("Checking sizeof (%s) failed !" % type)NEWLINENEWLINE # Compute size of corresponding complex type: used to check that ourNEWLINE # definition is binary compatible with C99 complex type (check done atNEWLINE # build time in npy_common.h)NEWLINE complex_def = "struct {%s __x; %s __y;}" % (type, type)NEWLINE res = config_cmd.check_type_size(complex_def,NEWLINE expected=[2 * x for x in expected[type]])NEWLINE if res >= 0:NEWLINE public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))NEWLINE else:NEWLINE raise SystemError("Checking sizeof (%s) failed !" % complex_def)NEWLINENEWLINE for type in ('Py_intptr_t', 'off_t'):NEWLINE res = config_cmd.check_type_size(type, headers=["Python.h"],NEWLINE library_dirs=[pythonlib_dir()],NEWLINE expected=expected[type])NEWLINENEWLINE if res >= 0:NEWLINE private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))NEWLINE public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))NEWLINE else:NEWLINE raise SystemError("Checking sizeof (%s) failed !" % type)NEWLINENEWLINE # We check declaration AND type because that's how distutils does it.NEWLINE if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):NEWLINE res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],NEWLINE library_dirs=[pythonlib_dir()],NEWLINE expected=expected['PY_LONG_LONG'])NEWLINE if res >= 0:NEWLINE private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))NEWLINE public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))NEWLINE else:NEWLINE raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG')NEWLINENEWLINE res = config_cmd.check_type_size('long long',NEWLINE expected=expected['long long'])NEWLINE if res >= 0:NEWLINE #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))NEWLINE public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))NEWLINE else:NEWLINE raise SystemError("Checking sizeof (%s) failed !" 
% 'long long')NEWLINENEWLINE if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):NEWLINE raise RuntimeError(NEWLINE "Config without CHAR_BIT is not supported"NEWLINE ", please contact the maintainers")NEWLINENEWLINE return private_defines, public_definesNEWLINENEWLINEdef check_mathlib(config_cmd):NEWLINE # Testing the C math libraryNEWLINE mathlibs = []NEWLINE mathlibs_choices = [[], ['m'], ['cpml']]NEWLINE mathlib = os.environ.get('MATHLIB')NEWLINE if mathlib:NEWLINE mathlibs_choices.insert(0, mathlib.split(','))NEWLINE for libs in mathlibs_choices:NEWLINE if config_cmd.check_func("exp", libraries=libs, decl=True, call=True):NEWLINE mathlibs = libsNEWLINE breakNEWLINE else:NEWLINE raise RuntimeError(NEWLINE "math library missing; rerun setup.py after setting the "NEWLINE "MATHLIB env variable")NEWLINE return mathlibsNEWLINENEWLINEdef visibility_define(config):NEWLINE """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be emptyNEWLINE string)."""NEWLINE hide = '__attribute__((visibility("hidden")))'NEWLINE if config.check_gcc_function_attribute(hide, 'hideme'):NEWLINE return hideNEWLINE else:NEWLINE return ''NEWLINENEWLINEdef configuration(parent_package='',top_path=None):NEWLINE from numpy.distutils.misc_util import (Configuration, dot_join,NEWLINE exec_mod_from_location)NEWLINE from numpy.distutils.system_info import (get_info, blas_opt_info,NEWLINE lapack_opt_info)NEWLINE from numpy.version import release as is_releasedNEWLINENEWLINE config = Configuration('core', parent_package, top_path)NEWLINE local_dir = config.local_pathNEWLINE codegen_dir = join(local_dir, 'code_generators')NEWLINENEWLINE if is_released:NEWLINE warnings.simplefilter('error', MismatchCAPIWarning)NEWLINENEWLINE # Check whether we have a mismatch between the set C API VERSION and theNEWLINE # actual C API VERSIONNEWLINE check_api_version(C_API_VERSION, codegen_dir)NEWLINENEWLINE generate_umath_py = join(codegen_dir, 'generate_umath.py')NEWLINE n = dot_join(config.name, 'generate_umath')NEWLINE generate_umath = exec_mod_from_location('_'.join(n.split('.')),NEWLINE generate_umath_py)NEWLINENEWLINE header_dir = 'include/numpy' # this is relative to config.path_in_packageNEWLINENEWLINE cocache = CallOnceOnly()NEWLINENEWLINE def generate_config_h(ext, build_dir):NEWLINE target = join(build_dir, header_dir, 'config.h')NEWLINE d = os.path.dirname(target)NEWLINE if not os.path.exists(d):NEWLINE os.makedirs(d)NEWLINENEWLINE if newer(__file__, target):NEWLINE config_cmd = config.get_config_cmd()NEWLINE log.info('Generating %s', target)NEWLINENEWLINE # Check sizeofNEWLINE moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)NEWLINENEWLINE # Check math library and C99 math funcs availabilityNEWLINE mathlibs = check_mathlib(config_cmd)NEWLINE moredefs.append(('MATHLIB', ','.join(mathlibs)))NEWLINENEWLINE check_math_capabilities(config_cmd, ext, moredefs, mathlibs)NEWLINE moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])NEWLINE moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])NEWLINENEWLINE # Signal checkNEWLINE if is_npy_no_signal():NEWLINE moredefs.append('__NPY_PRIVATE_NO_SIGNAL')NEWLINENEWLINE # Windows checksNEWLINE if sys.platform == 'win32' or os.name == 'nt':NEWLINE win32_checks(moredefs)NEWLINENEWLINE # C99 restrict keywordNEWLINE moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))NEWLINENEWLINE # Inline checkNEWLINE inline = config_cmd.check_inline()NEWLINENEWLINE if can_link_svml():NEWLINE moredefs.append(('NPY_CAN_LINK_SVML', 1))NEWLINENEWLINE # Use 
relaxed stride checkingNEWLINE if NPY_RELAXED_STRIDES_CHECKING:NEWLINE moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))NEWLINE else:NEWLINE moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 0))NEWLINENEWLINE # Use bogus stride debug aid when relaxed strides are enabledNEWLINE if NPY_RELAXED_STRIDES_DEBUG:NEWLINE moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))NEWLINE else:NEWLINE moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 0))NEWLINENEWLINE # Get long double representationNEWLINE rep = check_long_double_representation(config_cmd)NEWLINE moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))NEWLINENEWLINE if check_for_right_shift_internal_compiler_error(config_cmd):NEWLINE moredefs.append('NPY_DO_NOT_OPTIMIZE_LONG_right_shift')NEWLINE moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONG_right_shift')NEWLINE moredefs.append('NPY_DO_NOT_OPTIMIZE_LONGLONG_right_shift')NEWLINE moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONGLONG_right_shift')NEWLINENEWLINE # Generate the config.h file from moredefsNEWLINE with open(target, 'w') as target_f:NEWLINE for d in moredefs:NEWLINE if isinstance(d, str):NEWLINE target_f.write('#define %s\n' % (d))NEWLINE else:NEWLINE target_f.write('#define %s %s\n' % (d[0], d[1]))NEWLINENEWLINE # define inline to our keyword, or nothingNEWLINE target_f.write('#ifndef __cplusplus\n')NEWLINE if inline == 'inline':NEWLINE target_f.write('/* #undef inline */\n')NEWLINE else:NEWLINE target_f.write('#define inline %s\n' % inline)NEWLINE target_f.write('#endif\n')NEWLINENEWLINE # add the guard to make sure config.h is never included directly,NEWLINE # but always through npy_config.hNEWLINE target_f.write(textwrap.dedent("""NEWLINE #ifndef NUMPY_CORE_SRC_COMMON_NPY_CONFIG_H_NEWLINE #error config.h should never be included directly, include npy_config.h insteadNEWLINE #endifNEWLINE """))NEWLINENEWLINE log.info('File: %s' % target)NEWLINE with open(target) as target_f:NEWLINE log.info(target_f.read())NEWLINE log.info('EOF')NEWLINE else:NEWLINE mathlibs = []NEWLINE with open(target) as target_f:NEWLINE for line in target_f:NEWLINE s = '#define MATHLIB'NEWLINE if line.startswith(s):NEWLINE value = line[len(s):].strip()NEWLINE if value:NEWLINE mathlibs.extend(value.split(','))NEWLINENEWLINE # Ugly: this can be called within a library and not an extension,NEWLINE # in which case there is no libraries attributes (and none isNEWLINE # needed).NEWLINE if hasattr(ext, 'libraries'):NEWLINE ext.libraries.extend(mathlibs)NEWLINENEWLINE incl_dir = os.path.dirname(target)NEWLINE if incl_dir not in config.numpy_include_dirs:NEWLINE config.numpy_include_dirs.append(incl_dir)NEWLINENEWLINE return targetNEWLINENEWLINE def generate_numpyconfig_h(ext, build_dir):NEWLINE """Depends on config.h: generate_config_h has to be called before !"""NEWLINE # put common include directory in build_dir on search pathNEWLINE # allows using code generation in headersNEWLINE config.add_include_dirs(join(build_dir, "src", "common"))NEWLINE config.add_include_dirs(join(build_dir, "src", "npymath"))NEWLINENEWLINE target = join(build_dir, header_dir, '_numpyconfig.h')NEWLINE d = os.path.dirname(target)NEWLINE if not os.path.exists(d):NEWLINE os.makedirs(d)NEWLINE if newer(__file__, target):NEWLINE config_cmd = config.get_config_cmd()NEWLINE log.info('Generating %s', target)NEWLINENEWLINE # Check sizeofNEWLINE ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)NEWLINENEWLINE if is_npy_no_signal():NEWLINE moredefs.append(('NPY_NO_SIGNAL', 1))NEWLINENEWLINE if is_npy_no_smp():NEWLINE moredefs.append(('NPY_NO_SMP', 
1))NEWLINE else:NEWLINE moredefs.append(('NPY_NO_SMP', 0))NEWLINENEWLINE mathlibs = check_mathlib(config_cmd)NEWLINE moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])NEWLINE moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])NEWLINENEWLINE if NPY_RELAXED_STRIDES_CHECKING:NEWLINE moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))NEWLINENEWLINE if NPY_RELAXED_STRIDES_DEBUG:NEWLINE moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))NEWLINENEWLINE # Check whether we can use inttypes (C99) formatsNEWLINE if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):NEWLINE moredefs.append(('NPY_USE_C99_FORMATS', 1))NEWLINENEWLINE # visibility checkNEWLINE hidden_visibility = visibility_define(config_cmd)NEWLINE moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))NEWLINENEWLINE # Add the C API/ABI versionsNEWLINE moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))NEWLINE moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))NEWLINENEWLINE # Add moredefs to headerNEWLINE with open(target, 'w') as target_f:NEWLINE for d in moredefs:NEWLINE if isinstance(d, str):NEWLINE target_f.write('#define %s\n' % (d))NEWLINE else:NEWLINE target_f.write('#define %s %s\n' % (d[0], d[1]))NEWLINENEWLINE # Define __STDC_FORMAT_MACROSNEWLINE target_f.write(textwrap.dedent("""NEWLINE #ifndef __STDC_FORMAT_MACROSNEWLINE #define __STDC_FORMAT_MACROS 1NEWLINE #endifNEWLINE """))NEWLINENEWLINE # Dump the numpyconfig.h header to stdoutNEWLINE log.info('File: %s' % target)NEWLINE with open(target) as target_f:NEWLINE log.info(target_f.read())NEWLINE log.info('EOF')NEWLINE config.add_data_files((header_dir, target))NEWLINE return targetNEWLINENEWLINE def generate_api_func(module_name):NEWLINE def generate_api(ext, build_dir):NEWLINE script = join(codegen_dir, module_name + '.py')NEWLINE sys.path.insert(0, codegen_dir)NEWLINE try:NEWLINE m = __import__(module_name)NEWLINE log.info('executing %s', script)NEWLINE h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))NEWLINE finally:NEWLINE del sys.path[0]NEWLINE config.add_data_files((header_dir, h_file),NEWLINE (header_dir, doc_file))NEWLINE return (h_file,)NEWLINE return generate_apiNEWLINENEWLINE generate_numpy_api = generate_api_func('generate_numpy_api')NEWLINE generate_ufunc_api = generate_api_func('generate_ufunc_api')NEWLINENEWLINE config.add_include_dirs(join(local_dir, "src", "common"))NEWLINE config.add_include_dirs(join(local_dir, "src"))NEWLINE config.add_include_dirs(join(local_dir))NEWLINENEWLINE config.add_data_dir('include/numpy')NEWLINE config.add_include_dirs(join('src', 'npymath'))NEWLINE config.add_include_dirs(join('src', 'multiarray'))NEWLINE config.add_include_dirs(join('src', 'umath'))NEWLINE config.add_include_dirs(join('src', 'npysort'))NEWLINE config.add_include_dirs(join('src', '_simd'))NEWLINENEWLINE config.add_define_macros([("NPY_INTERNAL_BUILD", "1")]) # this macro indicates that Numpy build is in processNEWLINE config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])NEWLINE if sys.platform[:3] == "aix":NEWLINE config.add_define_macros([("_LARGE_FILES", None)])NEWLINE else:NEWLINE config.add_define_macros([("_FILE_OFFSET_BITS", "64")])NEWLINE config.add_define_macros([('_LARGEFILE_SOURCE', '1')])NEWLINE config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])NEWLINENEWLINE config.numpy_include_dirs.extend(config.paths('include'))NEWLINENEWLINE deps = [join('src', 'npymath', '_signbit.c'),NEWLINE join('include', 'numpy', '*object.h'),NEWLINE join(codegen_dir, 
'genapi.py'),NEWLINE ]NEWLINENEWLINE #######################################################################NEWLINE # npymath library #NEWLINE #######################################################################NEWLINENEWLINE subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])NEWLINENEWLINE def get_mathlib_info(*args):NEWLINE # Another ugly hack: the mathlib info is known once build_src is run,NEWLINE # but we cannot use add_installed_pkg_config here either, so we onlyNEWLINE # update the substitution dictionary during npymath buildNEWLINE config_cmd = config.get_config_cmd()NEWLINE # Check that the toolchain works, to fail early if it doesn'tNEWLINE # (avoid late errors with MATHLIB which are confusing if theNEWLINE # compiler does not work).NEWLINE for lang, test_code, note in (NEWLINE ('c', 'int main(void) { return 0;}', ''),NEWLINE ('c++', (NEWLINE 'int main(void)'NEWLINE '{ auto x = 0.0; return static_cast<int>(x); }'NEWLINE ), (NEWLINE 'note: A compiler with support for C++11 language 'NEWLINE 'features is required.'NEWLINE )NEWLINE ),NEWLINE ):NEWLINE is_cpp = lang == 'c++'NEWLINE if is_cpp:NEWLINE # this is a workaround to get rid of invalid c++ flagsNEWLINE # without doing big changes to config.NEWLINE # C is tested first, so the compiler should be available hereNEWLINE bk_c = config_cmd.compilerNEWLINE config_cmd.compiler = bk_c.cxx_compiler()NEWLINE st = config_cmd.try_link(test_code, lang=lang)NEWLINE if not st:NEWLINE # rerun the failing command in verbose modeNEWLINE config_cmd.compiler.verbose = TrueNEWLINE config_cmd.try_link(test_code, lang=lang)NEWLINE raise RuntimeError(NEWLINE f"Broken toolchain: cannot link a simple {lang.upper()} "NEWLINE f"program. {note}"NEWLINE )NEWLINE if is_cpp:NEWLINE config_cmd.compiler = bk_cNEWLINE mlibs = check_mathlib(config_cmd)NEWLINENEWLINE posix_mlib = ' '.join(['-l%s' % l for l in mlibs])NEWLINE msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])NEWLINE subst_dict["posix_mathlib"] = posix_mlibNEWLINE subst_dict["msvc_mathlib"] = msvc_mlibNEWLINENEWLINE npymath_sources = [join('src', 'npymath', 'npy_math_internal.h.src'),NEWLINE join('src', 'npymath', 'npy_math.c'),NEWLINE join('src', 'npymath', 'ieee754.c.src'),NEWLINE join('src', 'npymath', 'npy_math_complex.c.src'),NEWLINE join('src', 'npymath', 'halffloat.c')NEWLINE ]NEWLINENEWLINE def gl_if_msvc(build_cmd):NEWLINE """ Add flag if we are using MSVC compilerNEWLINENEWLINE We can't see this in our scope, because we have not initialized theNEWLINE distutils build command, so use this deferred calculation to run whenNEWLINE we are building the library.NEWLINE """NEWLINE if build_cmd.compiler.compiler_type == 'msvc':NEWLINE # explicitly disable whole-program optimizationNEWLINE return ['/GL-']NEWLINE return []NEWLINENEWLINE config.add_installed_library('npymath',NEWLINE sources=npymath_sources + [get_mathlib_info],NEWLINE install_dir='lib',NEWLINE build_info={NEWLINE 'include_dirs' : [], # empty list required for creating npy_math_internal.hNEWLINE 'extra_compiler_args': [gl_if_msvc],NEWLINE })NEWLINE config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",NEWLINE subst_dict)NEWLINE config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",NEWLINE subst_dict)NEWLINENEWLINE #######################################################################NEWLINE # multiarray_tests module #NEWLINE #######################################################################NEWLINENEWLINE config.add_extension('_multiarray_tests',NEWLINE sources=[join('src', 'multiarray', 
'_multiarray_tests.c.src'),NEWLINE join('src', 'common', 'mem_overlap.c'),NEWLINE join('src', 'common', 'npy_argparse.c'),NEWLINE join('src', 'common', 'npy_hashtable.c')],NEWLINE depends=[join('src', 'common', 'mem_overlap.h'),NEWLINE join('src', 'common', 'npy_argparse.h'),NEWLINE join('src', 'common', 'npy_hashtable.h'),NEWLINE join('src', 'common', 'npy_extint128.h')],NEWLINE libraries=['npymath'])NEWLINENEWLINE #######################################################################NEWLINE # _multiarray_umath module - common part #NEWLINE #######################################################################NEWLINENEWLINE common_deps = [NEWLINE join('src', 'common', 'dlpack', 'dlpack.h'),NEWLINE join('src', 'common', 'array_assign.h'),NEWLINE join('src', 'common', 'binop_override.h'),NEWLINE join('src', 'common', 'cblasfuncs.h'),NEWLINE join('src', 'common', 'lowlevel_strided_loops.h'),NEWLINE join('src', 'common', 'mem_overlap.h'),NEWLINE join('src', 'common', 'npy_argparse.h'),NEWLINE join('src', 'common', 'npy_cblas.h'),NEWLINE join('src', 'common', 'npy_config.h'),NEWLINE join('src', 'common', 'npy_ctypes.h'),NEWLINE join('src', 'common', 'npy_dlpack.h'),NEWLINE join('src', 'common', 'npy_extint128.h'),NEWLINE join('src', 'common', 'npy_import.h'),NEWLINE join('src', 'common', 'npy_hashtable.h'),NEWLINE join('src', 'common', 'npy_longdouble.h'),NEWLINE join('src', 'common', 'npy_svml.h'),NEWLINE join('src', 'common', 'templ_common.h.src'),NEWLINE join('src', 'common', 'ucsnarrow.h'),NEWLINE join('src', 'common', 'ufunc_override.h'),NEWLINE join('src', 'common', 'umathmodule.h'),NEWLINE join('src', 'common', 'numpyos.h'),NEWLINE join('src', 'common', 'npy_cpu_dispatch.h'),NEWLINE join('src', 'common', 'simd', 'simd.h'),NEWLINE ]NEWLINENEWLINE common_src = [NEWLINE join('src', 'common', 'array_assign.c'),NEWLINE join('src', 'common', 'mem_overlap.c'),NEWLINE join('src', 'common', 'npy_argparse.c'),NEWLINE join('src', 'common', 'npy_hashtable.c'),NEWLINE join('src', 'common', 'npy_longdouble.c'),NEWLINE join('src', 'common', 'templ_common.h.src'),NEWLINE join('src', 'common', 'ucsnarrow.c'),NEWLINE join('src', 'common', 'ufunc_override.c'),NEWLINE join('src', 'common', 'numpyos.c'),NEWLINE join('src', 'common', 'npy_cpu_features.c.src'),NEWLINE ]NEWLINENEWLINE if os.environ.get('NPY_USE_BLAS_ILP64', "0") != "0":NEWLINE blas_info = get_info('blas_ilp64_opt', 2)NEWLINE else:NEWLINE blas_info = get_info('blas_opt', 0)NEWLINENEWLINE have_blas = blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', [])NEWLINENEWLINE if have_blas:NEWLINE extra_info = blas_infoNEWLINE # These files are also in MANIFEST.in so that they are always inNEWLINE # the source distribution independently of HAVE_CBLAS.NEWLINE common_src.extend([join('src', 'common', 'cblasfuncs.c'),NEWLINE join('src', 'common', 'python_xerbla.c'),NEWLINE ])NEWLINE else:NEWLINE extra_info = {}NEWLINENEWLINE #######################################################################NEWLINE # _multiarray_umath module - multiarray part #NEWLINE #######################################################################NEWLINENEWLINE multiarray_deps = [NEWLINE join('src', 'multiarray', 'abstractdtypes.h'),NEWLINE join('src', 'multiarray', 'arrayobject.h'),NEWLINE join('src', 'multiarray', 'arraytypes.h'),NEWLINE join('src', 'multiarray', 'arrayfunction_override.h'),NEWLINE join('src', 'multiarray', 'array_coercion.h'),NEWLINE join('src', 'multiarray', 'array_method.h'),NEWLINE join('src', 'multiarray', 'npy_buffer.h'),NEWLINE 
join('src', 'multiarray', 'calculation.h'),NEWLINE join('src', 'multiarray', 'common.h'),NEWLINE join('src', 'multiarray', 'common_dtype.h'),NEWLINE join('src', 'multiarray', 'convert_datatype.h'),NEWLINE join('src', 'multiarray', 'convert.h'),NEWLINE join('src', 'multiarray', 'conversion_utils.h'),NEWLINE join('src', 'multiarray', 'ctors.h'),NEWLINE join('src', 'multiarray', 'descriptor.h'),NEWLINE join('src', 'multiarray', 'dtypemeta.h'),NEWLINE join('src', 'multiarray', 'dtype_transfer.h'),NEWLINE join('src', 'multiarray', 'dragon4.h'),NEWLINE join('src', 'multiarray', 'einsum_debug.h'),NEWLINE join('src', 'multiarray', 'einsum_sumprod.h'),NEWLINE join('src', 'multiarray', 'experimental_public_dtype_api.h'),NEWLINE join('src', 'multiarray', 'getset.h'),NEWLINE join('src', 'multiarray', 'hashdescr.h'),NEWLINE join('src', 'multiarray', 'iterators.h'),NEWLINE join('src', 'multiarray', 'legacy_dtype_implementation.h'),NEWLINE join('src', 'multiarray', 'mapping.h'),NEWLINE join('src', 'multiarray', 'methods.h'),NEWLINE join('src', 'multiarray', 'multiarraymodule.h'),NEWLINE join('src', 'multiarray', 'nditer_impl.h'),NEWLINE join('src', 'multiarray', 'number.h'),NEWLINE join('src', 'multiarray', 'refcount.h'),NEWLINE join('src', 'multiarray', 'scalartypes.h'),NEWLINE join('src', 'multiarray', 'sequence.h'),NEWLINE join('src', 'multiarray', 'shape.h'),NEWLINE join('src', 'multiarray', 'strfuncs.h'),NEWLINE join('src', 'multiarray', 'typeinfo.h'),NEWLINE join('src', 'multiarray', 'usertypes.h'),NEWLINE join('src', 'multiarray', 'vdot.h'),NEWLINE join('src', 'multiarray', 'textreading', 'readtext.h'),NEWLINE join('include', 'numpy', 'arrayobject.h'),NEWLINE join('include', 'numpy', '_neighborhood_iterator_imp.h'),NEWLINE join('include', 'numpy', 'npy_endian.h'),NEWLINE join('include', 'numpy', 'arrayscalars.h'),NEWLINE join('include', 'numpy', 'noprefix.h'),NEWLINE join('include', 'numpy', 'npy_interrupt.h'),NEWLINE join('include', 'numpy', 'npy_3kcompat.h'),NEWLINE join('include', 'numpy', 'npy_math.h'),NEWLINE join('include', 'numpy', 'halffloat.h'),NEWLINE join('include', 'numpy', 'npy_common.h'),NEWLINE join('include', 'numpy', 'npy_os.h'),NEWLINE join('include', 'numpy', 'utils.h'),NEWLINE join('include', 'numpy', 'ndarrayobject.h'),NEWLINE join('include', 'numpy', 'npy_cpu.h'),NEWLINE join('include', 'numpy', 'numpyconfig.h'),NEWLINE join('include', 'numpy', 'ndarraytypes.h'),NEWLINE join('include', 'numpy', 'npy_1_7_deprecated_api.h'),NEWLINE # add library sources as distutils does not consider librariesNEWLINE # dependenciesNEWLINE ] + npymath_sourcesNEWLINENEWLINE multiarray_src = [NEWLINE join('src', 'multiarray', 'abstractdtypes.c'),NEWLINE join('src', 'multiarray', 'alloc.c'),NEWLINE join('src', 'multiarray', 'arrayobject.c'),NEWLINE join('src', 'multiarray', 'arraytypes.c.src'),NEWLINE join('src', 'multiarray', 'array_coercion.c'),NEWLINE join('src', 'multiarray', 'array_method.c'),NEWLINE join('src', 'multiarray', 'array_assign_scalar.c'),NEWLINE join('src', 'multiarray', 'array_assign_array.c'),NEWLINE join('src', 'multiarray', 'arrayfunction_override.c'),NEWLINE join('src', 'multiarray', 'buffer.c'),NEWLINE join('src', 'multiarray', 'calculation.c'),NEWLINE join('src', 'multiarray', 'compiled_base.c'),NEWLINE join('src', 'multiarray', 'common.c'),NEWLINE join('src', 'multiarray', 'common_dtype.c'),NEWLINE join('src', 'multiarray', 'convert.c'),NEWLINE join('src', 'multiarray', 'convert_datatype.c'),NEWLINE join('src', 'multiarray', 'conversion_utils.c'),NEWLINE join('src', 
'multiarray', 'ctors.c'),NEWLINE join('src', 'multiarray', 'datetime.c'),NEWLINE join('src', 'multiarray', 'datetime_strings.c'),NEWLINE join('src', 'multiarray', 'datetime_busday.c'),NEWLINE join('src', 'multiarray', 'datetime_busdaycal.c'),NEWLINE join('src', 'multiarray', 'descriptor.c'),NEWLINE join('src', 'multiarray', 'dlpack.c'),NEWLINE join('src', 'multiarray', 'dtypemeta.c'),NEWLINE join('src', 'multiarray', 'dragon4.c'),NEWLINE join('src', 'multiarray', 'dtype_transfer.c'),NEWLINE join('src', 'multiarray', 'einsum.c.src'),NEWLINE join('src', 'multiarray', 'einsum_sumprod.c.src'),NEWLINE join('src', 'multiarray', 'experimental_public_dtype_api.c'),NEWLINE join('src', 'multiarray', 'flagsobject.c'),NEWLINE join('src', 'multiarray', 'getset.c'),NEWLINE join('src', 'multiarray', 'hashdescr.c'),NEWLINE join('src', 'multiarray', 'item_selection.c'),NEWLINE join('src', 'multiarray', 'iterators.c'),NEWLINE join('src', 'multiarray', 'legacy_dtype_implementation.c'),NEWLINE join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),NEWLINE join('src', 'multiarray', 'mapping.c'),NEWLINE join('src', 'multiarray', 'methods.c'),NEWLINE join('src', 'multiarray', 'multiarraymodule.c'),NEWLINE join('src', 'multiarray', 'nditer_templ.c.src'),NEWLINE join('src', 'multiarray', 'nditer_api.c'),NEWLINE join('src', 'multiarray', 'nditer_constr.c'),NEWLINE join('src', 'multiarray', 'nditer_pywrap.c'),NEWLINE join('src', 'multiarray', 'number.c'),NEWLINE join('src', 'multiarray', 'refcount.c'),NEWLINE join('src', 'multiarray', 'sequence.c'),NEWLINE join('src', 'multiarray', 'shape.c'),NEWLINE join('src', 'multiarray', 'scalarapi.c'),NEWLINE join('src', 'multiarray', 'scalartypes.c.src'),NEWLINE join('src', 'multiarray', 'strfuncs.c'),NEWLINE join('src', 'multiarray', 'temp_elide.c'),NEWLINE join('src', 'multiarray', 'typeinfo.c'),NEWLINE join('src', 'multiarray', 'usertypes.c'),NEWLINE join('src', 'multiarray', 'vdot.c'),NEWLINE join('src', 'common', 'npy_sort.h.src'),NEWLINE join('src', 'npysort', 'x86-qsort.dispatch.c.src'),NEWLINE join('src', 'npysort', 'quicksort.c.src'),NEWLINE join('src', 'npysort', 'mergesort.cpp'),NEWLINE join('src', 'npysort', 'timsort.cpp'),NEWLINE join('src', 'npysort', 'heapsort.cpp'),NEWLINE join('src', 'npysort', 'radixsort.cpp'),NEWLINE join('src', 'common', 'npy_partition.h.src'),NEWLINE join('src', 'npysort', 'selection.cpp'),NEWLINE join('src', 'common', 'npy_binsearch.h'),NEWLINE join('src', 'npysort', 'binsearch.cpp'),NEWLINE join('src', 'multiarray', 'textreading', 'conversions.c'),NEWLINE join('src', 'multiarray', 'textreading', 'field_types.c'),NEWLINE join('src', 'multiarray', 'textreading', 'growth.c'),NEWLINE join('src', 'multiarray', 'textreading', 'readtext.c'),NEWLINE join('src', 'multiarray', 'textreading', 'rows.c'),NEWLINE join('src', 'multiarray', 'textreading', 'stream_pyobject.c'),NEWLINE join('src', 'multiarray', 'textreading', 'str_to_int.c'),NEWLINE join('src', 'multiarray', 'textreading', 'tokenize.c.src'),NEWLINE ]NEWLINENEWLINE #######################################################################NEWLINE # _multiarray_umath module - umath part #NEWLINE #######################################################################NEWLINENEWLINE def generate_umath_c(ext, build_dir):NEWLINE target = join(build_dir, header_dir, '__umath_generated.c')NEWLINE dir = os.path.dirname(target)NEWLINE if not os.path.exists(dir):NEWLINE os.makedirs(dir)NEWLINE script = generate_umath_pyNEWLINE if newer(script, target):NEWLINE with open(target, 'w') as f:NEWLINE 
f.write(generate_umath.make_code(generate_umath.defdict,NEWLINE generate_umath.__file__))NEWLINE return []NEWLINENEWLINE def generate_umath_doc_header(ext, build_dir):NEWLINE from numpy.distutils.misc_util import exec_mod_from_locationNEWLINENEWLINE target = join(build_dir, header_dir, '_umath_doc_generated.h')NEWLINE dir = os.path.dirname(target)NEWLINE if not os.path.exists(dir):NEWLINE os.makedirs(dir)NEWLINENEWLINE generate_umath_doc_py = join(codegen_dir, 'generate_umath_doc.py')NEWLINE if newer(generate_umath_doc_py, target):NEWLINE n = dot_join(config.name, 'generate_umath_doc')NEWLINE generate_umath_doc = exec_mod_from_location(NEWLINE '_'.join(n.split('.')), generate_umath_doc_py)NEWLINE generate_umath_doc.write_code(target)NEWLINENEWLINE umath_src = [NEWLINE join('src', 'umath', 'umathmodule.c'),NEWLINE join('src', 'umath', 'reduction.c'),NEWLINE join('src', 'umath', 'funcs.inc.src'),NEWLINE join('src', 'umath', 'simd.inc.src'),NEWLINE join('src', 'umath', 'loops.h.src'),NEWLINE join('src', 'umath', 'loops_utils.h.src'),NEWLINE join('src', 'umath', 'loops.c.src'),NEWLINE join('src', 'umath', 'loops_unary_fp.dispatch.c.src'),NEWLINE join('src', 'umath', 'loops_arithm_fp.dispatch.c.src'),NEWLINE join('src', 'umath', 'loops_arithmetic.dispatch.c.src'),NEWLINE join('src', 'umath', 'loops_minmax.dispatch.c.src'),NEWLINE join('src', 'umath', 'loops_trigonometric.dispatch.c.src'),NEWLINE join('src', 'umath', 'loops_umath_fp.dispatch.c.src'),NEWLINE join('src', 'umath', 'loops_exponent_log.dispatch.c.src'),NEWLINE join('src', 'umath', 'loops_hyperbolic.dispatch.c.src'),NEWLINE join('src', 'umath', 'matmul.h.src'),NEWLINE join('src', 'umath', 'matmul.c.src'),NEWLINE join('src', 'umath', 'clip.h'),NEWLINE join('src', 'umath', 'clip.cpp'),NEWLINE join('src', 'umath', 'dispatching.c'),NEWLINE join('src', 'umath', 'legacy_array_method.c'),NEWLINE join('src', 'umath', 'wrapping_array_method.c'),NEWLINE join('src', 'umath', 'ufunc_object.c'),NEWLINE join('src', 'umath', 'extobj.c'),NEWLINE join('src', 'umath', 'scalarmath.c.src'),NEWLINE join('src', 'umath', 'ufunc_type_resolution.c'),NEWLINE join('src', 'umath', 'override.c'),NEWLINE # For testing. 
Eventually, should use public API and be separate:NEWLINE join('src', 'umath', '_scaled_float_dtype.c'),NEWLINE ]NEWLINENEWLINE umath_deps = [NEWLINE generate_umath_py,NEWLINE join('include', 'numpy', 'npy_math.h'),NEWLINE join('include', 'numpy', 'halffloat.h'),NEWLINE join('src', 'multiarray', 'common.h'),NEWLINE join('src', 'multiarray', 'number.h'),NEWLINE join('src', 'common', 'templ_common.h.src'),NEWLINE join('src', 'umath', 'simd.inc.src'),NEWLINE join('src', 'umath', 'override.h'),NEWLINE join(codegen_dir, 'generate_ufunc_api.py'),NEWLINE join(codegen_dir, 'ufunc_docstrings.py'),NEWLINE ]NEWLINENEWLINE svml_path = join('numpy', 'core', 'src', 'umath', 'svml')NEWLINE svml_objs = []NEWLINE # we have converted the following routines into universal intrinsics,NEWLINE # bringing their performance benefits to all platforms rather thanNEWLINE # only avx512 on linux, with no performance/accuracy regression;NEWLINE # in fact the result is faster and more maintainable code.NEWLINE svml_filter = (NEWLINE 'svml_z0_tanh_d_la.s', 'svml_z0_tanh_s_la.s'NEWLINE )NEWLINE if can_link_svml() and check_svml_submodule(svml_path):NEWLINE svml_objs = glob.glob(svml_path + '/**/*.s', recursive=True)NEWLINE svml_objs = [o for o in svml_objs if not o.endswith(svml_filter)]NEWLINENEWLINE config.add_extension('_multiarray_umath',NEWLINE # Forcing C language even though we have C++ sources.NEWLINE # This selects the C linker and avoids linking the C++ runtime.NEWLINE language = 'c',NEWLINE sources=multiarray_src + umath_src +NEWLINE common_src +NEWLINE [generate_config_h,NEWLINE generate_numpyconfig_h,NEWLINE generate_numpy_api,NEWLINE join(codegen_dir, 'generate_numpy_api.py'),NEWLINE join('*.py'),NEWLINE generate_umath_c,NEWLINE generate_umath_doc_header,NEWLINE generate_ufunc_api,NEWLINE ],NEWLINE depends=deps + multiarray_deps + umath_deps +NEWLINE common_deps,NEWLINE libraries=['npymath'],NEWLINE extra_objects=svml_objs,NEWLINE extra_info=extra_info,NEWLINE extra_cxx_compile_args=['-std=c++11',NEWLINE '-D__STDC_VERSION__=0',NEWLINE '-fno-exceptions',NEWLINE '-fno-rtti'])NEWLINENEWLINE #######################################################################NEWLINE # umath_tests module #NEWLINE #######################################################################NEWLINENEWLINE config.add_extension('_umath_tests', sources=[NEWLINE join('src', 'umath', '_umath_tests.c.src'),NEWLINE join('src', 'umath', '_umath_tests.dispatch.c'),NEWLINE join('src', 'common', 'npy_cpu_features.c.src'),NEWLINE ])NEWLINENEWLINE #######################################################################NEWLINE # custom rational dtype module #NEWLINE #######################################################################NEWLINENEWLINE config.add_extension('_rational_tests',NEWLINE sources=[join('src', 'umath', '_rational_tests.c.src')])NEWLINENEWLINE #######################################################################NEWLINE # struct_ufunc_test module #NEWLINE #######################################################################NEWLINENEWLINE config.add_extension('_struct_ufunc_tests',NEWLINE sources=[join('src', 'umath', '_struct_ufunc_tests.c.src')])NEWLINENEWLINENEWLINE #######################################################################NEWLINE # operand_flag_tests module #NEWLINE #######################################################################NEWLINENEWLINE config.add_extension('_operand_flag_tests',NEWLINE sources=[join('src', 'umath', '_operand_flag_tests.c')])NEWLINENEWLINE 
#######################################################################NEWLINE # SIMD module #NEWLINE #######################################################################NEWLINENEWLINE config.add_extension('_simd', sources=[NEWLINE join('src', 'common', 'npy_cpu_features.c.src'),NEWLINE join('src', '_simd', '_simd.c'),NEWLINE join('src', '_simd', '_simd_inc.h.src'),NEWLINE join('src', '_simd', '_simd_data.inc.src'),NEWLINE join('src', '_simd', '_simd.dispatch.c.src'),NEWLINE ], depends=[NEWLINE join('src', 'common', 'npy_cpu_dispatch.h'),NEWLINE join('src', 'common', 'simd', 'simd.h'),NEWLINE join('src', '_simd', '_simd.h'),NEWLINE join('src', '_simd', '_simd_inc.h.src'),NEWLINE join('src', '_simd', '_simd_data.inc.src'),NEWLINE join('src', '_simd', '_simd_arg.inc'),NEWLINE join('src', '_simd', '_simd_convert.inc'),NEWLINE join('src', '_simd', '_simd_easyintrin.inc'),NEWLINE join('src', '_simd', '_simd_vector.inc'),NEWLINE ])NEWLINENEWLINE config.add_subpackage('tests')NEWLINE config.add_data_dir('tests/data')NEWLINE config.add_data_dir('tests/examples')NEWLINE config.add_data_files('*.pyi')NEWLINENEWLINE config.make_svn_version_py()NEWLINENEWLINE return configNEWLINENEWLINEif __name__ == '__main__':NEWLINE from numpy.distutils.core import setupNEWLINE setup(configuration=configuration)NEWLINE
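# A minimal, self-contained sketch of the pickle-based "call once" pattern that CallOnceOnly implements in the setup.py above, assuming only the standard library; expensive_check and CachedCheck are illustrative names, not numpy APIs. The first call runs the expensive probe and stores a pickled snapshot; later calls return a deep copy, so callers may mutate the result without corrupting the cache.NEWLINEimport copyNEWLINEimport pickleNEWLINENEWLINENEWLINEdef expensive_check():NEWLINE # stands in for a slow configuration probe such as check_types()NEWLINE return {"sizeof_short": 2, "sizeof_int": 4}NEWLINENEWLINENEWLINEclass CachedCheck:NEWLINE def __init__(self):NEWLINE self._cached = NoneNEWLINENEWLINE def check(self):NEWLINE if self._cached is None:NEWLINE out = expensive_check()NEWLINE self._cached = pickle.dumps(out) # snapshot taken before returningNEWLINE else:NEWLINE out = copy.deepcopy(pickle.loads(self._cached))NEWLINE return outNEWLINENEWLINENEWLINEc = CachedCheck()NEWLINEfirst = c.check() # runs the probe onceNEWLINEfirst["sizeof_int"] = 0 # mutating the returned dict...NEWLINEassert c.check()["sizeof_int"] == 4 # ...does not poison the cacheNEWLINE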
"""NEWLINEModule: 'ujson' on micropython-v1.15-esp8266NEWLINE"""NEWLINE# MCU: {'ver': 'v1.15', 'port': 'esp8266', 'arch': 'xtensa', 'sysname': 'esp8266', 'release': '1.15', 'name': 'micropython', 'mpy': 9733, 'version': '1.15', 'machine': 'ESP module with ESP8266', 'build': '', 'nodename': 'esp8266', 'platform': 'esp8266', 'family': 'micropython'}NEWLINE# Stubber: 1.5.4NEWLINEfrom typing import AnyNEWLINENEWLINENEWLINEdef dump(*args, **kwargs) -> Any:NEWLINE ...NEWLINENEWLINENEWLINEdef dumps(*args, **kwargs) -> Any:NEWLINE ...NEWLINENEWLINENEWLINEdef load(*args, **kwargs) -> Any:NEWLINE ...NEWLINENEWLINENEWLINEdef loads(*args, **kwargs) -> Any:NEWLINE ...NEWLINE
###############################################################################NEWLINE# Copyright Kitware Inc. and ContributorsNEWLINE# Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0)NEWLINE# See accompanying Copyright.txt and LICENSE files for detailsNEWLINE###############################################################################NEWLINENEWLINE"""NEWLINEReferences:NEWLINE https://github.com/alykhantejani/nninitNEWLINE"""NEWLINEimport numpy as npNEWLINEimport torchNEWLINEfrom torch.autograd import VariableNEWLINENEWLINENEWLINEdef uniform(tensor, a=0, b=1):NEWLINE """Fills the input Tensor or Variable with values drawn from a uniform U(a,b)NEWLINENEWLINE Args:NEWLINE tensor: a n-dimension torch.TensorNEWLINE a: the lower bound of the uniform distributionNEWLINE b: the upper bound of the uniform distributionNEWLINENEWLINE Examples:NEWLINE >>> w = torch.Tensor(3, 5)NEWLINE >>> nninit.uniform(w)NEWLINE """NEWLINE if isinstance(tensor, Variable):NEWLINE uniform(tensor.data, a=a, b=b)NEWLINE return tensorNEWLINE else:NEWLINE return tensor.uniform_(a, b)NEWLINENEWLINENEWLINEdef normal(tensor, mean=0, std=1):NEWLINE """Fills the input Tensor or Variable with values drawn from a normalNEWLINE distribution with the given mean and stdNEWLINENEWLINE Args:NEWLINE tensor: a n-dimension torch.TensorNEWLINE mean: the mean of the normal distributionNEWLINE std: the standard deviation of the normal distributionNEWLINENEWLINE Examples:NEWLINE >>> w = torch.Tensor(3, 5)NEWLINE >>> nninit.normal(w)NEWLINE """NEWLINE if isinstance(tensor, Variable):NEWLINE normal(tensor.data, mean=mean, std=std)NEWLINE return tensorNEWLINE else:NEWLINE return tensor.normal_(mean, std)NEWLINENEWLINENEWLINEdef constant(tensor, val):NEWLINE """Fills the input Tensor or Variable with the value `val`NEWLINENEWLINE Args:NEWLINE tensor: a n-dimension torch.TensorNEWLINE val: the value to fill the tensor withNEWLINENEWLINE Examples:NEWLINE >>> w = torch.Tensor(3, 5)NEWLINE >>> nninit.constant(w)NEWLINE """NEWLINE if isinstance(tensor, Variable):NEWLINE constant(tensor.data, val)NEWLINE return tensorNEWLINE else:NEWLINE return tensor.fill_(val)NEWLINENEWLINENEWLINEdef _calculate_fan_in_and_fan_out(tensor):NEWLINE if tensor.ndimension() < 2:NEWLINE raise ValueError(NEWLINE "fan in and fan out cannot be computed for tensor of size %s" % (tensor.size(),))NEWLINENEWLINE if tensor.ndimension() == 2: # LinearNEWLINE fan_in = tensor.size(1)NEWLINE fan_out = tensor.size(0)NEWLINE else:NEWLINE num_input_fmaps = tensor.size(1)NEWLINE num_output_fmaps = tensor.size(0)NEWLINE receptive_field_size = np.prod(tensor.numpy().shape[2:])NEWLINE fan_in = num_input_fmaps * receptive_field_sizeNEWLINE fan_out = num_output_fmaps * receptive_field_sizeNEWLINENEWLINE return fan_in, fan_outNEWLINENEWLINENEWLINEdef xavier_uniform(tensor, gain=1):NEWLINE """NEWLINE Fills the input Tensor or Variable with values according to the methodNEWLINE described in "Understanding the difficulty of training deep feedforwardNEWLINE neural networks" - Glorot, X. 
and Bengio, Y., using a uniform distribution.NEWLINENEWLINE The resulting tensor will have values sampled from U(-a, a) whereNEWLINE a = gain * sqrt(2/(fan_in + fan_out))NEWLINENEWLINE Args:NEWLINE tensor: a n-dimension torch.TensorNEWLINE gain: an optional scaling factor to be appliedNEWLINENEWLINE Examples:NEWLINE >>> w = torch.Tensor(3, 5)NEWLINE >>> nninit.xavier_uniform(w, gain=np.sqrt(2.0))NEWLINE """NEWLINE if isinstance(tensor, Variable):NEWLINE xavier_uniform(tensor.data, gain=gain)NEWLINE return tensorNEWLINE else:NEWLINE fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)NEWLINE std = gain * np.sqrt(2.0 / (fan_in + fan_out))NEWLINE a = np.sqrt(3.0) * stdNEWLINE return tensor.uniform_(-a, a)NEWLINENEWLINENEWLINEdef xavier_normal(tensor, gain=1):NEWLINE """Fills the input Tensor or Variable with values according to the methodNEWLINE described in "Understanding the difficulty of trainingNEWLINE deep feedforward neural networks" - Glorot, X. and Bengio, Y., usingNEWLINE a normal distribution.NEWLINENEWLINE The resulting tensor will have values sampled from normal distribution with mean=0 andNEWLINE std = gain * sqrt(2/(fan_in + fan_out))NEWLINENEWLINE Args:NEWLINE tensor: a n-dimension torch.TensorNEWLINE gain: an optional scaling factor to be appliedNEWLINENEWLINE Examples:NEWLINE >>> w = torch.Tensor(3, 5)NEWLINE >>> nninit.xavier_normal(w, gain=np.sqrt(2.0))NEWLINE """NEWLINE if isinstance(tensor, Variable):NEWLINE xavier_normal(tensor.data, gain=gain)NEWLINE return tensorNEWLINE else:NEWLINE fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)NEWLINE std = gain * np.sqrt(2.0 / (fan_in + fan_out))NEWLINE return tensor.normal_(0, std)NEWLINENEWLINENEWLINEdef he_uniform(tensor, gain=1):NEWLINE """NEWLINE Fills the input Tensor or Variable with values according to the methodNEWLINE described in "Delving deep into rectifiers: Surpassing human-levelNEWLINE performance on ImageNet classification" - He, K. et al using a uniformNEWLINE distribution.NEWLINENEWLINE The resulting tensor will have values sampled from U(-a, a) where a = gain * sqrt(1/(fan_in))NEWLINENEWLINE Args:NEWLINE tensor: a n-dimension torch.TensorNEWLINE gain: an optional scaling factor to be appliedNEWLINENEWLINE Examples:NEWLINE >>> w = torch.Tensor(3, 5)NEWLINE >>> nninit.he_uniform(w, gain=np.sqrt(2.0))NEWLINE """NEWLINENEWLINE if isinstance(tensor, Variable):NEWLINE he_uniform(tensor.data, gain=gain)NEWLINE return tensorNEWLINE else:NEWLINE fan_in, _ = _calculate_fan_in_and_fan_out(tensor)NEWLINE std = gain * np.sqrt(1.0 / fan_in)NEWLINE a = np.sqrt(3.0) * stdNEWLINE return tensor.uniform_(-a, a)NEWLINENEWLINENEWLINEdef he_normal(tensor, gain=1):NEWLINE """NEWLINE Fills the input Tensor or Variable with values according to the methodNEWLINE described in "Delving deep into rectifiers: Surpassing human-levelNEWLINE performance on ImageNet classification" - He, K. 
et al using a normalNEWLINE distribution.NEWLINENEWLINE The resulting tensor will have values sampled from normal distribution withNEWLINE mean=0 and std = gain * sqrt(1/(fan_in))NEWLINENEWLINE Args:NEWLINE tensor: a n-dimension torch.TensorNEWLINE gain: an optional scaling factor to be appliedNEWLINENEWLINE Examples:NEWLINE >>> w = torch.Tensor(3, 5)NEWLINE >>> he_normal(w, gain=np.sqrt(2.0))NEWLINE """NEWLINE if isinstance(tensor, Variable):NEWLINE he_normal(tensor.data, gain=gain)NEWLINE return tensorNEWLINE else:NEWLINE fan_in, _ = _calculate_fan_in_and_fan_out(tensor)NEWLINE std = gain * np.sqrt(1.0 / fan_in)NEWLINE return tensor.normal_(0, std)NEWLINENEWLINENEWLINEdef orthogonal(tensor, gain=1):NEWLINE """Fills the input Tensor or Variable with a (semi) orthogonal matrix.NEWLINE The input tensor must have at least 2 dimensions,NEWLINE and for tensors with more than 2 dimensions the trailing dimensionsNEWLINE are flattened: the tensor is viewed as a 2D representation withNEWLINE rows equal to the first dimension and columns equal to the product ofNEWLINE the remaining dimensions.NEWLINE Reference: "Exact solutions to the nonlinear dynamics of learning inNEWLINE deep linear neural networks" - Saxe, A. et al.NEWLINENEWLINE Args:NEWLINE tensor: a n-dimension torch.Tensor, where n >= 2NEWLINE gain: optional gain to be appliedNEWLINENEWLINE Examples:NEWLINE >>> w = torch.Tensor(3, 5)NEWLINE >>> nninit.orthogonal(w)NEWLINE """NEWLINE if isinstance(tensor, Variable):NEWLINE orthogonal(tensor.data, gain=gain)NEWLINE return tensorNEWLINE else:NEWLINE if tensor.ndimension() < 2:NEWLINE raise ValueError(NEWLINE "Only tensors with 2 or more dimensions are supported.")NEWLINENEWLINE flattened_shape = (tensor.size(0), int(NEWLINE np.prod(tensor.numpy().shape[1:])))NEWLINE flattened = torch.Tensor(NEWLINE flattened_shape[0], flattened_shape[1]).normal_(0, 1)NEWLINENEWLINE u, s, v = np.linalg.svd(flattened.numpy(), full_matrices=False)NEWLINE if u.shape == flattened.numpy().shape:NEWLINE tensor.view_as(flattened).copy_(torch.from_numpy(u))NEWLINE else:NEWLINE tensor.view_as(flattened).copy_(torch.from_numpy(v))NEWLINENEWLINE tensor.mul_(gain)NEWLINE return tensorNEWLINENEWLINENEWLINEdef sparse(tensor, sparsity, std=0.01):NEWLINE """Fills the 2D input Tensor or Variable as a sparse matrix,NEWLINE where the non-zero elements will be drawn from aNEWLINE normal distribution with mean=0 and std=`std`.NEWLINENEWLINE Args:NEWLINE tensor: a n-dimension torch.TensorNEWLINE sparsity: The fraction of elements in each column to be set to zeroNEWLINE std: the standard deviation of the normal distribution used to generate the non-zero valuesNEWLINENEWLINE Examples:NEWLINE >>> w = torch.Tensor(3, 5)NEWLINE >>> nninit.sparse(w, sparsity=0.1)NEWLINE """NEWLINE if isinstance(tensor, Variable):NEWLINE sparse(tensor.data, sparsity, std=std)NEWLINE return tensorNEWLINE else:NEWLINE if tensor.ndimension() != 2:NEWLINE raise ValueError(NEWLINE "Sparse initialization only supported for 2D inputs")NEWLINE tensor.normal_(0, std)NEWLINE rows, cols = tensor.size(0), tensor.size(1)NEWLINE # each column holds `rows` elements, so the per-column zero count scales with rowsNEWLINE num_zeros = int(np.ceil(rows * sparsity))NEWLINENEWLINE for col_idx in range(tensor.size(1)):NEWLINE row_indices = np.arange(rows)NEWLINE np.random.shuffle(row_indices)NEWLINE zero_indices = row_indices[:num_zeros]NEWLINE tensor.numpy()[zero_indices, col_idx] = 0NEWLINENEWLINE return tensorNEWLINENEWLINENEWLINEdef shock_he(tensor, gain=.00001):NEWLINE """NEWLINE 
Adds very small He-initialized values to the current tensor state.NEWLINE Helps tensor achieve full rank in case it lost it.NEWLINENEWLINE Example:NEWLINE >>> tensor = torch.eye(3, 3)NEWLINE >>> tensor[0, 0] = 0NEWLINE >>> np.linalg.matrix_rank(tensor.numpy())NEWLINE 2NEWLINE >>> shock_he(tensor, gain=.00001)NEWLINE >>> np.linalg.matrix_rank(tensor.numpy())NEWLINE 3NEWLINE """NEWLINE if isinstance(tensor, Variable):NEWLINE shock_he(tensor.data, gain)NEWLINE return tensorNEWLINE else:NEWLINE fan_in, _ = _calculate_fan_in_and_fan_out(tensor)NEWLINE std = gain * np.sqrt(1.0 / fan_in)NEWLINE prb = torch.randn(tensor.shape) * stdNEWLINE tensor += prbNEWLINE return tensorNEWLINENEWLINENEWLINEdef shock(tensor, scale=.1):NEWLINE if isinstance(tensor, Variable):NEWLINE shock(tensor.data, scale)NEWLINE return tensorNEWLINE else:NEWLINE # shock by some fraction of the stdNEWLINE std = tensor.std() * scaleNEWLINE prb = torch.randn(tensor.shape) * stdNEWLINE tensor += prbNEWLINE return tensorNEWLINENEWLINENEWLINEdef shock_outward(tensor, scale=.1, a_min=.01):NEWLINE """NEWLINE Send weights away from zero.NEWLINE """NEWLINENEWLINE if isinstance(tensor, Variable):NEWLINE shock_outward(tensor.data, scale)NEWLINE return tensorNEWLINE else:NEWLINE std = max(torch.abs(tensor).max(), a_min) * scaleNEWLINE # perturb outwardNEWLINE offset = np.abs(torch.randn(tensor.shape) * std) * torch.sign(tensor)NEWLINE tensor += offsetNEWLINE return tensorNEWLINE
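# A small numeric check of the Xavier-uniform bound computed above, assuming a Linear-style weight of shape (3, 5): fan_in = 5, fan_out = 3, and samples lie in [-a, a] with a = gain * sqrt(3) * sqrt(2 / (fan_in + fan_out)). Pure numpy, so it runs without torch; nninit.xavier_uniform applies the same bound in place on the tensor.NEWLINEimport numpy as npNEWLINENEWLINEfan_in, fan_out = 5, 3NEWLINEgain = np.sqrt(2.0)NEWLINEstd = gain * np.sqrt(2.0 / (fan_in + fan_out)) # target standard deviationNEWLINEa = np.sqrt(3.0) * std # uniform bound whose std equals the target stdNEWLINENEWLINEw = np.random.uniform(-a, a, size=(fan_out, fan_in))NEWLINEassert np.all(np.abs(w) <= a)NEWLINE# the empirical std of U(-a, a) approaches a / sqrt(3) == std for large samplesNEWLINEprint(abs(w.std() - std))NEWLINE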
"""Set up the Python API for dingz devices."""NEWLINEimport osNEWLINENEWLINEimport sysNEWLINENEWLINEfrom setuptools import setup, find_packagesNEWLINENEWLINEhere = os.path.abspath(os.path.dirname(__file__))NEWLINENEWLINEwith open(os.path.join(here, "README.rst"), encoding="utf-8") as readme:NEWLINE long_description = readme.read()NEWLINENEWLINEif sys.argv[-1] == "publish":NEWLINE os.system("python3 setup.py sdist upload")NEWLINE sys.exit()NEWLINENEWLINEsetup(NEWLINE name="python-dingz",NEWLINE version="0.4.0.dev1",NEWLINE description="Python API for interacting with Dingz devices",NEWLINE long_description=long_description,NEWLINE url="https://github.com/home-assistant-ecosystem/python-dingz",NEWLINE author="Fabian Affolter",NEWLINE author_email="fabian@affolter-engineering.ch",NEWLINE license="Apache License 2.0",NEWLINE install_requires=["aiohttp<4", "async_timeout<4", "click"],NEWLINE packages=find_packages(),NEWLINE zip_safe=True,NEWLINE include_package_data=True,NEWLINE entry_points={"console_scripts": ["dingz = dingz.cli:main"]},NEWLINE classifiers=[NEWLINE "Development Status :: 3 - Alpha",NEWLINE "Environment :: Console",NEWLINE "Intended Audience :: Developers",NEWLINE "License :: OSI Approved :: Apache Software License",NEWLINE "Operating System :: MacOS :: MacOS X",NEWLINE "Operating System :: Microsoft :: Windows",NEWLINE "Operating System :: POSIX",NEWLINE "Programming Language :: Python :: 3.7",NEWLINE "Programming Language :: Python :: 3.8",NEWLINE "Topic :: Utilities",NEWLINE ],NEWLINE)NEWLINE
# -*- coding: utf-8 -*-NEWLINEfrom __future__ import unicode_literalsNEWLINENEWLINEimport datetimeNEWLINEimport decimalNEWLINENEWLINEfrom django.template.defaultfilters import *NEWLINEfrom django.test import TestCaseNEWLINEfrom django.utils import sixNEWLINEfrom django.utils import unittest, translationNEWLINEfrom django.utils.safestring import SafeDataNEWLINEfrom django.utils.encoding import python_2_unicode_compatibleNEWLINENEWLINEfrom i18n import TransRealMixinNEWLINENEWLINENEWLINEclass DefaultFiltersTests(TestCase):NEWLINENEWLINE def test_floatformat(self):NEWLINE self.assertEqual(floatformat(7.7), '7.7')NEWLINE self.assertEqual(floatformat(7.0), '7')NEWLINE self.assertEqual(floatformat(0.7), '0.7')NEWLINE self.assertEqual(floatformat(0.07), '0.1')NEWLINE self.assertEqual(floatformat(0.007), '0.0')NEWLINE self.assertEqual(floatformat(0.0), '0')NEWLINE self.assertEqual(floatformat(7.7, 3), '7.700')NEWLINE self.assertEqual(floatformat(6.000000, 3), '6.000')NEWLINE self.assertEqual(floatformat(6.200000, 3), '6.200')NEWLINE self.assertEqual(floatformat(6.200000, -3), '6.200')NEWLINE self.assertEqual(floatformat(13.1031, -3), '13.103')NEWLINE self.assertEqual(floatformat(11.1197, -2), '11.12')NEWLINE self.assertEqual(floatformat(11.0000, -2), '11')NEWLINE self.assertEqual(floatformat(11.000001, -2), '11.00')NEWLINE self.assertEqual(floatformat(8.2798, 3), '8.280')NEWLINE self.assertEqual(floatformat(5555.555, 2), '5555.56')NEWLINE self.assertEqual(floatformat(001.3000, 2), '1.30')NEWLINE self.assertEqual(floatformat(0.12345, 2), '0.12')NEWLINE self.assertEqual(floatformat(decimal.Decimal('555.555'), 2), '555.56')NEWLINE self.assertEqual(floatformat(decimal.Decimal('09.000')), '9')NEWLINE self.assertEqual(floatformat('foo'), '')NEWLINE self.assertEqual(floatformat(13.1031, 'bar'), '13.1031')NEWLINE self.assertEqual(floatformat(18.125, 2), '18.13')NEWLINE self.assertEqual(floatformat('foo', 'bar'), '')NEWLINE self.assertEqual(floatformat('¿Cómo esta usted?'), '')NEWLINE self.assertEqual(floatformat(None), '')NEWLINENEWLINE # Check that we're not converting to scientific notation.NEWLINE self.assertEqual(floatformat(0, 6), '0.000000')NEWLINE self.assertEqual(floatformat(0, 7), '0.0000000')NEWLINE self.assertEqual(floatformat(0, 10), '0.0000000000')NEWLINE self.assertEqual(floatformat(0.000000000000000000015, 20),NEWLINE '0.00000000000000000002')NEWLINENEWLINE pos_inf = float(1e30000)NEWLINE self.assertEqual(floatformat(pos_inf), six.text_type(pos_inf))NEWLINENEWLINE neg_inf = float(-1e30000)NEWLINE self.assertEqual(floatformat(neg_inf), six.text_type(neg_inf))NEWLINENEWLINE nan = pos_inf / pos_infNEWLINE self.assertEqual(floatformat(nan), six.text_type(nan))NEWLINENEWLINE class FloatWrapper(object):NEWLINE def __init__(self, value):NEWLINE self.value = valueNEWLINE def __float__(self):NEWLINE return self.valueNEWLINENEWLINE self.assertEqual(floatformat(FloatWrapper(11.000001), -2), '11.00')NEWLINENEWLINE # Regression for #15789NEWLINE decimal_ctx = decimal.getcontext()NEWLINE old_prec, decimal_ctx.prec = decimal_ctx.prec, 2NEWLINE try:NEWLINE self.assertEqual(floatformat(1.2345, 2), '1.23')NEWLINE self.assertEqual(floatformat(15.2042, -3), '15.204')NEWLINE self.assertEqual(floatformat(1.2345, '2'), '1.23')NEWLINE self.assertEqual(floatformat(15.2042, '-3'), '15.204')NEWLINE self.assertEqual(floatformat(decimal.Decimal('1.2345'), 2), '1.23')NEWLINE self.assertEqual(floatformat(decimal.Decimal('15.2042'), -3), '15.204')NEWLINE finally:NEWLINE decimal_ctx.prec = old_precNEWLINENEWLINENEWLINE def 
test_floatformat_py2_fail(self):NEWLINE self.assertEqual(floatformat(1.00000000000000015, 16), '1.0000000000000002')NEWLINENEWLINE # The test above fails because of Python 2's float handling. Floats withNEWLINE # many zeroes after the decimal point should be passed in as another typeNEWLINE # such as unicode or Decimal.NEWLINE if not six.PY3:NEWLINE test_floatformat_py2_fail = unittest.expectedFailure(test_floatformat_py2_fail)NEWLINENEWLINENEWLINE def test_addslashes(self):NEWLINE self.assertEqual(addslashes('"double quotes" and \'single quotes\''),NEWLINE '\\"double quotes\\" and \\\'single quotes\\\'')NEWLINENEWLINE self.assertEqual(addslashes(r'\ : backslashes, too'),NEWLINE '\\\\ : backslashes, too')NEWLINENEWLINE def test_capfirst(self):NEWLINE self.assertEqual(capfirst('hello world'), 'Hello world')NEWLINENEWLINE def test_escapejs(self):NEWLINE self.assertEqual(escapejs_filter('"double quotes" and \'single quotes\''),NEWLINE '\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027')NEWLINE self.assertEqual(escapejs_filter(r'\ : backslashes, too'),NEWLINE '\\u005C : backslashes, too')NEWLINE self.assertEqual(escapejs_filter('and lots of whitespace: \r\n\t\v\f\b'),NEWLINE 'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008')NEWLINE self.assertEqual(escapejs_filter(r'<script>and this</script>'),NEWLINE '\\u003Cscript\\u003Eand this\\u003C/script\\u003E')NEWLINE self.assertEqual(NEWLINE escapejs_filter('paragraph separator:\u2029and line separator:\u2028'),NEWLINE 'paragraph separator:\\u2029and line separator:\\u2028')NEWLINENEWLINE def test_fix_ampersands(self):NEWLINE self.assertEqual(fix_ampersands_filter('Jack & Jill & Jeroboam'),NEWLINE 'Jack &amp; Jill &amp; Jeroboam')NEWLINENEWLINE def test_linenumbers(self):NEWLINE self.assertEqual(linenumbers('line 1\nline 2'),NEWLINE '1. line 1\n2. line 2')NEWLINE self.assertEqual(linenumbers('\n'.join(['x'] * 10)),NEWLINE '01. x\n02. x\n03. x\n04. x\n05. x\n06. x\n07. '\NEWLINE 'x\n08. x\n09. x\n10. 
x')NEWLINENEWLINE def test_lower(self):NEWLINE self.assertEqual(lower('TEST'), 'test')NEWLINENEWLINE # uppercase E umlautNEWLINE self.assertEqual(lower('\xcb'), '\xeb')NEWLINENEWLINE def test_make_list(self):NEWLINE self.assertEqual(make_list('abc'), ['a', 'b', 'c'])NEWLINE self.assertEqual(make_list(1234), ['1', '2', '3', '4'])NEWLINENEWLINE def test_slugify(self):NEWLINE self.assertEqual(slugify(' Jack & Jill like numbers 1,2,3 and 4 and'\NEWLINE ' silly characters ?%.$!/'),NEWLINE 'jack-jill-like-numbers-123-and-4-and-silly-characters')NEWLINENEWLINE self.assertEqual(slugify("Un \xe9l\xe9phant \xe0 l'or\xe9e du bois"),NEWLINE 'un-elephant-a-loree-du-bois')NEWLINENEWLINE def test_stringformat(self):NEWLINE self.assertEqual(stringformat(1, '03d'), '001')NEWLINE self.assertEqual(stringformat(1, 'z'), '')NEWLINENEWLINE def test_title(self):NEWLINE self.assertEqual(title('a nice title, isn\'t it?'),NEWLINE "A Nice Title, Isn't It?")NEWLINE self.assertEqual(title('discoth\xe8que'), 'Discoth\xe8que')NEWLINENEWLINE def test_truncatewords(self):NEWLINE self.assertEqual(NEWLINE truncatewords('A sentence with a few words in it', 1), 'A ...')NEWLINE self.assertEqual(NEWLINE truncatewords('A sentence with a few words in it', 5),NEWLINE 'A sentence with a few ...')NEWLINE self.assertEqual(NEWLINE truncatewords('A sentence with a few words in it', 100),NEWLINE 'A sentence with a few words in it')NEWLINE self.assertEqual(NEWLINE truncatewords('A sentence with a few words in it',NEWLINE 'not a number'), 'A sentence with a few words in it')NEWLINENEWLINE def test_truncatewords_html(self):NEWLINE self.assertEqual(truncatewords_html(NEWLINE '<p>one <a href="#">two - three <br>four</a> five</p>', 0), '')NEWLINE self.assertEqual(truncatewords_html('<p>one <a href="#">two - '\NEWLINE 'three <br>four</a> five</p>', 2),NEWLINE '<p>one <a href="#">two ...</a></p>')NEWLINE self.assertEqual(truncatewords_html(NEWLINE '<p>one <a href="#">two - three <br>four</a> five</p>', 4),NEWLINE '<p>one <a href="#">two - three <br>four ...</a></p>')NEWLINE self.assertEqual(truncatewords_html(NEWLINE '<p>one <a href="#">two - three <br>four</a> five</p>', 5),NEWLINE '<p>one <a href="#">two - three <br>four</a> five</p>')NEWLINE self.assertEqual(truncatewords_html(NEWLINE '<p>one <a href="#">two - three <br>four</a> five</p>', 100),NEWLINE '<p>one <a href="#">two - three <br>four</a> five</p>')NEWLINE self.assertEqual(truncatewords_html(NEWLINE '\xc5ngstr\xf6m was here', 1), '\xc5ngstr\xf6m ...')NEWLINENEWLINE def test_upper(self):NEWLINE self.assertEqual(upper('Mixed case input'), 'MIXED CASE INPUT')NEWLINE # lowercase e umlautNEWLINE self.assertEqual(upper('\xeb'), '\xcb')NEWLINENEWLINE def test_urlencode(self):NEWLINE self.assertEqual(urlencode('fran\xe7ois & jill'),NEWLINE 'fran%C3%A7ois%20%26%20jill')NEWLINE self.assertEqual(urlencode(1), '1')NEWLINENEWLINE def test_iriencode(self):NEWLINE self.assertEqual(iriencode('S\xf8r-Tr\xf8ndelag'),NEWLINE 'S%C3%B8r-Tr%C3%B8ndelag')NEWLINE self.assertEqual(iriencode(urlencode('fran\xe7ois & jill')),NEWLINE 'fran%C3%A7ois%20%26%20jill')NEWLINENEWLINE def test_urlizetrunc(self):NEWLINE self.assertEqual(urlizetrunc('http://short.com/', 20), '<a href='\NEWLINE '"http://short.com/" rel="nofollow">http://short.com/</a>')NEWLINENEWLINE self.assertEqual(urlizetrunc('http://www.google.co.uk/search?hl=en'\NEWLINE '&q=some+long+url&btnG=Search&meta=', 20), '<a href="http://'\NEWLINE 'www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&'\NEWLINE 'meta=" 
rel="nofollow">http://www.google...</a>')NEWLINENEWLINE self.assertEqual(urlizetrunc('http://www.google.co.uk/search?hl=en'\NEWLINE '&q=some+long+url&btnG=Search&meta=', 20), '<a href="http://'\NEWLINE 'www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search'\NEWLINE '&meta=" rel="nofollow">http://www.google...</a>')NEWLINENEWLINE # Check truncating of URIs which are the exact lengthNEWLINE uri = 'http://31characteruri.com/test/'NEWLINE self.assertEqual(len(uri), 31)NEWLINENEWLINE self.assertEqual(urlizetrunc(uri, 31),NEWLINE '<a href="http://31characteruri.com/test/" rel="nofollow">'\NEWLINE 'http://31characteruri.com/test/</a>')NEWLINENEWLINE self.assertEqual(urlizetrunc(uri, 30),NEWLINE '<a href="http://31characteruri.com/test/" rel="nofollow">'\NEWLINE 'http://31characteruri.com/t...</a>')NEWLINENEWLINE self.assertEqual(urlizetrunc(uri, 2),NEWLINE '<a href="http://31characteruri.com/test/"'\NEWLINE ' rel="nofollow">...</a>')NEWLINENEWLINE def test_urlize(self):NEWLINE # Check normal urlizeNEWLINE self.assertEqual(urlize('http://google.com'),NEWLINE '<a href="http://google.com" rel="nofollow">http://google.com</a>')NEWLINE self.assertEqual(urlize('http://google.com/'),NEWLINE '<a href="http://google.com/" rel="nofollow">http://google.com/</a>')NEWLINE self.assertEqual(urlize('www.google.com'),NEWLINE '<a href="http://www.google.com" rel="nofollow">www.google.com</a>')NEWLINE self.assertEqual(urlize('djangoproject.org'),NEWLINE '<a href="http://djangoproject.org" rel="nofollow">djangoproject.org</a>')NEWLINE self.assertEqual(urlize('info@djangoproject.org'),NEWLINE '<a href="mailto:info@djangoproject.org">info@djangoproject.org</a>')NEWLINENEWLINE # Check urlize with https addressesNEWLINE self.assertEqual(urlize('https://google.com'),NEWLINE '<a href="https://google.com" rel="nofollow">https://google.com</a>')NEWLINENEWLINE # Check urlize doesn't overquote already quoted urls - see #9655NEWLINE self.assertEqual(urlize('http://hi.baidu.com/%D6%D8%D0%C2%BF'),NEWLINE '<a href="http://hi.baidu.com/%D6%D8%D0%C2%BF" rel="nofollow">'NEWLINE 'http://hi.baidu.com/%D6%D8%D0%C2%BF</a>')NEWLINE self.assertEqual(urlize('www.mystore.com/30%OffCoupons!'),NEWLINE '<a href="http://www.mystore.com/30%25OffCoupons!" 
rel="nofollow">'NEWLINE 'www.mystore.com/30%OffCoupons!</a>')NEWLINE self.assertEqual(urlize('http://en.wikipedia.org/wiki/Caf%C3%A9'),NEWLINE '<a href="http://en.wikipedia.org/wiki/Caf%C3%A9" rel="nofollow">'NEWLINE 'http://en.wikipedia.org/wiki/Caf%C3%A9</a>')NEWLINE self.assertEqual(urlize('http://en.wikipedia.org/wiki/Café'),NEWLINE '<a href="http://en.wikipedia.org/wiki/Caf%C3%A9" rel="nofollow">'NEWLINE 'http://en.wikipedia.org/wiki/Café</a>')NEWLINENEWLINE # Check urlize keeps balanced parentheses - see #11911NEWLINE self.assertEqual(urlize('http://en.wikipedia.org/wiki/Django_(web_framework)'),NEWLINE '<a href="http://en.wikipedia.org/wiki/Django_(web_framework)" rel="nofollow">'NEWLINE 'http://en.wikipedia.org/wiki/Django_(web_framework)</a>')NEWLINE self.assertEqual(urlize('(see http://en.wikipedia.org/wiki/Django_(web_framework))'),NEWLINE '(see <a href="http://en.wikipedia.org/wiki/Django_(web_framework)" rel="nofollow">'NEWLINE 'http://en.wikipedia.org/wiki/Django_(web_framework)</a>)')NEWLINENEWLINE # Check urlize adds nofollow properly - see #12183NEWLINE self.assertEqual(urlize('foo@bar.com or www.bar.com'),NEWLINE '<a href="mailto:foo@bar.com">foo@bar.com</a> or 'NEWLINE '<a href="http://www.bar.com" rel="nofollow">www.bar.com</a>')NEWLINENEWLINE # Check urlize handles IDN correctly - see #13704NEWLINE self.assertEqual(urlize('http://c✶.ws'),NEWLINE '<a href="http://xn--c-lgq.ws" rel="nofollow">http://c✶.ws</a>')NEWLINE self.assertEqual(urlize('www.c✶.ws'),NEWLINE '<a href="http://www.xn--c-lgq.ws" rel="nofollow">www.c✶.ws</a>')NEWLINE self.assertEqual(urlize('c✶.org'),NEWLINE '<a href="http://xn--c-lgq.org" rel="nofollow">c✶.org</a>')NEWLINE self.assertEqual(urlize('info@c✶.org'),NEWLINE '<a href="mailto:info@xn--c-lgq.org">info@c✶.org</a>')NEWLINENEWLINE # Check urlize doesn't highlight malformed URIs - see #16395NEWLINE self.assertEqual(urlize('http:///www.google.com'),NEWLINE 'http:///www.google.com')NEWLINE self.assertEqual(urlize('http://.google.com'),NEWLINE 'http://.google.com')NEWLINE self.assertEqual(urlize('http://@foo.com'),NEWLINE 'http://@foo.com')NEWLINENEWLINE # Check urlize accepts more TLDs - see #16656NEWLINE self.assertEqual(urlize('usa.gov'),NEWLINE '<a href="http://usa.gov" rel="nofollow">usa.gov</a>')NEWLINENEWLINE # Check urlize don't crash on invalid email with dot-starting domain - see #17592NEWLINE self.assertEqual(urlize('email@.stream.ru'),NEWLINE 'email@.stream.ru')NEWLINENEWLINE # Check urlize accepts uppercased URL schemes - see #18071NEWLINE self.assertEqual(urlize('HTTPS://github.com/'),NEWLINE '<a href="https://github.com/" rel="nofollow">HTTPS://github.com/</a>')NEWLINENEWLINE # Check urlize trims trailing period when followed by parenthesis - see #18644NEWLINE self.assertEqual(urlize('(Go to http://www.example.com/foo.)'),NEWLINE '(Go to <a href="http://www.example.com/foo" rel="nofollow">http://www.example.com/foo</a>.)')NEWLINENEWLINE # Check urlize handles brackets properly (#19070)NEWLINE self.assertEqual(urlize('[see www.example.com]'),NEWLINE '[see <a href="http://www.example.com" rel="nofollow">www.example.com</a>]' )NEWLINE self.assertEqual(urlize('see test[at[example.com'),NEWLINE 'see <a href="http://test[at[example.com" rel="nofollow">test[at[example.com</a>' )NEWLINE self.assertEqual(urlize('[http://168.192.0.1](http://168.192.0.1)'),NEWLINE '[<a href="http://168.192.0.1](http://168.192.0.1)" rel="nofollow">http://168.192.0.1](http://168.192.0.1)</a>')NEWLINENEWLINE # Check urlize works with IPv4/IPv6 addressesNEWLINE 
self.assertEqual(urlize('http://192.168.0.15/api/9'),NEWLINE '<a href="http://192.168.0.15/api/9" rel="nofollow">http://192.168.0.15/api/9</a>')NEWLINE self.assertEqual(urlize('http://[2001:db8:cafe::2]/api/9'),NEWLINE '<a href="http://[2001:db8:cafe::2]/api/9" rel="nofollow">http://[2001:db8:cafe::2]/api/9</a>')NEWLINENEWLINE def test_wordcount(self):NEWLINE self.assertEqual(wordcount(''), 0)NEWLINE self.assertEqual(wordcount('oneword'), 1)NEWLINE self.assertEqual(wordcount('lots of words'), 3)NEWLINENEWLINE self.assertEqual(wordwrap('this is a long paragraph of text that '\NEWLINE 'really needs to be wrapped I\'m afraid', 14),NEWLINE "this is a long\nparagraph of\ntext that\nreally needs\nto be "\NEWLINE "wrapped\nI'm afraid")NEWLINENEWLINE self.assertEqual(wordwrap('this is a short paragraph of text.\n '\NEWLINE 'But this line should be indented', 14),NEWLINE 'this is a\nshort\nparagraph of\ntext.\n But this\nline '\NEWLINE 'should be\nindented')NEWLINENEWLINE self.assertEqual(wordwrap('this is a short paragraph of text.\n '\NEWLINE 'But this line should be indented',15), 'this is a short\n'\NEWLINE 'paragraph of\ntext.\n But this line\nshould be\nindented')NEWLINENEWLINE def test_rjust(self):NEWLINE self.assertEqual(ljust('test', 10), 'test ')NEWLINE self.assertEqual(ljust('test', 3), 'test')NEWLINE self.assertEqual(rjust('test', 10), ' test')NEWLINE self.assertEqual(rjust('test', 3), 'test')NEWLINENEWLINE def test_center(self):NEWLINE self.assertEqual(center('test', 6), ' test ')NEWLINENEWLINE def test_cut(self):NEWLINE self.assertEqual(cut('a string to be mangled', 'a'),NEWLINE ' string to be mngled')NEWLINE self.assertEqual(cut('a string to be mangled', 'ng'),NEWLINE 'a stri to be maled')NEWLINE self.assertEqual(cut('a string to be mangled', 'strings'),NEWLINE 'a string to be mangled')NEWLINENEWLINE def test_force_escape(self):NEWLINE escaped = force_escape('<some html & special characters > here')NEWLINE self.assertEqual(NEWLINE escaped, '&lt;some html &amp; special characters &gt; here')NEWLINE self.assertIsInstance(escaped, SafeData)NEWLINE self.assertEqual(NEWLINE force_escape('<some html & special characters > here ĐÅ€£'),NEWLINE '&lt;some html &amp; special characters &gt; here'\NEWLINE ' \u0110\xc5\u20ac\xa3')NEWLINENEWLINE def test_linebreaks(self):NEWLINE self.assertEqual(linebreaks_filter('line 1'), '<p>line 1</p>')NEWLINE self.assertEqual(linebreaks_filter('line 1\nline 2'),NEWLINE '<p>line 1<br />line 2</p>')NEWLINE self.assertEqual(linebreaks_filter('line 1\rline 2'),NEWLINE '<p>line 1<br />line 2</p>')NEWLINE self.assertEqual(linebreaks_filter('line 1\r\nline 2'),NEWLINE '<p>line 1<br />line 2</p>')NEWLINENEWLINE def test_linebreaksbr(self):NEWLINE self.assertEqual(linebreaksbr('line 1\nline 2'),NEWLINE 'line 1<br />line 2')NEWLINE self.assertEqual(linebreaksbr('line 1\rline 2'),NEWLINE 'line 1<br />line 2')NEWLINE self.assertEqual(linebreaksbr('line 1\r\nline 2'),NEWLINE 'line 1<br />line 2')NEWLINENEWLINE def test_removetags(self):NEWLINE self.assertEqual(removetags('some <b>html</b> with <script>alert'\NEWLINE '("You smell")</script> disallowed <img /> tags', 'script img'),NEWLINE 'some <b>html</b> with alert("You smell") disallowed tags')NEWLINE self.assertEqual(striptags('some <b>html</b> with <script>alert'\NEWLINE '("You smell")</script> disallowed <img /> tags'),NEWLINE 'some html with alert("You smell") disallowed tags')NEWLINENEWLINE def test_dictsort(self):NEWLINE sorted_dicts = dictsort([{'age': 23, 'name': 'Barbara-Ann'},NEWLINE {'age': 63, 'name': 'Ra Ra 
Rasputin'},NEWLINE {'name': 'Jonny B Goode', 'age': 18}], 'age')NEWLINENEWLINE self.assertEqual([sorted(dict.items()) for dict in sorted_dicts],NEWLINE [[('age', 18), ('name', 'Jonny B Goode')],NEWLINE [('age', 23), ('name', 'Barbara-Ann')],NEWLINE [('age', 63), ('name', 'Ra Ra Rasputin')]])NEWLINENEWLINE # If it gets passed a list of something else different fromNEWLINE # dictionaries it should fail silentlyNEWLINE self.assertEqual(dictsort([1, 2, 3], 'age'), '')NEWLINE self.assertEqual(dictsort('Hello!', 'age'), '')NEWLINE self.assertEqual(dictsort({'a': 1}, 'age'), '')NEWLINE self.assertEqual(dictsort(1, 'age'), '')NEWLINENEWLINE def test_dictsortreversed(self):NEWLINE sorted_dicts = dictsortreversed([{'age': 23, 'name': 'Barbara-Ann'},NEWLINE {'age': 63, 'name': 'Ra Ra Rasputin'},NEWLINE {'name': 'Jonny B Goode', 'age': 18}],NEWLINE 'age')NEWLINENEWLINE self.assertEqual([sorted(dict.items()) for dict in sorted_dicts],NEWLINE [[('age', 63), ('name', 'Ra Ra Rasputin')],NEWLINE [('age', 23), ('name', 'Barbara-Ann')],NEWLINE [('age', 18), ('name', 'Jonny B Goode')]])NEWLINENEWLINE # If it gets passed a list of something else different fromNEWLINE # dictionaries it should fail silentlyNEWLINE self.assertEqual(dictsortreversed([1, 2, 3], 'age'), '')NEWLINE self.assertEqual(dictsortreversed('Hello!', 'age'), '')NEWLINE self.assertEqual(dictsortreversed({'a': 1}, 'age'), '')NEWLINE self.assertEqual(dictsortreversed(1, 'age'), '')NEWLINENEWLINE def test_first(self):NEWLINE self.assertEqual(first([0,1,2]), 0)NEWLINE self.assertEqual(first(''), '')NEWLINE self.assertEqual(first('test'), 't')NEWLINENEWLINE def test_join(self):NEWLINE self.assertEqual(join([0,1,2], 'glue'), '0glue1glue2')NEWLINENEWLINE def test_length(self):NEWLINE self.assertEqual(length('1234'), 4)NEWLINE self.assertEqual(length([1,2,3,4]), 4)NEWLINE self.assertEqual(length_is([], 0), True)NEWLINE self.assertEqual(length_is([], 1), False)NEWLINE self.assertEqual(length_is('a', 1), True)NEWLINE self.assertEqual(length_is('a', 10), False)NEWLINENEWLINE def test_slice(self):NEWLINE self.assertEqual(slice_filter('abcdefg', '0'), '')NEWLINE self.assertEqual(slice_filter('abcdefg', '1'), 'a')NEWLINE self.assertEqual(slice_filter('abcdefg', '-1'), 'abcdef')NEWLINE self.assertEqual(slice_filter('abcdefg', '1:2'), 'b')NEWLINE self.assertEqual(slice_filter('abcdefg', '1:3'), 'bc')NEWLINE self.assertEqual(slice_filter('abcdefg', '0::2'), 'aceg')NEWLINENEWLINE def test_unordered_list(self):NEWLINE self.assertEqual(unordered_list(['item 1', 'item 2']),NEWLINE '\t<li>item 1</li>\n\t<li>item 2</li>')NEWLINE self.assertEqual(unordered_list(['item 1', ['item 1.1']]),NEWLINE '\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>')NEWLINENEWLINE self.assertEqual(NEWLINE unordered_list(['item 1', ['item 1.1', 'item1.2'], 'item 2']),NEWLINE '\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t\t<li>item1.2'\NEWLINE '</li>\n\t</ul>\n\t</li>\n\t<li>item 2</li>')NEWLINENEWLINE self.assertEqual(NEWLINE unordered_list(['item 1', ['item 1.1', ['item 1.1.1',NEWLINE ['item 1.1.1.1']]]]),NEWLINE '\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1\n\t\t<ul>\n\t\t\t<li>'\NEWLINE 'item 1.1.1\n\t\t\t<ul>\n\t\t\t\t<li>item 1.1.1.1</li>\n\t\t\t'\NEWLINE '</ul>\n\t\t\t</li>\n\t\t</ul>\n\t\t</li>\n\t</ul>\n\t</li>')NEWLINENEWLINE self.assertEqual(unordered_list(NEWLINE ['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]),NEWLINE '\t<li>States\n\t<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>'\NEWLINE 'Lawrence</li>\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>'\NEWLINE 
'\n\t\t<li>Illinois</li>\n\t</ul>\n\t</li>')NEWLINENEWLINE @python_2_unicode_compatibleNEWLINE class ULItem(object):NEWLINE def __init__(self, title):NEWLINE self.title = titleNEWLINE def __str__(self):NEWLINE return 'ulitem-%s' % str(self.title)NEWLINENEWLINE a = ULItem('a')NEWLINE b = ULItem('b')NEWLINE self.assertEqual(unordered_list([a,b]),NEWLINE '\t<li>ulitem-a</li>\n\t<li>ulitem-b</li>')NEWLINENEWLINE # Old format for unordered lists should still workNEWLINE self.assertEqual(unordered_list(['item 1', []]), '\t<li>item 1</li>')NEWLINENEWLINE self.assertEqual(unordered_list(['item 1', [['item 1.1', []]]]),NEWLINE '\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1</li>\n\t</ul>\n\t</li>')NEWLINENEWLINE self.assertEqual(unordered_list(['item 1', [['item 1.1', []],NEWLINE ['item 1.2', []]]]), '\t<li>item 1\n\t<ul>\n\t\t<li>item 1.1'\NEWLINE '</li>\n\t\t<li>item 1.2</li>\n\t</ul>\n\t</li>')NEWLINENEWLINE self.assertEqual(unordered_list(['States', [['Kansas', [['Lawrence',NEWLINE []], ['Topeka', []]]], ['Illinois', []]]]), '\t<li>States\n\t'\NEWLINE '<ul>\n\t\t<li>Kansas\n\t\t<ul>\n\t\t\t<li>Lawrence</li>'\NEWLINE '\n\t\t\t<li>Topeka</li>\n\t\t</ul>\n\t\t</li>\n\t\t<li>'\NEWLINE 'Illinois</li>\n\t</ul>\n\t</li>')NEWLINENEWLINE def test_add(self):NEWLINE self.assertEqual(add('1', '2'), 3)NEWLINENEWLINE def test_get_digit(self):NEWLINE self.assertEqual(get_digit(123, 1), 3)NEWLINE self.assertEqual(get_digit(123, 2), 2)NEWLINE self.assertEqual(get_digit(123, 3), 1)NEWLINE self.assertEqual(get_digit(123, 4), 0)NEWLINE self.assertEqual(get_digit(123, 0), 123)NEWLINE self.assertEqual(get_digit('xyz', 0), 'xyz')NEWLINENEWLINE def test_date(self):NEWLINE # real testing of date() is in dateformat.pyNEWLINE self.assertEqual(date(datetime.datetime(2005, 12, 29), "d F Y"),NEWLINE '29 December 2005')NEWLINE self.assertEqual(date(datetime.datetime(2005, 12, 29), r'jS \o\f F'),NEWLINE '29th of December')NEWLINENEWLINE def test_time(self):NEWLINE # real testing of time() is done in dateformat.pyNEWLINE self.assertEqual(time(datetime.time(13), "h"), '01')NEWLINE self.assertEqual(time(datetime.time(0), "h"), '12')NEWLINENEWLINE def test_timesince(self):NEWLINE # real testing is done in timesince.py, where we can provide our own 'now'NEWLINE # NOTE: \xa0 avoids wrapping between value and unitNEWLINE self.assertEqual(NEWLINE timesince_filter(datetime.datetime.now() - datetime.timedelta(1)),NEWLINE '1\xa0day')NEWLINENEWLINE self.assertEqual(NEWLINE timesince_filter(datetime.datetime(2005, 12, 29),NEWLINE datetime.datetime(2005, 12, 30)),NEWLINE '1\xa0day')NEWLINENEWLINE def test_timeuntil(self):NEWLINE # NOTE: \xa0 avoids wrapping between value and unitNEWLINE self.assertEqual(NEWLINE timeuntil_filter(datetime.datetime.now() + datetime.timedelta(1, 1)),NEWLINE '1\xa0day')NEWLINENEWLINE self.assertEqual(NEWLINE timeuntil_filter(datetime.datetime(2005, 12, 30),NEWLINE datetime.datetime(2005, 12, 29)),NEWLINE '1\xa0day')NEWLINENEWLINE def test_default(self):NEWLINE self.assertEqual(default("val", "default"), 'val')NEWLINE self.assertEqual(default(None, "default"), 'default')NEWLINE self.assertEqual(default('', "default"), 'default')NEWLINENEWLINE def test_if_none(self):NEWLINE self.assertEqual(default_if_none("val", "default"), 'val')NEWLINE self.assertEqual(default_if_none(None, "default"), 'default')NEWLINE self.assertEqual(default_if_none('', "default"), '')NEWLINENEWLINE def test_divisibleby(self):NEWLINE self.assertEqual(divisibleby(4, 2), True)NEWLINE self.assertEqual(divisibleby(4, 3), False)NEWLINENEWLINE def 
test_yesno(self):NEWLINE self.assertEqual(yesno(True), 'yes')NEWLINE self.assertEqual(yesno(False), 'no')NEWLINE self.assertEqual(yesno(None), 'maybe')NEWLINE self.assertEqual(yesno(True, 'certainly,get out of town,perhaps'),NEWLINE 'certainly')NEWLINE self.assertEqual(yesno(False, 'certainly,get out of town,perhaps'),NEWLINE 'get out of town')NEWLINE self.assertEqual(yesno(None, 'certainly,get out of town,perhaps'),NEWLINE 'perhaps')NEWLINE self.assertEqual(yesno(None, 'certainly,get out of town'),NEWLINE 'get out of town')NEWLINENEWLINE def test_filesizeformat(self):NEWLINE # NOTE: \xa0 avoids wrapping between value and unitNEWLINE self.assertEqual(filesizeformat(1023), '1023\xa0bytes')NEWLINE self.assertEqual(filesizeformat(1024), '1.0\xa0KB')NEWLINE self.assertEqual(filesizeformat(10*1024), '10.0\xa0KB')NEWLINE self.assertEqual(filesizeformat(1024*1024-1), '1024.0\xa0KB')NEWLINE self.assertEqual(filesizeformat(1024*1024), '1.0\xa0MB')NEWLINE self.assertEqual(filesizeformat(1024*1024*50), '50.0\xa0MB')NEWLINE self.assertEqual(filesizeformat(1024*1024*1024-1), '1024.0\xa0MB')NEWLINE self.assertEqual(filesizeformat(1024*1024*1024), '1.0\xa0GB')NEWLINE self.assertEqual(filesizeformat(1024*1024*1024*1024), '1.0\xa0TB')NEWLINE self.assertEqual(filesizeformat(1024*1024*1024*1024*1024), '1.0\xa0PB')NEWLINE self.assertEqual(filesizeformat(1024*1024*1024*1024*1024*2000),NEWLINE '2000.0\xa0PB')NEWLINE self.assertEqual(filesizeformat(complex(1,-1)), '0\xa0bytes')NEWLINE self.assertEqual(filesizeformat(""), '0\xa0bytes')NEWLINE self.assertEqual(filesizeformat("\N{GREEK SMALL LETTER ALPHA}"),NEWLINE '0\xa0bytes')NEWLINENEWLINE def test_pluralize(self):NEWLINE self.assertEqual(pluralize(1), '')NEWLINE self.assertEqual(pluralize(0), 's')NEWLINE self.assertEqual(pluralize(2), 's')NEWLINE self.assertEqual(pluralize([1]), '')NEWLINE self.assertEqual(pluralize([]), 's')NEWLINE self.assertEqual(pluralize([1,2,3]), 's')NEWLINE self.assertEqual(pluralize(1,'es'), '')NEWLINE self.assertEqual(pluralize(0,'es'), 'es')NEWLINE self.assertEqual(pluralize(2,'es'), 'es')NEWLINE self.assertEqual(pluralize(1,'y,ies'), 'y')NEWLINE self.assertEqual(pluralize(0,'y,ies'), 'ies')NEWLINE self.assertEqual(pluralize(2,'y,ies'), 'ies')NEWLINE self.assertEqual(pluralize(0,'y,ies,error'), '')NEWLINENEWLINE def test_phone2numeric(self):NEWLINE self.assertEqual(phone2numeric_filter('0800 flowers'), '0800 3569377')NEWLINENEWLINE def test_non_string_input(self):NEWLINE # Filters shouldn't break if passed non-stringsNEWLINE self.assertEqual(addslashes(123), '123')NEWLINE self.assertEqual(linenumbers(123), '1. 
123')NEWLINE self.assertEqual(lower(123), '123')NEWLINE self.assertEqual(make_list(123), ['1', '2', '3'])NEWLINE self.assertEqual(slugify(123), '123')NEWLINE self.assertEqual(title(123), '123')NEWLINE self.assertEqual(truncatewords(123, 2), '123')NEWLINE self.assertEqual(upper(123), '123')NEWLINE self.assertEqual(urlencode(123), '123')NEWLINE self.assertEqual(urlize(123), '123')NEWLINE self.assertEqual(urlizetrunc(123, 1), '123')NEWLINE self.assertEqual(wordcount(123), 1)NEWLINE self.assertEqual(wordwrap(123, 2), '123')NEWLINE self.assertEqual(ljust('123', 4), '123 ')NEWLINE self.assertEqual(rjust('123', 4), ' 123')NEWLINE self.assertEqual(center('123', 5), ' 123 ')NEWLINE self.assertEqual(center('123', 6), ' 123 ')NEWLINE self.assertEqual(cut(123, '2'), '13')NEWLINE self.assertEqual(escape(123), '123')NEWLINE self.assertEqual(linebreaks_filter(123), '<p>123</p>')NEWLINE self.assertEqual(linebreaksbr(123), '123')NEWLINE self.assertEqual(removetags(123, 'a'), '123')NEWLINE self.assertEqual(striptags(123), '123')NEWLINENEWLINENEWLINEclass DefaultFiltersI18NTests(TransRealMixin, TestCase):NEWLINENEWLINE def test_localized_filesizeformat(self):NEWLINE # NOTE: \xa0 avoids wrapping between value and unitNEWLINE with self.settings(USE_L10N=True):NEWLINE with translation.override('de', deactivate=True):NEWLINE self.assertEqual(filesizeformat(1023), '1023\xa0Bytes')NEWLINE self.assertEqual(filesizeformat(1024), '1,0\xa0KB')NEWLINE self.assertEqual(filesizeformat(10*1024), '10,0\xa0KB')NEWLINE self.assertEqual(filesizeformat(1024*1024-1), '1024,0\xa0KB')NEWLINE self.assertEqual(filesizeformat(1024*1024), '1,0\xa0MB')NEWLINE self.assertEqual(filesizeformat(1024*1024*50), '50,0\xa0MB')NEWLINE self.assertEqual(filesizeformat(1024*1024*1024-1), '1024,0\xa0MB')NEWLINE self.assertEqual(filesizeformat(1024*1024*1024), '1,0\xa0GB')NEWLINE self.assertEqual(filesizeformat(1024*1024*1024*1024), '1,0\xa0TB')NEWLINE self.assertEqual(filesizeformat(1024*1024*1024*1024*1024),NEWLINE '1,0\xa0PB')NEWLINE self.assertEqual(filesizeformat(1024*1024*1024*1024*1024*2000),NEWLINE '2000,0\xa0PB')NEWLINE self.assertEqual(filesizeformat(complex(1,-1)), '0\xa0Bytes')NEWLINE self.assertEqual(filesizeformat(""), '0\xa0Bytes')NEWLINE self.assertEqual(filesizeformat("\N{GREEK SMALL LETTER ALPHA}"),NEWLINE '0\xa0Bytes')NEWLINE
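# A minimal usage sketch for the default filters exercised by the tests above, assuming aNEWLINE# Django version matching those tests (the django.utils.six era) and no configured settingsNEWLINE# module; settings.configure() is Django's documented standalone-configuration hook.NEWLINEfrom django.conf import settingsNEWLINENEWLINEif not settings.configured:NEWLINE    settings.configure()  # minimal settings so filters that consult settings workNEWLINENEWLINEfrom django.template.defaultfilters import floatformat, slugify, truncatewordsNEWLINENEWLINEprint(floatformat(5555.555, 2))  # '5555.56', as asserted aboveNEWLINEprint(slugify("Un \xe9l\xe9phant \xe0 l'or\xe9e du bois"))  # 'un-elephant-a-loree-du-bois'NEWLINEprint(truncatewords('A sentence with a few words in it', 5))  # 'A sentence with a few ...'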
# -*- coding: utf-8 -*-NEWLINENEWLINE'''NEWLINE**Wavelet Based in CUSUM control chart for filtering signals Project (module**NEWLINE``statsWaveletFilt.miscellaneous`` **):** A miscellany of functions forNEWLINEworking with data and showing wavelet coefficientsNEWLINENEWLINE*Created by Tiarles Guterres, 2018*NEWLINE'''NEWLINENEWLINEdef showWaveletCoeff(coefficients, filename='tmp', format='pdf',NEWLINE                     threshold_value=0, color='black', color_threshold='black',NEWLINE                     figsize=(7, 8), title=''):NEWLINE    '''NEWLINE    Show and save the wavelet and scale coefficients in a plot.NEWLINENEWLINE    ParametersNEWLINE    ----------NEWLINE    coefficients: list of numpy.array'sNEWLINE        With the scale coefficients in position '0'. Equal to theNEWLINE        ``pywt.wavedec()`` return.NEWLINE    filename: stringNEWLINE        Optional, is 'tmp' by default. This is the first part of theNEWLINE        name of the figure.NEWLINE    format: stringNEWLINE        Optional, is 'pdf' by default. This is the last part of the name of theNEWLINE        figure. Can be 'png', 'ps', 'eps' and 'svg' too.NEWLINENEWLINE    threshold_value: int, float or list.NEWLINE        Optional, is 0 by default, which means that nothing new happens.NEWLINE        Otherwise, a line at the threshold value will be plotted in everyNEWLINE        wavelet coefficient plot. This value can be a list too, but it hasNEWLINE        to be the same size as the wavelet coefficients (without the scaleNEWLINE        coefficient).NEWLINENEWLINE    ReturnsNEWLINE    -------NEWLINE    void:NEWLINE        Nothing is returned; the plot is shown and saved.NEWLINENEWLINE    See alsoNEWLINE    --------NEWLINE    pywt.wavedec: Function that decomposes the signal in wavelet andNEWLINE        scale coefficientsNEWLINE    pywt.waverec: Function that recomposes the signal from wavelet andNEWLINE        scale coefficientsNEWLINENEWLINE    filtration.filtration: Function that uses this function to filter viaNEWLINE        wavelet coefficientsNEWLINENEWLINE    filtration.filtrationCusum: Function that uses the Cumulative Sum ControlNEWLINE        Chart and some variations to filter wavelet coefficients.NEWLINE    '''NEWLINENEWLINE    import numpy as npNEWLINE    import matplotlib.pyplot as pltNEWLINENEWLINE    if isinstance(threshold_value, (int, float, np.float64, np.int32,NEWLINE                                    np.int64)):NEWLINE        threshold_list = [threshold_value]*len(coefficients)NEWLINE    else:NEWLINE        # Prepend a 0 so the scale coefficients never get a threshold line.NEWLINE        threshold_list = [0] + list(threshold_value)NEWLINENEWLINE    N = len(coefficients) - 1NEWLINENEWLINE    fig, ax = plt.subplots(len(coefficients), 1, figsize=figsize)NEWLINENEWLINE    ax[0].set_title(title)NEWLINENEWLINE    # Scale CoefficientsNEWLINE    ax[0].plot(coefficients[0], color=color, label='$c_0$ ($c_%d$)' % N)NEWLINE    ax[0].legend(loc=1)NEWLINE    ax[0].grid()NEWLINENEWLINE    # Wavelet CoefficientsNEWLINENEWLINE    for i in range(1, len(coefficients)):NEWLINE        ax[i].plot(coefficients[i], color=color,NEWLINE                   label='$d_%d$ ($d_%d$)' % (i - 1, N - i + 1))NEWLINE        if threshold_list[i] != 0:NEWLINE            x_min, x_max = ax[i].get_xlim()NEWLINE            ax[i].hlines(threshold_list[i], x_min, x_max,NEWLINE                         colors=color_threshold, linestyles='dashed')NEWLINENEWLINE            ax[i].hlines(-threshold_list[i], x_min, x_max,NEWLINE                         colors=color_threshold, linestyles='dashed',NEWLINE                         label='$\\lambda$')NEWLINE        ax[i].legend(loc=1)NEWLINE        ax[i].grid()NEWLINENEWLINE    plt.tight_layout()NEWLINE    plt.savefig('%s' % filename+'.'+format)NEWLINE    plt.show()NEWLINENEWLINE    returnNEWLINENEWLINENEWLINEdef normalizeData(data, min=0, max=1):NEWLINE    '''NEWLINE    It's almost a map function. 
This function normalizes the data betweenNEWLINE    min and max values.NEWLINENEWLINE    ParametersNEWLINE    ----------NEWLINENEWLINE    data: list or array-likeNEWLINE        The values to normalize.NEWLINE    min: int or floatNEWLINE        Optional, is 0 by default. The value that the minimum of the dataNEWLINE        will be mapped to.NEWLINE    max: int or floatNEWLINE        Optional, is 1 by default. The value that the maximum of the dataNEWLINE        will be mapped to.NEWLINENEWLINE    ReturnsNEWLINE    -------NEWLINE    numpy.array:NEWLINE        The data normalized between min and max values.NEWLINENEWLINE    '''NEWLINENEWLINE    import numpy as npNEWLINENEWLINE    data = np.array(data)NEWLINENEWLINE    new_data = data.copy()NEWLINE    max_value = data.max()NEWLINE    min_value = data.min()NEWLINENEWLINE    diff_pp = max_value - min_valueNEWLINE    diff_new_pp = max - minNEWLINENEWLINE    # Shift to zero, scale to [0, 1], then rescale into [min, max].NEWLINE    new_data = new_data - min_valueNEWLINE    new_data = new_data / diff_ppNEWLINENEWLINE    new_data = new_data * diff_new_ppNEWLINE    new_data = new_data + minNEWLINENEWLINE    return new_dataNEWLINENEWLINENEWLINEdef generateData(functions=['doppler', 'block', 'bump', 'heavsine'],NEWLINE                 varNoises=[0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007,NEWLINE                            0.008, 0.009, 0.010],NEWLINE                 dim_signals=1024,NEWLINE                 n_samples_per_sig_per_noise=10000, folder='tmp'):NEWLINE    '''NEWLINE    If you would like to generate your dataset before running your tests, youNEWLINE    can use this function, choosing 1) the type of signal and 2) the quantityNEWLINE    of noise (as a variance). Saves the signals in ``.npy`` files.NEWLINE    '''NEWLINENEWLINE    from statsWaveletFilt.signals import bumpFunction, blockFunctionNEWLINE    from statsWaveletFilt.signals import dopplerFunction, heavsineFunctionNEWLINE    import numpy as npNEWLINE    import osNEWLINENEWLINE    try:NEWLINE        os.mkdir(folder)NEWLINE        print('created folder:', folder)NEWLINE    except FileExistsError:NEWLINE        passNEWLINENEWLINE    n_it = n_samples_per_sig_per_noiseNEWLINENEWLINE    functions_dic = {'doppler': dopplerFunction,NEWLINE                     'block': blockFunction,NEWLINE                     'bump': bumpFunction,NEWLINE                     'heavsine': heavsineFunction}NEWLINENEWLINE    functions_dic_used = {function: functions_dic[function]NEWLINE                          for function in functions}NEWLINENEWLINE    for name, function in functions_dic_used.items():NEWLINE        x, y = function(dim_signals)NEWLINE        print('|----', name)NEWLINENEWLINE        try:NEWLINE            os.mkdir(folder+'/'+name)NEWLINE        except FileExistsError:NEWLINE            passNEWLINENEWLINE        for varNoise in varNoises:NEWLINE            counter = 0NEWLINE            print('|----|----', varNoise)NEWLINE            while counter < n_it:NEWLINE                np.random.seed(counter)NEWLINE                noise = np.random.normal(0, np.sqrt(varNoise), dim_signals)NEWLINENEWLINE                sinalNoisy = y + noiseNEWLINENEWLINE                filename = './%s/%s/%f_%d.npy' % (folder, name, varNoise,NEWLINE                                                  counter)NEWLINENEWLINE                np.save(filename, sinalNoisy)NEWLINE                counter += 1NEWLINE
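# A minimal usage sketch for the statsWaveletFilt.miscellaneous module above. Assumptions:NEWLINE# PyWavelets (``pywt``) is installed and the module is importable under the name its ownNEWLINE# docstring uses; the wavelet 'db4', level 3 and threshold 0.1 are illustration values only.NEWLINEimport numpy as npNEWLINEimport pywtNEWLINENEWLINEfrom statsWaveletFilt.miscellaneous import normalizeData, showWaveletCoeffNEWLINENEWLINEt = np.linspace(0, 1, 1024)NEWLINEsignal = np.sin(8 * np.pi * t) + np.random.normal(0, 0.1, t.size)  # toy noisy signalNEWLINENEWLINE# pywt.wavedec returns [cA_n, cD_n, ..., cD_1]: scale coefficients in position 0,NEWLINE# exactly the layout showWaveletCoeff documents.NEWLINEcoefficients = pywt.wavedec(signal, 'db4', level=3)NEWLINEshowWaveletCoeff(coefficients, filename='demo', format='png', threshold_value=0.1)NEWLINENEWLINEprint(normalizeData([2.0, 4.0, 6.0], min=0, max=1))  # maps to [0, 0.5, 1]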
#!/usr/bin/env python3NEWLINENEWLINEimport osNEWLINEimport jsonNEWLINENEWLINE# print("Content-Type: application/json")NEWLINE# print()NEWLINE# print(json.dumps(dict(os.environ), indent=2))NEWLINENEWLINEprint("Content-Type: text/html")NEWLINEprint()NEWLINEprint(f"<p>QUERY_STRING={os.environ['QUERY_STRING']}")NEWLINEprint(f"<p>HTTP_USER_AGENT={os.environ['HTTP_USER_AGENT']}")
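# A hedged sketch of exercising the CGI script above without a web server, by supplyingNEWLINE# the two environment variables it reads; QUERY_STRING and HTTP_USER_AGENT are standardNEWLINE# CGI variables, while the file name 'env_cgi.py' is a placeholder for wherever theNEWLINE# script was saved.NEWLINEimport osNEWLINEimport subprocessNEWLINEimport sysNEWLINENEWLINEenv = dict(os.environ,NEWLINE           QUERY_STRING='q=hello&page=2',NEWLINE           HTTP_USER_AGENT='curl/8.0')NEWLINEresult = subprocess.run([sys.executable, 'env_cgi.py'],NEWLINE                        env=env, capture_output=True, text=True)NEWLINEprint(result.stdout)  # Content-Type header, a blank line, then the two <p> lines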
def read():NEWLINE numbers = []NEWLINE with open('./docs/numbers.txt', 'r', encoding='utf-8') as f:NEWLINE for line in f:NEWLINE numbers.append(int(line))NEWLINE print(numbers)NEWLINENEWLINEdef write():NEWLINE names = ['Jesús', 'Facundo', 'Miguel', 'Christian', 'Adal', 'Karol', 'Nicolás']NEWLINE with open('./docs/names.txt', 'a', encoding='utf-8') as f:NEWLINE for name in names:NEWLINE f.write(name)NEWLINE f.write('\n')NEWLINENEWLINEdef run():NEWLINE write()NEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE run()
import sysNEWLINEimport importlib.resourcesNEWLINEimport pickleNEWLINEimport argparseNEWLINEimport reNEWLINEfrom contextlib import contextmanagerNEWLINEfrom collections import CounterNEWLINEfrom apycula import chipdbNEWLINENEWLINEclass Bba(object):NEWLINENEWLINE def __init__(self, file):NEWLINE self.file = fileNEWLINE self.block_idx = Counter()NEWLINENEWLINE def __getattr__(self, attr):NEWLINE def write_value(val):NEWLINE self.file.write(f"{attr} {val}\n")NEWLINE return write_valueNEWLINENEWLINE def str(self, val, sep="|"):NEWLINE self.file.write(f"str {sep}{val}{sep}\n")NEWLINENEWLINE @contextmanagerNEWLINE def block(self, prefix="block"):NEWLINE idx = self.block_idx[prefix]NEWLINE self.block_idx.update([prefix])NEWLINE name = f"{prefix}_{idx}"NEWLINE self.push(name)NEWLINE self.label(name)NEWLINE try:NEWLINE yield nameNEWLINE finally:NEWLINE self.pop(name)NEWLINENEWLINEconstids = ['']NEWLINEids = []NEWLINEdef id_string(s):NEWLINE try:NEWLINE return constids.index(s)NEWLINE except ValueError:NEWLINE passNEWLINE try:NEWLINE return len(constids)+ids.index(s)NEWLINE except ValueError:NEWLINE ids.append(s)NEWLINE return len(constids)+len(ids)-1NEWLINENEWLINEdef id_strings(b):NEWLINE with b.block('idstrings') as blk:NEWLINE for s in ids:NEWLINE b.str(s)NEWLINE b.u16(len(constids))NEWLINE b.u16(len(ids))NEWLINE b.ref(blk)NEWLINENEWLINEdef write_pips(b, pips):NEWLINE num = 0NEWLINE with b.block("pips") as blk:NEWLINE for dest, srcs in pips.items():NEWLINE for src in srcs:NEWLINE num += 1NEWLINE b.u16(id_string(dest))NEWLINE b.u16(id_string(src))NEWLINE b.u32(num)NEWLINE b.ref(blk)NEWLINENEWLINEdef write_bels(b, bels):NEWLINE with b.block("bels") as blk:NEWLINE for typ, bel in bels.items():NEWLINE if bel.simplified_iob:NEWLINE b.u16(id_string(f'{typ}S'))NEWLINE else:NEWLINE b.u16(id_string(typ))NEWLINE with b.block("portmap") as port_blk:NEWLINE for dest, src in bel.portmap.items():NEWLINE b.u16(id_string(dest))NEWLINE b.u16(id_string(src))NEWLINE b.u16(len(bel.portmap))NEWLINE b.ref(port_blk)NEWLINENEWLINENEWLINE b.u32(len(bels))NEWLINE b.ref(blk)NEWLINENEWLINEdef write_aliases(b, aliases):NEWLINE with b.block('aliases') as blk:NEWLINE for dest, src in aliases.items():NEWLINE b.u16(id_string(dest))NEWLINE b.u16(id_string(src))NEWLINE b.u32(len(aliases))NEWLINE b.ref(blk)NEWLINENEWLINEdef write_tile(b, tile):NEWLINE with b.block('tile') as blk:NEWLINE write_bels(b, tile.bels)NEWLINE write_pips(b, tile.pips)NEWLINE write_pips(b, tile.clock_pips)NEWLINE write_aliases(b, tile.aliases)NEWLINE return blkNEWLINENEWLINEdef write_grid(b, grid):NEWLINE tiles = {}NEWLINE with b.block('grid') as grid_block:NEWLINE for row in grid:NEWLINE for tile in row:NEWLINE if id(tile) in tiles:NEWLINE b.ref(tiles[id(tile)])NEWLINE else:NEWLINE blk = write_tile(b, tile)NEWLINE tiles[id(tile)] = blkNEWLINE b.ref(blk)NEWLINE b.ref(grid_block)NEWLINENEWLINENEWLINEdef write_global_aliases(b, db):NEWLINE with b.block('aliases') as blk:NEWLINE aliases = sorted(db.aliases.items(),NEWLINE key=lambda i: (i[0][0], i[0][1], id_string(i[0][2])))NEWLINE for (drow, dcol, dest), (srow, scol, src) in aliases:NEWLINE b.u16(drow)NEWLINE b.u16(dcol)NEWLINE b.u16(id_string(dest))NEWLINE b.u16(srow)NEWLINE b.u16(scol)NEWLINE b.u16(id_string(src))NEWLINE b.u32(len(db.aliases))NEWLINE b.ref(blk)NEWLINENEWLINEdef write_timing(b, timing):NEWLINE with b.block('timing') as blk:NEWLINE for speed, groups in timing.items():NEWLINE b.u32(id_string(speed))NEWLINE with b.block('timing_group') as tg:NEWLINE for group, types in groups.items():NEWLINE 
b.u32(id_string(group))NEWLINE with b.block('timing_types') as tt:NEWLINE for name, items in types.items():NEWLINE try:NEWLINE items[0] # QUACKING THE DUCKNEWLINE b.u32(id_string(name))NEWLINE for item in items:NEWLINE b.u32(int(item*1000))NEWLINE except TypeError:NEWLINE passNEWLINE b.u32(len(types))NEWLINE b.ref(tt)NEWLINE b.u32(len(groups))NEWLINE b.ref(tg)NEWLINE b.u32(len(timing))NEWLINE b.ref(blk)NEWLINENEWLINEdef write_partnumber_packages(b, db):NEWLINE with b.block("partnumber_packages") as blk:NEWLINE for partnumber, pkg_rec in db.packages.items():NEWLINE pkg, device, speed = pkg_recNEWLINE b.u32(id_string(partnumber))NEWLINE b.u32(id_string(pkg))NEWLINE b.u32(id_string(device))NEWLINE b.u32(id_string(speed))NEWLINE b.u32(len(db.packages))NEWLINE b.ref(blk)NEWLINENEWLINEpin_re = re.compile(r"IO([TBRL])(\d+)([A-Z])")NEWLINEdef iob2bel(db, name):NEWLINE banks = {'T': [(1, n) for n in range(1, db.cols)],NEWLINE 'B': [(db.rows, n) for n in range(1, db.cols)],NEWLINE 'L': [(n, 1) for n in range(1, db.rows)],NEWLINE 'R': [(n, db.cols) for n in range(1, db.rows)]}NEWLINE side, num, pin = pin_re.match(name).groups()NEWLINE row, col = banks[side][int(num)-1]NEWLINE return f"R{row}C{col}_IOB{pin}"NEWLINENEWLINEdef write_pinout(b, db):NEWLINE with b.block("variants") as blk:NEWLINE for device, pkgs in db.pinout.items():NEWLINE b.u32(id_string(device))NEWLINE with b.block("packages") as pkgblk:NEWLINE for pkg, pins in pkgs.items():NEWLINE b.u32(id_string(pkg))NEWLINE with b.block("pins") as pinblk:NEWLINE for num, loc in pins.items():NEWLINE b.u16(id_string(num))NEWLINE b.u16(id_string(iob2bel(db, loc)))NEWLINE b.u32(len(pins))NEWLINE b.ref(pinblk)NEWLINE b.u32(len(pkgs))NEWLINE b.ref(pkgblk)NEWLINE b.u32(len(db.pinout))NEWLINE b.ref(blk)NEWLINENEWLINEdef write_chipdb(db, f, device):NEWLINE cdev=device.replace('-', '_')NEWLINE b = Bba(f)NEWLINE b.pre('#include "nextpnr.h"')NEWLINE b.pre('#include "embed.h"')NEWLINE b.pre('NEXTPNR_NAMESPACE_BEGIN')NEWLINE with b.block(f'chipdb_{cdev}') as blk:NEWLINE b.str(device)NEWLINE b.u32(1) # versionNEWLINE b.u16(db.rows)NEWLINE b.u16(db.cols)NEWLINE write_grid(b, db.grid)NEWLINE write_global_aliases(b, db)NEWLINE write_timing(b, db.timing)NEWLINE write_partnumber_packages(b, db)NEWLINE write_pinout(b, db)NEWLINE id_strings(b)NEWLINE b.post(f'EmbeddedFile chipdb_file_{cdev}("gowin/chipdb-{device}.bin", {blk});')NEWLINE b.post('NEXTPNR_NAMESPACE_END')NEWLINENEWLINEdef read_constids(f):NEWLINE xre = re.compile(r"X\((.*)\)")NEWLINE for line in f:NEWLINE m = xre.match(line)NEWLINE if m:NEWLINE constids.append(m.group(1))NEWLINE return idsNEWLINENEWLINENEWLINEdef main():NEWLINE parser = argparse.ArgumentParser(description='Make Gowin BBA')NEWLINE parser.add_argument('-d', '--device', required=True)NEWLINE parser.add_argument('-i', '--constids', type=argparse.FileType('r'), default=sys.stdin)NEWLINE parser.add_argument('-o', '--output', type=argparse.FileType('w'), default=sys.stdout)NEWLINENEWLINE args = parser.parse_args()NEWLINE read_constids(args.constids)NEWLINE with importlib.resources.open_binary("apycula", f"{args.device}.pickle") as f:NEWLINE db = pickle.load(f)NEWLINE write_chipdb(db, args.output, args.device)NEWLINENEWLINEif __name__ == "__main__":NEWLINE main()NEWLINE
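# A small sketch of the Bba writer's __getattr__ trick used throughout the module above:NEWLINE# any unknown attribute becomes a one-argument command writer, and block() brackets itsNEWLINE# body with push/label/pop. Assumes the Bba class above is in scope; io.StringIO and theNEWLINE# values are illustration only, not a real chipdb.NEWLINEimport ioNEWLINENEWLINEout = io.StringIO()NEWLINEb = Bba(out)NEWLINEb.u16(42)           # __getattr__('u16') returns a writer -> emits "u16 42"NEWLINEb.str('R1C1_IOBA')  # str() is defined explicitly so the separator can be chosenNEWLINEwith b.block('demo'):NEWLINE    b.u32(7)NEWLINEprint(out.getvalue())NEWLINE# u16 42NEWLINE# str |R1C1_IOBA|NEWLINE# push demo_0NEWLINE# label demo_0NEWLINE# u32 7NEWLINE# pop demo_0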
from django.conf.urls import urlNEWLINENEWLINEfrom .views import ImageDetail, ImageListNEWLINENEWLINENEWLINEurlpatterns = [NEWLINE url(r'^(?P<pk>\d+)/$', ImageDetail.as_view(), name='image-detail'),NEWLINE url(r'^(?P<gallery_ct>[a-z]+)/(?P<gallery_id>\d+)/$', ImageList.as_view(), name='image-list'),NEWLINE]NEWLINE
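# A hedged sketch of resolving the two routes above by name. Assumptions: a pre-2.0NEWLINE# Django matching the django.conf.urls.url import (reverse() lived inNEWLINE# django.core.urlresolvers there), the urlconf mounted at the site root, and theNEWLINE# 'album'/7/3 values being placeholders.NEWLINEfrom django.core.urlresolvers import reverseNEWLINENEWLINEprint(reverse('image-detail', kwargs={'pk': 3}))  # -> '/3/'NEWLINEprint(reverse('image-list', kwargs={'gallery_ct': 'album', 'gallery_id': 7}))  # -> '/album/7/'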
# Generated by Django 2.1 on 2019-09-24 20:31NEWLINENEWLINEfrom django.db import migrations, modelsNEWLINENEWLINENEWLINEclass Migration(migrations.Migration):NEWLINENEWLINE dependencies = [NEWLINE ('authentication', '0001_initial'),NEWLINE ]NEWLINENEWLINE operations = [NEWLINE migrations.AlterField(NEWLINE model_name='user',NEWLINE name='last_name',NEWLINE field=models.CharField(blank=True, max_length=150, verbose_name='last name'),NEWLINE ),NEWLINE ]NEWLINE
from __future__ import absolute_importNEWLINENEWLINEimport osNEWLINEimport reNEWLINENEWLINEfrom django.conf import settingsNEWLINEfrom django.template import TemplateDoesNotExistNEWLINEfrom django.template.loaders import cachedNEWLINENEWLINEfrom pypugjs.utils import processNEWLINEfrom .compiler import CompilerNEWLINENEWLINENEWLINEclass Loader(cached.Loader):NEWLINE    is_usable = TrueNEWLINENEWLINE    def include_pug_sources(self, contents):NEWLINE        """Let's fetch top-level pug includes to enable mixins"""NEWLINE        match = re.search(r'^include (.*)$', contents, re.MULTILINE)NEWLINE        while match:NEWLINE            mixin_name = match.groups()[0]NEWLINE            origin = [o for o in self.get_template_sources(mixin_name)][0]NEWLINE            template = origin.loader.get_contents(origin)NEWLINE            template = self.include_pug_sources(template)NEWLINE            contents = re.sub(r'^include (.*)$', template, contents, flags=re.MULTILINE)NEWLINE            match = re.search(r'^include (.*)$', contents, re.MULTILINE)NEWLINE        return contentsNEWLINENEWLINE    def get_contents(self, origin):NEWLINE        contents = origin.loader.get_contents(origin)NEWLINE        if os.path.splitext(origin.template_name)[1] in ('.pug', '.jade'):NEWLINE            contents = self.include_pug_sources(contents)NEWLINE            contents = process(NEWLINE                contents, filename=origin.template_name, compiler=CompilerNEWLINE            )NEWLINE        return contentsNEWLINENEWLINE    def get_template(self, template_name, **kwargs):NEWLINE        """NEWLINE        Uses the cache if debug is False; otherwise re-reads from the file system.NEWLINE        """NEWLINE        if getattr(settings, 'TEMPLATE_DEBUG', settings.DEBUG):NEWLINE            try:NEWLINE                return super(cached.Loader, self).get_template(template_name, **kwargs)NEWLINE            # TODO: Change IOError to FileNotFoundError after future==0.17.0NEWLINE            except IOError:NEWLINE                raise TemplateDoesNotExist(template_name)NEWLINENEWLINE        return super(Loader, self).get_template(template_name, **kwargs)NEWLINE
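# A hedged configuration sketch for wiring the pug Loader above into Django's TEMPLATESNEWLINE# setting, following the wrapping pattern from the pypugjs README; the dotted pathNEWLINE# 'pypugjs.ext.django.Loader' assumes the module above is exposed there.NEWLINETEMPLATES = [{NEWLINE    'BACKEND': 'django.template.backends.django.DjangoTemplates',NEWLINE    'DIRS': [],NEWLINE    'APP_DIRS': False,  # APP_DIRS and an explicit 'loaders' list are mutually exclusiveNEWLINE    'OPTIONS': {NEWLINE        'loaders': [NEWLINE            ('pypugjs.ext.django.Loader', (NEWLINE                'django.template.loaders.filesystem.Loader',NEWLINE                'django.template.loaders.app_directories.Loader',NEWLINE            )),NEWLINE        ],NEWLINE    },NEWLINE}]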
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.NEWLINE#NEWLINE# Licensed under the Apache License, Version 2.0 (the "License");NEWLINE# you may not use this file except in compliance with the License.NEWLINE# You may obtain a copy of the License atNEWLINE#NEWLINE# http://www.apache.org/licenses/LICENSE-2.0NEWLINE#NEWLINE# Unless required by applicable law or agreed to in writing, softwareNEWLINE# distributed under the License is distributed on an "AS IS" BASIS,NEWLINE# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.NEWLINE# See the License for the specific language governing permissions andNEWLINE# limitations under the License.NEWLINEimport base64NEWLINEimport ioNEWLINEimport timeNEWLINENEWLINEimport librosaNEWLINEimport numpy as npNEWLINEimport paddleNEWLINEimport soundfile as sfNEWLINEfrom scipy.io import wavfileNEWLINENEWLINEfrom paddlespeech.cli.log import loggerNEWLINEfrom paddlespeech.cli.tts.infer import TTSExecutorNEWLINEfrom paddlespeech.server.engine.base_engine import BaseEngineNEWLINEfrom paddlespeech.server.utils.audio_process import change_speedNEWLINEfrom paddlespeech.server.utils.errors import ErrorCodeNEWLINEfrom paddlespeech.server.utils.exception import ServerBaseExceptionNEWLINENEWLINE__all__ = ['TTSEngine']NEWLINENEWLINENEWLINEclass TTSServerExecutor(TTSExecutor):NEWLINE def __init__(self):NEWLINE super().__init__()NEWLINE passNEWLINENEWLINENEWLINEclass TTSEngine(BaseEngine):NEWLINE """TTS server engineNEWLINENEWLINE Args:NEWLINE metaclass: Defaults to Singleton.NEWLINE """NEWLINENEWLINE def __init__(self, name=None):NEWLINE """Initialize TTS server engineNEWLINE """NEWLINE super(TTSEngine, self).__init__()NEWLINENEWLINE def init(self, config: dict) -> bool:NEWLINE self.executor = TTSServerExecutor()NEWLINE self.config = configNEWLINENEWLINE try:NEWLINE if self.config.device is not None:NEWLINE self.device = self.config.deviceNEWLINE else:NEWLINE self.device = paddle.get_device()NEWLINE paddle.set_device(self.device)NEWLINE except BaseException as e:NEWLINE logger.error(NEWLINE "Set device failed, please check if device is already used and the parameter 'device' in the yaml file"NEWLINE )NEWLINE logger.error("Initialize TTS server engine Failed on device: %s." %NEWLINE (self.device))NEWLINE return FalseNEWLINENEWLINE try:NEWLINE self.executor._init_from_path(NEWLINE am=self.config.am,NEWLINE am_config=self.config.am_config,NEWLINE am_ckpt=self.config.am_ckpt,NEWLINE am_stat=self.config.am_stat,NEWLINE phones_dict=self.config.phones_dict,NEWLINE tones_dict=self.config.tones_dict,NEWLINE speaker_dict=self.config.speaker_dict,NEWLINE voc=self.config.voc,NEWLINE voc_config=self.config.voc_config,NEWLINE voc_ckpt=self.config.voc_ckpt,NEWLINE voc_stat=self.config.voc_stat,NEWLINE lang=self.config.lang)NEWLINE except BaseException:NEWLINE logger.error("Failed to get model related files.")NEWLINE logger.error("Initialize TTS server engine Failed on device: %s." %NEWLINE (self.device))NEWLINE return FalseNEWLINENEWLINE # warm upNEWLINE try:NEWLINE self.warm_up()NEWLINE logger.info("Warm up successfully.")NEWLINE except Exception as e:NEWLINE logger.error("Failed to warm up on tts engine.")NEWLINE return FalseNEWLINENEWLINE logger.info("Initialize TTS server engine successfully on device: %s." 
%NEWLINE                    (self.device))NEWLINE        return TrueNEWLINENEWLINE    def warm_up(self):NEWLINE        """warm upNEWLINE        """NEWLINE        if self.config.lang == 'zh':NEWLINE            sentence = "您好,欢迎使用语音合成服务。"NEWLINE        elif self.config.lang == 'en':NEWLINE            sentence = "Hello and welcome to the speech synthesis service."NEWLINE        logger.info("Start to warm up.")NEWLINE        for i in range(3):NEWLINE            st = time.time()NEWLINE            self.executor.infer(NEWLINE                text=sentence,NEWLINE                lang=self.config.lang,NEWLINE                am=self.config.am,NEWLINE                spk_id=0, )NEWLINE            logger.info(NEWLINE                f"The response time of the {i} warm up: {time.time() - st} s")NEWLINENEWLINE    def postprocess(self,NEWLINE                    wav,NEWLINE                    original_fs: int,NEWLINE                    target_fs: int=0,NEWLINE                    volume: float=1.0,NEWLINE                    speed: float=1.0,NEWLINE                    audio_path: str=None):NEWLINE        """Post-processing operations, including speed, volume, sample rate, and saving the audio fileNEWLINENEWLINE        Args:NEWLINE            wav (numpy(float)): Synthesized audio sample pointsNEWLINE            original_fs (int): original audio sample rateNEWLINE            target_fs (int): target audio sample rateNEWLINE            volume (float): target volumeNEWLINE            speed (float): target speedNEWLINE            audio_path (str): path to save the audio to; None means do not saveNEWLINENEWLINE        Raises:NEWLINE            ServerBaseException: Thrown if changing the speed fails.NEWLINENEWLINE        Returns:NEWLINE            target_fs: target sample rate for synthesized audio.NEWLINE            wav_base64: The base64 format of the synthesized audio.NEWLINE        """NEWLINENEWLINE        # transform sample_rateNEWLINE        if target_fs == 0 or target_fs > original_fs:NEWLINE            target_fs = original_fsNEWLINE            wav_tar_fs = wavNEWLINE            logger.info(NEWLINE                "The sample rate of the synthesized audio is the same as the model's, which is {}Hz".NEWLINE                format(original_fs))NEWLINE        else:NEWLINE            wav_tar_fs = librosa.resample(NEWLINE                np.squeeze(wav), original_fs, target_fs)NEWLINE            logger.info(NEWLINE                "The sample rate of the model is {}Hz and the target sample rate is {}Hz. Converted the sample rate of the synthesized audio successfully.".NEWLINE                format(original_fs, target_fs))NEWLINE        # transform volumeNEWLINE        wav_vol = wav_tar_fs * volumeNEWLINE        logger.info("Transformed the volume of the audio successfully.")NEWLINENEWLINE        # transform speedNEWLINE        try:  # windows does not support soxbindingsNEWLINE            wav_speed = change_speed(wav_vol, speed, target_fs)NEWLINE            logger.info("Transformed the speed of the audio successfully.")NEWLINE        except ServerBaseException:NEWLINE            raise ServerBaseException(NEWLINE                ErrorCode.SERVER_INTERNAL_ERR,NEWLINE                "Failed to transform speed. Cannot install soxbindings on your system. 
\NEWLINE                You need to set the speed value to 1.0.")NEWLINE        except BaseException:NEWLINE            logger.error("Failed to transform speed.")NEWLINE            wav_speed = wav_vol  # fall back to the unmodified-speed audio so the variable is definedNEWLINENEWLINE        # wav to base64NEWLINE        buf = io.BytesIO()NEWLINE        wavfile.write(buf, target_fs, wav_speed)NEWLINE        buf.seek(0)  # rewind before reading; otherwise read() returns empty bytesNEWLINE        base64_bytes = base64.b64encode(buf.read())NEWLINE        wav_base64 = base64_bytes.decode('utf-8')NEWLINE        logger.info("Audio to string successfully.")NEWLINENEWLINE        # save audioNEWLINE        if audio_path is not None:NEWLINE            if audio_path.endswith(".wav"):NEWLINE                sf.write(audio_path, wav_speed, target_fs)NEWLINE            elif audio_path.endswith(".pcm"):NEWLINE                wav_norm = wav_speed * (32767 / max(0.001,NEWLINE                                                    np.max(np.abs(wav_speed))))NEWLINE                with open(audio_path, "wb") as f:NEWLINE                    f.write(wav_norm.astype(np.int16))NEWLINE            logger.info("Save audio to {} successfully.".format(audio_path))NEWLINE        else:NEWLINE            logger.info("There is no need to save audio.")NEWLINENEWLINE        return target_fs, wav_base64NEWLINENEWLINE    def run(self,NEWLINE            sentence: str,NEWLINE            spk_id: int=0,NEWLINE            speed: float=1.0,NEWLINE            volume: float=1.0,NEWLINE            sample_rate: int=0,NEWLINE            save_path: str=None):NEWLINE        """ run includes inference and postprocessing.NEWLINENEWLINE        Args:NEWLINE            sentence (str): text to be synthesizedNEWLINE            spk_id (int, optional): speaker id for multi-speaker speech synthesis. Defaults to 0.NEWLINE            speed (float, optional): speed. Defaults to 1.0.NEWLINE            volume (float, optional): volume. Defaults to 1.0.NEWLINE            sample_rate (int, optional): target sample rate for synthesized audio,NEWLINE                0 means the same as the model sampling rate. Defaults to 0.NEWLINE            save_path (str, optional): The save path of the synthesized audio.NEWLINE                None means do not save audio. Defaults to None.NEWLINENEWLINE        Raises:NEWLINE            ServerBaseException: Thrown if TTS inference fails.NEWLINE            ServerBaseException: Thrown if postprocessing fails.NEWLINENEWLINE        Returns:NEWLINE            lang: model languageNEWLINE            target_sample_rate: target sample rate for synthesized audio.NEWLINE            wav_base64: The base64 format of the synthesized audio.NEWLINE        """NEWLINENEWLINE        lang = self.config.langNEWLINENEWLINE        try:NEWLINE            infer_st = time.time()NEWLINE            self.executor.infer(NEWLINE                text=sentence, lang=lang, am=self.config.am, spk_id=spk_id)NEWLINE            infer_et = time.time()NEWLINE            infer_time = infer_et - infer_stNEWLINE            duration = len(self.executor._outputs['wav']NEWLINE                           .numpy()) / self.executor.am_config.fsNEWLINE            rtf = infer_time / durationNEWLINENEWLINE        except ServerBaseException:NEWLINE            raise ServerBaseException(ErrorCode.SERVER_INTERNAL_ERR,NEWLINE                                      "tts infer failed.")NEWLINE        except BaseException:NEWLINE            logger.error("tts infer failed.")NEWLINE            raise  # re-raise: the timing variables below would be undefined otherwiseNEWLINENEWLINE        try:NEWLINE            postprocess_st = time.time()NEWLINE            target_sample_rate, wav_base64 = self.postprocess(NEWLINE                wav=self.executor._outputs['wav'].numpy(),NEWLINE                original_fs=self.executor.am_config.fs,NEWLINE                target_fs=sample_rate,NEWLINE                volume=volume,NEWLINE                speed=speed,NEWLINE                audio_path=save_path)NEWLINE            postprocess_et = time.time()NEWLINE            postprocess_time = postprocess_et - postprocess_stNEWLINENEWLINE        except ServerBaseException:NEWLINE            raise ServerBaseException(ErrorCode.SERVER_INTERNAL_ERR,NEWLINE                                      "tts postprocess failed.")NEWLINE        except BaseException:NEWLINE            logger.error("tts postprocess failed.")NEWLINE            raise  # re-raise: postprocess_time/wav_base64 would be undefined otherwiseNEWLINENEWLINE        logger.info("AM model: {}".format(self.config.am))NEWLINE        logger.info("Vocoder model: {}".format(self.config.voc))NEWLINE        logger.info("Language: {}".format(lang))NEWLINE        logger.info("tts engine type: python")NEWLINENEWLINE        logger.info("audio duration: 
{}".format(duration))NEWLINE logger.info(NEWLINE "frontend inference time: {}".format(self.executor.frontend_time))NEWLINE logger.info("AM inference time: {}".format(self.executor.am_time))NEWLINE logger.info("Vocoder inference time: {}".format(self.executor.voc_time))NEWLINE logger.info("total inference time: {}".format(infer_time))NEWLINE logger.info(NEWLINE "postprocess (change speed, volume, target sample rate) time: {}".NEWLINE format(postprocess_time))NEWLINE logger.info("total generate audio time: {}".format(infer_time +NEWLINE postprocess_time))NEWLINE logger.info("RTF: {}".format(rtf))NEWLINE logger.info("device: {}".format(self.device))NEWLINENEWLINE return lang, target_sample_rate, duration, wav_base64NEWLINE
# ===================================================================NEWLINE#NEWLINE# Copyright (c) 2015, Legrandin <helderijs@gmail.com>NEWLINE# All rights reserved.NEWLINE#NEWLINE# Redistribution and use in source and binary forms, with or withoutNEWLINE# modification, are permitted provided that the following conditionsNEWLINE# are met:NEWLINE#NEWLINE# 1. Redistributions of source code must retain the above copyrightNEWLINE# notice, this list of conditions and the following disclaimer.NEWLINE# 2. Redistributions in binary form must reproduce the above copyrightNEWLINE# notice, this list of conditions and the following disclaimer inNEWLINE# the documentation and/or other materials provided with theNEWLINE# distribution.NEWLINE#NEWLINE# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORSNEWLINE# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOTNEWLINE# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESSNEWLINE# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THENEWLINE# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,NEWLINE# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,NEWLINE# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;NEWLINE# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVERNEWLINE# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICTNEWLINE# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING INNEWLINE# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THENEWLINE# POSSIBILITY OF SUCH DAMAGE.NEWLINE# ===================================================================NEWLINENEWLINEfrom __future__ import print_functionNEWLINENEWLINEimport structNEWLINEimport binasciiNEWLINEfrom collections import namedtupleNEWLINENEWLINEfrom Cryptodome.Util.py3compat import bord, tobytes, tostr, bchr, is_stringNEWLINENEWLINEfrom Cryptodome.Math.Numbers import IntegerNEWLINEfrom Cryptodome.Random import get_random_bytesNEWLINEfrom Cryptodome.Util.asn1 import (DerObjectId, DerOctetString, DerSequence,NEWLINE DerBitString)NEWLINENEWLINEfrom Cryptodome.IO import PKCS8, PEMNEWLINEfrom Cryptodome.PublicKey import (_expand_subject_public_key_info,NEWLINE _create_subject_public_key_info,NEWLINE _extract_subject_public_key_info)NEWLINENEWLINENEWLINEclass UnsupportedEccFeature(ValueError):NEWLINE passNEWLINENEWLINENEWLINEclass EccPoint(object):NEWLINE """A class to abstract a point over an Elliptic Curve.NEWLINENEWLINE :ivar x: The X-coordinate of the ECC pointNEWLINE :vartype x: integerNEWLINENEWLINE :ivar y: The Y-coordinate of the ECC pointNEWLINE :vartype y: integerNEWLINE """NEWLINENEWLINE def __init__(self, x, y):NEWLINE self._x = Integer(x)NEWLINE self._y = Integer(y)NEWLINENEWLINE # BuffersNEWLINE self._common = Integer(0)NEWLINE self._tmp1 = Integer(0)NEWLINE self._x3 = Integer(0)NEWLINE self._y3 = Integer(0)NEWLINENEWLINE def set(self, point):NEWLINE self._x = Integer(point._x)NEWLINE self._y = Integer(point._y)NEWLINE return selfNEWLINENEWLINE def __eq__(self, point):NEWLINE return self._x == point._x and self._y == point._yNEWLINENEWLINE def __neg__(self):NEWLINE if self.is_point_at_infinity():NEWLINE return self.point_at_infinity()NEWLINE return EccPoint(self._x, _curve.p - self._y)NEWLINENEWLINE def copy(self):NEWLINE return EccPoint(self._x, self._y)NEWLINENEWLINE def is_point_at_infinity(self):NEWLINE return not (self._x or self._y)NEWLINENEWLINE @staticmethodNEWLINE def point_at_infinity():NEWLINE return EccPoint(0, 
0)NEWLINENEWLINE @propertyNEWLINE def x(self):NEWLINE if self.is_point_at_infinity():NEWLINE raise ValueError("Point at infinity")NEWLINE return self._xNEWLINENEWLINE @propertyNEWLINE def y(self):NEWLINE if self.is_point_at_infinity():NEWLINE raise ValueError("Point at infinity")NEWLINE return self._yNEWLINENEWLINE def double(self):NEWLINE """Double this point (in-place operation).NEWLINENEWLINE :Return:NEWLINE :class:`EccPoint` : this same object (to enable chaining)NEWLINE """NEWLINENEWLINE if not self._y:NEWLINE return self.point_at_infinity()NEWLINENEWLINE common = self._commonNEWLINE tmp1 = self._tmp1NEWLINE x3 = self._x3NEWLINE y3 = self._y3NEWLINENEWLINE # common = (pow(self._x, 2, _curve.p) * 3 - 3) * (self._y << 1).inverse(_curve.p) % _curve.pNEWLINE common.set(self._x)NEWLINE common.inplace_pow(2, _curve.p)NEWLINE common *= 3NEWLINE common -= 3NEWLINE tmp1.set(self._y)NEWLINE tmp1 <<= 1NEWLINE tmp1.inplace_inverse(_curve.p)NEWLINE common *= tmp1NEWLINE common %= _curve.pNEWLINENEWLINE # x3 = (pow(common, 2, _curve.p) - 2 * self._x) % _curve.pNEWLINE x3.set(common)NEWLINE x3.inplace_pow(2, _curve.p)NEWLINE x3 -= self._xNEWLINE x3 -= self._xNEWLINE while x3.is_negative():NEWLINE x3 += _curve.pNEWLINENEWLINE # y3 = ((self._x - x3) * common - self._y) % _curve.pNEWLINE y3.set(self._x)NEWLINE y3 -= x3NEWLINE y3 *= commonNEWLINE y3 -= self._yNEWLINE y3 %= _curve.pNEWLINENEWLINE self._x.set(x3)NEWLINE self._y.set(y3)NEWLINE return selfNEWLINENEWLINE def __iadd__(self, point):NEWLINE """Add a second point to this one"""NEWLINENEWLINE if self.is_point_at_infinity():NEWLINE return self.set(point)NEWLINENEWLINE if point.is_point_at_infinity():NEWLINE return selfNEWLINENEWLINE if self == point:NEWLINE return self.double()NEWLINENEWLINE if self._x == point._x:NEWLINE return self.set(self.point_at_infinity())NEWLINENEWLINE common = self._commonNEWLINE tmp1 = self._tmp1NEWLINE x3 = self._x3NEWLINE y3 = self._y3NEWLINENEWLINE # common = (point._y - self._y) * (point._x - self._x).inverse(_curve.p) % _curve.pNEWLINE common.set(point._y)NEWLINE common -= self._yNEWLINE tmp1.set(point._x)NEWLINE tmp1 -= self._xNEWLINE tmp1.inplace_inverse(_curve.p)NEWLINE common *= tmp1NEWLINE common %= _curve.pNEWLINENEWLINE # x3 = (pow(common, 2, _curve.p) - self._x - point._x) % _curve.pNEWLINE x3.set(common)NEWLINE x3.inplace_pow(2, _curve.p)NEWLINE x3 -= self._xNEWLINE x3 -= point._xNEWLINE while x3.is_negative():NEWLINE x3 += _curve.pNEWLINENEWLINE # y3 = ((self._x - x3) * common - self._y) % _curve.pNEWLINE y3.set(self._x)NEWLINE y3 -= x3NEWLINE y3 *= commonNEWLINE y3 -= self._yNEWLINE y3 %= _curve.pNEWLINENEWLINE self._x.set(x3)NEWLINE self._y.set(y3)NEWLINE return selfNEWLINENEWLINE def __add__(self, point):NEWLINE """Return a new point, the addition of this one and another"""NEWLINENEWLINE result = self.copy()NEWLINE result += pointNEWLINE return resultNEWLINENEWLINE def __mul__(self, scalar):NEWLINE """Return a new point, the scalar product of this one"""NEWLINENEWLINE if scalar < 0:NEWLINE raise ValueError("Scalar multiplication only defined for non-negative integers")NEWLINENEWLINE # Trivial resultsNEWLINE if scalar == 0 or self.is_point_at_infinity():NEWLINE return self.point_at_infinity()NEWLINE elif scalar == 1:NEWLINE return self.copy()NEWLINENEWLINE # Scalar randomizationNEWLINE scalar_blind = Integer.random(exact_bits=64) * _curve.order + scalarNEWLINENEWLINE # Montgomery key ladderNEWLINE r = [self.point_at_infinity().copy(), self.copy()]NEWLINE bit_size = int(scalar_blind.size_in_bits())NEWLINE 
scalar_int = int(scalar_blind)NEWLINE        for i in range(bit_size, -1, -1):NEWLINE            di = scalar_int >> i & 1NEWLINE            r[di ^ 1] += r[di]NEWLINE            r[di].double()NEWLINENEWLINE        return r[0]NEWLINENEWLINENEWLINE_Curve = namedtuple("_Curve", "p b order Gx Gy G names oid")NEWLINENEWLINE_curve_gx = Integer(0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296)NEWLINE_curve_gy = Integer(0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5)NEWLINENEWLINE_curve = _Curve(NEWLINE    Integer(0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff),NEWLINE    Integer(0x5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b),NEWLINE    Integer(0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551),NEWLINE    _curve_gx,NEWLINE    _curve_gy,NEWLINE    EccPoint(_curve_gx, _curve_gy),NEWLINE    ("P-256", "prime256v1", "secp256r1"),NEWLINE    "1.2.840.10045.3.1.7",NEWLINE)NEWLINEdel namedtupleNEWLINENEWLINENEWLINEclass EccKey(object):NEWLINE    r"""Class defining an ECC key.NEWLINE    Do not instantiate directly.NEWLINE    Use :func:`generate`, :func:`construct` or :func:`import_key` instead.NEWLINENEWLINE    :ivar curve: The name of the ECC curveNEWLINE    :vartype curve: stringNEWLINENEWLINE    :ivar pointQ: an ECC point representing the public componentNEWLINE    :vartype pointQ: :class:`EccPoint`NEWLINENEWLINE    :ivar d: A scalar representing the private componentNEWLINE    :vartype d: integerNEWLINE    """NEWLINENEWLINE    def __init__(self, **kwargs):NEWLINE        """Create a new ECC keyNEWLINENEWLINE        Keywords:NEWLINE          curve : stringNEWLINE            It must be *"P-256"*, *"prime256v1"* or *"secp256r1"*.NEWLINE          d : integerNEWLINE            Only for a private key. It must be in the range ``[1..order-1]``.NEWLINE          point : EccPointNEWLINE            Mandatory for a public key. If provided for a private key,NEWLINE            the implementation will NOT check whether it matches ``d``.NEWLINE        """NEWLINENEWLINE        kwargs_ = dict(kwargs)NEWLINE        self.curve = kwargs_.pop("curve", None)NEWLINE        self._d = kwargs_.pop("d", None)NEWLINE        self._point = kwargs_.pop("point", None)NEWLINE        if kwargs_:NEWLINE            raise TypeError("Unknown parameters: " + str(kwargs_))NEWLINENEWLINE        if self.curve not in _curve.names:NEWLINE            raise ValueError("Unsupported curve (%s)" % self.curve)NEWLINENEWLINE        if self._d is None:NEWLINE            if self._point is None:NEWLINE                raise ValueError("Either private or public ECC component must be specified")NEWLINE        else:NEWLINE            self._d = Integer(self._d)NEWLINE            if not 1 <= self._d < _curve.order:NEWLINE                raise ValueError("Invalid ECC private component")NEWLINENEWLINE    def __eq__(self, other):NEWLINE        if other.has_private() != self.has_private():NEWLINE            return FalseNEWLINENEWLINE        return (other.pointQ.x == self.pointQ.x) and (other.pointQ.y == self.pointQ.y)NEWLINENEWLINE    def __repr__(self):NEWLINE        if self.has_private():NEWLINE            extra = ", d=%d" % int(self._d)NEWLINE        else:NEWLINE            extra = ""NEWLINE        return "EccKey(curve='P-256', x=%d, y=%d%s)" %\NEWLINE               (self.pointQ.x, self.pointQ.y, extra)NEWLINENEWLINE    def has_private(self):NEWLINE        """``True`` if this key can be used for making signatures or decrypting data."""NEWLINENEWLINE        return self._d is not NoneNEWLINENEWLINE    def _sign(self, z, k):NEWLINE        assert 0 < k < _curve.orderNEWLINENEWLINE        blind = Integer.random_range(min_inclusive=1,NEWLINE                                     max_exclusive=_curve.order)NEWLINENEWLINE        blind_d = self._d * blindNEWLINE        inv_blind_k = (blind * k).inverse(_curve.order)NEWLINENEWLINE        r = (_curve.G * k).x % _curve.orderNEWLINE        s = inv_blind_k * (blind * z + blind_d * r) % _curve.orderNEWLINE        return (r, s)NEWLINENEWLINE    def 
_verify(self, z, rs):NEWLINE sinv = rs[1].inverse(_curve.order)NEWLINE point1 = _curve.G * ((sinv * z) % _curve.order)NEWLINE point2 = self.pointQ * ((sinv * rs[0]) % _curve.order)NEWLINE return (point1 + point2).x == rs[0]NEWLINENEWLINE @propertyNEWLINE def d(self):NEWLINE if not self.has_private():NEWLINE raise ValueError("This is not a private ECC key")NEWLINE return self._dNEWLINENEWLINE @propertyNEWLINE def pointQ(self):NEWLINE if self._point is None:NEWLINE self._point = _curve.G * self._dNEWLINE return self._pointNEWLINENEWLINE def public_key(self):NEWLINE """A matching ECC public key.NEWLINENEWLINE Returns:NEWLINE a new :class:`EccKey` objectNEWLINE """NEWLINENEWLINE return EccKey(curve="P-256", point=self.pointQ)NEWLINENEWLINE def _export_subjectPublicKeyInfo(self, compress):NEWLINE NEWLINE # See 2.2 in RFC5480 and 2.3.3 in SEC1NEWLINE # The first byte is:NEWLINE # - 0x02: compressed, only X-coordinate, Y-coordinate is evenNEWLINE # - 0x03: compressed, only X-coordinate, Y-coordinate is oddNEWLINE # - 0x04: uncompressed, X-coordinate is followed by Y-coordinateNEWLINE #NEWLINE # PAI is in theory encoded as 0x00.NEWLINENEWLINE order_bytes = _curve.order.size_in_bytes()NEWLINENEWLINE if compress:NEWLINE first_byte = 2 + self.pointQ.y.is_odd()NEWLINE public_key = (bchr(first_byte) +NEWLINE self.pointQ.x.to_bytes(order_bytes))NEWLINE else:NEWLINE public_key = (b'\x04' +NEWLINE self.pointQ.x.to_bytes(order_bytes) +NEWLINE self.pointQ.y.to_bytes(order_bytes))NEWLINENEWLINE unrestricted_oid = "1.2.840.10045.2.1"NEWLINE return _create_subject_public_key_info(unrestricted_oid,NEWLINE public_key,NEWLINE DerObjectId(_curve.oid))NEWLINENEWLINE def _export_private_der(self, include_ec_params=True):NEWLINENEWLINE assert self.has_private()NEWLINENEWLINE # ECPrivateKey ::= SEQUENCE {NEWLINE # version INTEGER { ecPrivkeyVer1(1) } (ecPrivkeyVer1),NEWLINE # privateKey OCTET STRING,NEWLINE # parameters [0] ECParameters {{ NamedCurve }} OPTIONAL,NEWLINE # publicKey [1] BIT STRING OPTIONALNEWLINE # }NEWLINENEWLINE # Public key - uncompressed formNEWLINE order_bytes = _curve.order.size_in_bytes()NEWLINE public_key = (b'\x04' +NEWLINE self.pointQ.x.to_bytes(order_bytes) +NEWLINE self.pointQ.y.to_bytes(order_bytes))NEWLINENEWLINE seq = [1,NEWLINE DerOctetString(self.d.to_bytes(order_bytes)),NEWLINE DerObjectId(_curve.oid, explicit=0),NEWLINE DerBitString(public_key, explicit=1)]NEWLINENEWLINE if not include_ec_params:NEWLINE del seq[2]NEWLINENEWLINE return DerSequence(seq).encode()NEWLINENEWLINE def _export_pkcs8(self, **kwargs):NEWLINE if kwargs.get('passphrase', None) is not None and 'protection' not in kwargs:NEWLINE raise ValueError("At least the 'protection' parameter should be present")NEWLINE unrestricted_oid = "1.2.840.10045.2.1"NEWLINE private_key = self._export_private_der(include_ec_params=False)NEWLINE result = PKCS8.wrap(private_key,NEWLINE unrestricted_oid,NEWLINE key_params=DerObjectId(_curve.oid),NEWLINE **kwargs)NEWLINE return resultNEWLINENEWLINE def _export_public_pem(self, compress):NEWLINE encoded_der = self._export_subjectPublicKeyInfo(compress)NEWLINE return PEM.encode(encoded_der, "PUBLIC KEY")NEWLINENEWLINE def _export_private_pem(self, passphrase, **kwargs):NEWLINE encoded_der = self._export_private_der()NEWLINE return PEM.encode(encoded_der, "EC PRIVATE KEY", passphrase, **kwargs)NEWLINENEWLINE def _export_private_clear_pkcs8_in_clear_pem(self):NEWLINE encoded_der = self._export_pkcs8()NEWLINE return PEM.encode(encoded_der, "PRIVATE KEY")NEWLINENEWLINE def 
_export_private_encrypted_pkcs8_in_clear_pem(self, passphrase, **kwargs):NEWLINE assert passphraseNEWLINE if 'protection' not in kwargs:NEWLINE raise ValueError("At least the 'protection' parameter should be present")NEWLINE encoded_der = self._export_pkcs8(passphrase=passphrase, **kwargs)NEWLINE return PEM.encode(encoded_der, "ENCRYPTED PRIVATE KEY")NEWLINENEWLINE def _export_openssh(self, compress):NEWLINE if self.has_private():NEWLINE raise ValueError("Cannot export OpenSSH private keys")NEWLINENEWLINE desc = "ecdsa-sha2-nistp256"NEWLINE order_bytes = _curve.order.size_in_bytes()NEWLINE NEWLINE if compress:NEWLINE first_byte = 2 + self.pointQ.y.is_odd()NEWLINE public_key = (bchr(first_byte) +NEWLINE self.pointQ.x.to_bytes(order_bytes))NEWLINE else:NEWLINE public_key = (b'\x04' +NEWLINE self.pointQ.x.to_bytes(order_bytes) +NEWLINE self.pointQ.y.to_bytes(order_bytes))NEWLINENEWLINE comps = (tobytes(desc), b"nistp256", public_key)NEWLINE blob = b"".join([ struct.pack(">I", len(x)) + x for x in comps])NEWLINE return desc + " " + tostr(binascii.b2a_base64(blob))NEWLINENEWLINE def export_key(self, **kwargs):NEWLINE """Export this ECC key.NEWLINENEWLINE Args:NEWLINE format (string):NEWLINE The format to use for encoding the key:NEWLINENEWLINE - *'DER'*. The key will be encoded in ASN.1 DER format (binary).NEWLINE For a public key, the ASN.1 ``subjectPublicKeyInfo`` structureNEWLINE defined in `RFC5480`_ will be used.NEWLINE For a private key, the ASN.1 ``ECPrivateKey`` structure definedNEWLINE in `RFC5915`_ is used instead (possibly within a PKCS#8 envelope,NEWLINE see the ``use_pkcs8`` flag below).NEWLINE - *'PEM'*. The key will be encoded in a PEM_ envelope (ASCII).NEWLINE - *'OpenSSH'*. The key will be encoded in the OpenSSH_ formatNEWLINE (ASCII, public keys only).NEWLINENEWLINE passphrase (byte string or string):NEWLINE The passphrase to use for protecting the private key.NEWLINENEWLINE use_pkcs8 (boolean):NEWLINE If ``True`` (default and recommended), the `PKCS#8`_ representationNEWLINE will be used.NEWLINENEWLINE If ``False``, the much weaker `PEM encryption`_ mechanism will be used.NEWLINENEWLINE protection (string):NEWLINE When a private key is exported with password-protectionNEWLINE and PKCS#8 (both ``DER`` and ``PEM`` formats), this parameter MUST beNEWLINE present and be a valid algorithm supported by :mod:`Cryptodome.IO.PKCS8`.NEWLINE It is recommended to use ``PBKDF2WithHMAC-SHA1AndAES128-CBC``.NEWLINENEWLINE compress (boolean):NEWLINE If ``True``, a more compact representation of the public keyNEWLINE (X-coordinate only) is used.NEWLINENEWLINE If ``False`` (default), the full public key (in both itsNEWLINE coordinates) will be exported.NEWLINENEWLINE .. warning::NEWLINE If you don't provide a passphrase, the private key will beNEWLINE exported in the clear!NEWLINENEWLINE .. note::NEWLINE When exporting a private key with password-protection and `PKCS#8`_NEWLINE (both ``DER`` and ``PEM`` formats), any extra parametersNEWLINE is passed to :mod:`Cryptodome.IO.PKCS8`.NEWLINENEWLINE .. _PEM: http://www.ietf.org/rfc/rfc1421.txtNEWLINE .. _`PEM encryption`: http://www.ietf.org/rfc/rfc1423.txtNEWLINE .. _`PKCS#8`: http://www.ietf.org/rfc/rfc5208.txtNEWLINE .. _OpenSSH: http://www.openssh.com/txt/rfc5656.txtNEWLINE .. _RFC5480: https://tools.ietf.org/html/rfc5480NEWLINE .. 
_RFC5915: http://www.ietf.org/rfc/rfc5915.txtNEWLINENEWLINE        Returns:NEWLINE            A multi-line string (for PEM and OpenSSH) or bytes (for DER) with the encoded key.NEWLINE        """NEWLINENEWLINE        args = kwargs.copy()NEWLINE        ext_format = args.pop("format")NEWLINE        if ext_format not in ("PEM", "DER", "OpenSSH"):NEWLINE            raise ValueError("Unknown format '%s'" % ext_format)NEWLINENEWLINE        compress = args.pop("compress", False)NEWLINENEWLINE        if self.has_private():NEWLINE            passphrase = args.pop("passphrase", None)NEWLINE            if is_string(passphrase):NEWLINE                passphrase = tobytes(passphrase)NEWLINE                if not passphrase:NEWLINE                    raise ValueError("Empty passphrase")NEWLINE            use_pkcs8 = args.pop("use_pkcs8", True)NEWLINE            if ext_format == "PEM":NEWLINE                if use_pkcs8:NEWLINE                    if passphrase:NEWLINE                        return self._export_private_encrypted_pkcs8_in_clear_pem(passphrase, **args)NEWLINE                    else:NEWLINE                        return self._export_private_clear_pkcs8_in_clear_pem()NEWLINE                else:NEWLINE                    return self._export_private_pem(passphrase, **args)NEWLINE            elif ext_format == "DER":NEWLINE                if passphrase and not use_pkcs8:NEWLINE                    raise ValueError("Private keys can only be encrypted with DER using PKCS#8")NEWLINE                if use_pkcs8:NEWLINE                    return self._export_pkcs8(passphrase=passphrase, **args)NEWLINE                else:NEWLINE                    return self._export_private_der()NEWLINE            else:NEWLINE                raise ValueError("Private keys cannot be exported in OpenSSH format")NEWLINE        else:  # Public keyNEWLINE            if args:NEWLINE                raise ValueError("Unexpected parameters: '%s'" % args)NEWLINE            if ext_format == "PEM":NEWLINE                return self._export_public_pem(compress)NEWLINE            elif ext_format == "DER":NEWLINE                return self._export_subjectPublicKeyInfo(compress)NEWLINE            else:NEWLINE                return self._export_openssh(compress)NEWLINENEWLINENEWLINEdef generate(**kwargs):NEWLINE    """Generate a new private key on the given curve.NEWLINENEWLINE    Args:NEWLINENEWLINE      curve (string):NEWLINE        Mandatory. It must be "P-256", "prime256v1" or "secp256r1".NEWLINENEWLINE      randfunc (callable):NEWLINE        Optional. The RNG to read randomness from.NEWLINE        If ``None``, :func:`Cryptodome.Random.get_random_bytes` is used.NEWLINE    """NEWLINENEWLINE    curve = kwargs.pop("curve")NEWLINE    randfunc = kwargs.pop("randfunc", get_random_bytes)NEWLINE    if kwargs:NEWLINE        raise TypeError("Unknown parameters: " + str(kwargs))NEWLINENEWLINE    d = Integer.random_range(min_inclusive=1,NEWLINE                             max_exclusive=_curve.order,NEWLINE                             randfunc=randfunc)NEWLINENEWLINE    return EccKey(curve=curve, d=d)NEWLINENEWLINENEWLINEdef construct(**kwargs):NEWLINE    """Build a new ECC key (private or public) startingNEWLINE    from some base components.NEWLINENEWLINE    Args:NEWLINENEWLINE      curve (string):NEWLINE        Mandatory. It must be "P-256", "prime256v1" or "secp256r1".NEWLINENEWLINE      d (integer):NEWLINE        Only for a private key. It must be in the range ``[1..order-1]``.NEWLINENEWLINE      point_x (integer):NEWLINE        Mandatory for a public key. X coordinate (affine) of the ECC point.NEWLINENEWLINE      point_y (integer):NEWLINE        Mandatory for a public key. 
Y coordinate (affine) of the ECC point.NEWLINENEWLINE    Returns:NEWLINE        :class:`EccKey` : a new ECC key objectNEWLINE    """NEWLINENEWLINE    point_x = kwargs.pop("point_x", None)NEWLINE    point_y = kwargs.pop("point_y", None)NEWLINENEWLINE    if "point" in kwargs:NEWLINE        raise TypeError("Unknown keyword: point")NEWLINENEWLINE    if None not in (point_x, point_y):NEWLINE        kwargs["point"] = EccPoint(point_x, point_y)NEWLINENEWLINE        # Validate that the point is on the P-256 curveNEWLINE        eq1 = pow(Integer(point_y), 2, _curve.p)NEWLINE        x = Integer(point_x)NEWLINE        eq2 = pow(x, 3, _curve.p)NEWLINE        x *= -3NEWLINE        eq2 += xNEWLINE        eq2 += _curve.bNEWLINE        eq2 %= _curve.pNEWLINENEWLINE        if eq1 != eq2:NEWLINE            raise ValueError("The point is not on the curve")NEWLINENEWLINE    # Validate that the private key matches the public oneNEWLINE    d = kwargs.get("d", None)NEWLINE    if d is not None and "point" in kwargs:NEWLINE        pub_key = _curve.G * dNEWLINE        if pub_key.x != point_x or pub_key.y != point_y:NEWLINE            raise ValueError("Private and public ECC keys do not match")NEWLINENEWLINE    return EccKey(**kwargs)NEWLINENEWLINENEWLINEdef _import_public_der(curve_oid, ec_point):NEWLINE    """Convert an encoded EC point into an EccKey objectNEWLINENEWLINE    curve_oid: string with the OID of the curveNEWLINE    ec_point: byte string with the EC point (not DER encoded)NEWLINENEWLINE    """NEWLINENEWLINE    # We only support P-256 named curves for nowNEWLINE    if curve_oid != _curve.oid:NEWLINE        raise UnsupportedEccFeature("Unsupported ECC curve (OID: %s)" % curve_oid)NEWLINENEWLINE    # See 2.2 in RFC5480 and 2.3.3 in SEC1NEWLINE    # The first byte is:NEWLINE    # - 0x02: compressed, only X-coordinate, Y-coordinate is evenNEWLINE    # - 0x03: compressed, only X-coordinate, Y-coordinate is oddNEWLINE    # - 0x04: uncompressed, X-coordinate is followed by Y-coordinateNEWLINE    #NEWLINE    # PAI is in theory encoded as 0x00.NEWLINENEWLINE    order_bytes = _curve.order.size_in_bytes()NEWLINE    point_type = bord(ec_point[0])NEWLINENEWLINE    # Uncompressed pointNEWLINE    if point_type == 0x04:NEWLINE        if len(ec_point) != (1 + 2 * order_bytes):NEWLINE            raise ValueError("Incorrect EC point length")NEWLINE        x = Integer.from_bytes(ec_point[1:order_bytes+1])NEWLINE        y = Integer.from_bytes(ec_point[order_bytes+1:])NEWLINE    # Compressed pointNEWLINE    elif point_type in (0x02, 0x03):NEWLINE        if len(ec_point) != (1 + order_bytes):NEWLINE            raise ValueError("Incorrect EC point length")NEWLINE        x = Integer.from_bytes(ec_point[1:])NEWLINE        y = (x**3 - x*3 + _curve.b).sqrt(_curve.p)  # Short WeierstrassNEWLINE        if point_type == 0x02 and y.is_odd():NEWLINE            y = _curve.p - yNEWLINE        if point_type == 0x03 and y.is_even():NEWLINE            y = _curve.p - yNEWLINE    else:NEWLINE        raise ValueError("Incorrect EC point encoding")NEWLINENEWLINE    return construct(curve="P-256", point_x=x, point_y=y)NEWLINENEWLINENEWLINEdef _import_subjectPublicKeyInfo(encoded, *args):NEWLINE    """Convert a subjectPublicKeyInfo into an EccKey object"""NEWLINENEWLINE    # See RFC5480NEWLINENEWLINE    # Parse the generic subjectPublicKeyInfo structureNEWLINE    oid, ec_point, params = _expand_subject_public_key_info(encoded)NEWLINENEWLINE    # ec_point must be an encoded OCTET STRINGNEWLINE    # params is encoded ECParametersNEWLINENEWLINE    # We accept id-ecPublicKey, id-ecDH, id-ecMQV without making anyNEWLINE    # distinction for now.NEWLINE    unrestricted_oid = "1.2.840.10045.2.1"  # Restrictions can be capturedNEWLINE                                            # in the key usage certificateNEWLINE                                            # extensionNEWLINE    ecdh_oid = "1.3.132.1.12"NEWLINE    ecmqv_oid = "1.3.132.1.13"NEWLINENEWLINE    if oid not in (unrestricted_oid, 
ecdh_oid, ecmqv_oid):NEWLINE        raise UnsupportedEccFeature("Unsupported ECC purpose (OID: %s)" % oid)NEWLINENEWLINE    # Parameters are mandatory for all three typesNEWLINE    if not params:NEWLINE        raise ValueError("Missing ECC parameters")NEWLINENEWLINE    # ECParameters ::= CHOICE {NEWLINE    #   namedCurve         OBJECT IDENTIFIERNEWLINE    #   -- implicitCurve   NULLNEWLINE    #   -- specifiedCurve  SpecifiedECDomainNEWLINE    # }NEWLINE    #NEWLINE    # implicitCurve and specifiedCurve are not supported (as per RFC)NEWLINE    curve_oid = DerObjectId().decode(params).valueNEWLINENEWLINE    return _import_public_der(curve_oid, ec_point)NEWLINENEWLINENEWLINEdef _import_private_der(encoded, passphrase, curve_name=None):NEWLINENEWLINE    # ECPrivateKey ::= SEQUENCE {NEWLINE    #       version        INTEGER { ecPrivkeyVer1(1) } (ecPrivkeyVer1),NEWLINE    #       privateKey     OCTET STRING,NEWLINE    #       parameters [0] ECParameters {{ NamedCurve }} OPTIONAL,NEWLINE    #       publicKey  [1] BIT STRING OPTIONALNEWLINE    # }NEWLINENEWLINE    private_key = DerSequence().decode(encoded, nr_elements=(3, 4))NEWLINE    if private_key[0] != 1:NEWLINE        raise ValueError("Incorrect ECC private key version")NEWLINENEWLINE    try:NEWLINE        curve_name = DerObjectId(explicit=0).decode(private_key[2]).valueNEWLINE    except ValueError:NEWLINE        passNEWLINENEWLINE    if curve_name != _curve.oid:NEWLINE        raise UnsupportedEccFeature("Unsupported ECC curve (OID: %s)" % curve_name)NEWLINENEWLINE    scalar_bytes = DerOctetString().decode(private_key[1]).payloadNEWLINE    order_bytes = _curve.order.size_in_bytes()NEWLINE    if len(scalar_bytes) != order_bytes:NEWLINE        raise ValueError("Private key is too small")NEWLINE    d = Integer.from_bytes(scalar_bytes)NEWLINENEWLINE    # Decode public key (if any, it must be P-256)NEWLINE    if len(private_key) == 4:NEWLINE        public_key_enc = DerBitString(explicit=1).decode(private_key[3]).valueNEWLINE        public_key = _import_public_der(curve_name, public_key_enc)NEWLINE        point_x = public_key.pointQ.xNEWLINE        point_y = public_key.pointQ.yNEWLINE    else:NEWLINE        point_x = point_y = NoneNEWLINENEWLINE    return construct(curve="P-256", d=d, point_x=point_x, point_y=point_y)NEWLINENEWLINENEWLINEdef _import_pkcs8(encoded, passphrase):NEWLINENEWLINE    # From RFC5915, Section 1:NEWLINE    #NEWLINE    # Distributing an EC private key with PKCS#8 [RFC5208] involves including:NEWLINE    # a) id-ecPublicKey, id-ecDH, or id-ecMQV (from [RFC5480]) with theNEWLINE    #    namedCurve as the parameters in the privateKeyAlgorithm field; andNEWLINE    # b) ECPrivateKey in the PrivateKey field, which is an OCTET STRING.NEWLINENEWLINE    algo_oid, private_key, params = PKCS8.unwrap(encoded, passphrase)NEWLINENEWLINE    # We accept id-ecPublicKey, id-ecDH, id-ecMQV without making anyNEWLINE    # distinction for now.NEWLINE    unrestricted_oid = "1.2.840.10045.2.1"NEWLINE    ecdh_oid = "1.3.132.1.12"NEWLINE    ecmqv_oid = "1.3.132.1.13"NEWLINENEWLINE    if algo_oid not in (unrestricted_oid, ecdh_oid, ecmqv_oid):NEWLINE        raise UnsupportedEccFeature("Unsupported ECC purpose (OID: %s)" % algo_oid)NEWLINENEWLINE    curve_name = DerObjectId().decode(params).valueNEWLINENEWLINE    return _import_private_der(private_key, passphrase, curve_name)NEWLINENEWLINENEWLINEdef _import_x509_cert(encoded, *args):NEWLINENEWLINE    sp_info = _extract_subject_public_key_info(encoded)NEWLINE    return _import_subjectPublicKeyInfo(sp_info)NEWLINENEWLINENEWLINEdef _import_der(encoded, passphrase):NEWLINENEWLINE    try:NEWLINE        return _import_subjectPublicKeyInfo(encoded, passphrase)NEWLINE    except UnsupportedEccFeature as err:NEWLINE        raise errNEWLINE    except (ValueError, TypeError, IndexError):NEWLINE        passNEWLINENEWLINE    
try:NEWLINE return _import_x509_cert(encoded, passphrase)NEWLINE except UnsupportedEccFeature as err:NEWLINE raise errNEWLINE except (ValueError, TypeError, IndexError):NEWLINE passNEWLINE NEWLINE try:NEWLINE return _import_private_der(encoded, passphrase)NEWLINE except UnsupportedEccFeature as err:NEWLINE raise errNEWLINE except (ValueError, TypeError, IndexError):NEWLINE passNEWLINE NEWLINE try:NEWLINE return _import_pkcs8(encoded, passphrase)NEWLINE except UnsupportedEccFeature as err:NEWLINE raise errNEWLINE except (ValueError, TypeError, IndexError):NEWLINE passNEWLINENEWLINE raise ValueError("Not an ECC DER key")NEWLINENEWLINENEWLINEdef _import_openssh(encoded):NEWLINE keystring = binascii.a2b_base64(encoded.split(b' ')[1])NEWLINENEWLINE keyparts = []NEWLINE while len(keystring) > 4:NEWLINE l = struct.unpack(">I", keystring[:4])[0]NEWLINE keyparts.append(keystring[4:4 + l])NEWLINE keystring = keystring[4 + l:]NEWLINENEWLINE if keyparts[1] != b"nistp256":NEWLINE raise ValueError("Unsupported ECC curve")NEWLINENEWLINE return _import_public_der(_curve.oid, keyparts[2])NEWLINENEWLINENEWLINEdef import_key(encoded, passphrase=None):NEWLINE """Import an ECC key (public or private).NEWLINENEWLINE Args:NEWLINE encoded (bytes or multi-line string):NEWLINE The ECC key to import.NEWLINENEWLINE An ECC **public** key can be:NEWLINENEWLINE - An X.509 certificate, binary (DER) or ASCII (PEM)NEWLINE - An X.509 ``subjectPublicKeyInfo``, binary (DER) or ASCII (PEM)NEWLINE - An OpenSSH line (e.g. the content of ``~/.ssh/id_ecdsa``, ASCII)NEWLINENEWLINE An ECC **private** key can be:NEWLINENEWLINE - In binary format (DER, see section 3 of `RFC5915`_ or `PKCS#8`_)NEWLINE - In ASCII format (PEM or OpenSSH)NEWLINENEWLINE Private keys can be in the clear or password-protected.NEWLINENEWLINE For details about the PEM encoding, see `RFC1421`_/`RFC1423`_.NEWLINENEWLINE passphrase (byte string):NEWLINE The passphrase to use for decrypting a private key.NEWLINE Encryption may be applied protected at the PEM level or at the PKCS#8 level.NEWLINE This parameter is ignored if the key in input is not encrypted.NEWLINENEWLINE Returns:NEWLINE :class:`EccKey` : a new ECC key objectNEWLINENEWLINE Raises:NEWLINE ValueError: when the given key cannot be parsed (possibly becauseNEWLINE the pass phrase is wrong).NEWLINENEWLINE .. _RFC1421: http://www.ietf.org/rfc/rfc1421.txtNEWLINE .. _RFC1423: http://www.ietf.org/rfc/rfc1423.txtNEWLINE .. _RFC5915: http://www.ietf.org/rfc/rfc5915.txtNEWLINE .. 
_`PKCS#8`: http://www.ietf.org/rfc/rfc5208.txtNEWLINE """NEWLINENEWLINE encoded = tobytes(encoded)NEWLINE if passphrase is not None:NEWLINE passphrase = tobytes(passphrase)NEWLINENEWLINE # PEMNEWLINE if encoded.startswith(b'-----'):NEWLINE der_encoded, marker, enc_flag = PEM.decode(tostr(encoded), passphrase)NEWLINE if enc_flag:NEWLINE passphrase = NoneNEWLINE try:NEWLINE result = _import_der(der_encoded, passphrase)NEWLINE except UnsupportedEccFeature as uef:NEWLINE raise uefNEWLINE except ValueError:NEWLINE raise ValueError("Invalid DER encoding inside the PEM file")NEWLINE return resultNEWLINENEWLINE # OpenSSHNEWLINE if encoded.startswith(b'ecdsa-sha2-'):NEWLINE return _import_openssh(encoded)NEWLINENEWLINE # DERNEWLINE if bord(encoded[0]) == 0x30:NEWLINE return _import_der(encoded, passphrase)NEWLINENEWLINE raise ValueError("ECC key format is not supported")NEWLINENEWLINENEWLINEif __name__ == "__main__":NEWLINE NEWLINE import timeNEWLINENEWLINE d = 0xc51e4753afdec1e6b6c6a5b992f43f8dd0c7a8933072708b6522468b2ffb06fdNEWLINENEWLINE point = generate(curve="P-256").pointQNEWLINE start = time.time()NEWLINE count = 30NEWLINE for x in range(count):NEWLINE _ = point * dNEWLINE print((time.time() - start) / count * 1000, "ms")NEWLINE
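# Usage sketch for the ECC module above: generate a P-256 key, round-trip it throughNEWLINE# clear and password-protected PKCS#8 PEM, and confirm the public-point relation Q = d*G.NEWLINE# Only the module's own public API (generate, import_key, EccKey.export_key/public_key)NEWLINE# is exercised; the import path assumes the Cryptodome package layout this file ships in,NEWLINE# and the protection scheme is the one the export_key() docstring itself recommends.NEWLINEfrom Cryptodome.PublicKey import ECCNEWLINENEWLINEkey = ECC.generate(curve="P-256")NEWLINENEWLINE# Clear PKCS#8 PEM round-trip (use_pkcs8 defaults to True).NEWLINEpem = key.export_key(format="PEM")NEWLINErestored = ECC.import_key(pem)NEWLINEassert restored == key and restored.has_private()NEWLINENEWLINE# pointQ is computed lazily as d*G, so it must match the restored public key.NEWLINEassert key.pointQ == restored.public_key().pointQNEWLINENEWLINE# Encrypted export requires an explicit PKCS#8 protection scheme.NEWLINEenc = key.export_key(format="PEM", passphrase="secret",NEWLINE                     protection="PBKDF2WithHMAC-SHA1AndAES128-CBC")NEWLINEassert ECC.import_key(enc, passphrase="secret") == keyNEWLINE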
#-NEWLINE# Copyright (c) 2013 Michael RoeNEWLINE# All rights reserved.NEWLINE#NEWLINE# This software was developed by SRI International and the University ofNEWLINE# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237NEWLINE# ("CTSRD"), as part of the DARPA CRASH research programme.NEWLINE#NEWLINE# @BERI_LICENSE_HEADER_START@NEWLINE#NEWLINE# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributorNEWLINE# license agreements. See the NOTICE file distributed with this work forNEWLINE# additional information regarding copyright ownership. BERI licenses thisNEWLINE# file to you under the BERI Hardware-Software License, Version 1.0 (theNEWLINE# "License"); you may not use this file except in compliance with theNEWLINE# License. You may obtain a copy of the License at:NEWLINE#NEWLINE# http://www.beri-open-systems.org/legal/license-1-0.txtNEWLINE#NEWLINE# Unless required by applicable law or agreed to in writing, Work distributedNEWLINE# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES ORNEWLINE# CONDITIONS OF ANY KIND, either express or implied. See the License for theNEWLINE# specific language governing permissions and limitations under the License.NEWLINE#NEWLINE# @BERI_LICENSE_HEADER_END@NEWLINE#NEWLINENEWLINE#NEWLINE# Test single-precision floor to long word when the FPU is in 64 bit modeNEWLINE#NEWLINENEWLINEfrom beritest_tools import BaseBERITestCaseNEWLINEfrom nose.plugins.attrib import attrNEWLINENEWLINEclass test_raw_fpu_floor_l_s_d64(BaseBERITestCase):NEWLINENEWLINE @attr('float64')NEWLINE def test_raw_fpu_floor_l_s_d64_1(self):NEWLINE '''Test single precision floor of -0.75'''NEWLINE self.assertRegisterEqual(self.MIPS.a0 , 0xffffffffffffffff, "-0.75 did not round down to -1")NEWLINENEWLINE @attr('float64')NEWLINE def test_raw_fpu_floor_l_s_d64_2(self):NEWLINE '''Test single precision floor of -0.5'''NEWLINE self.assertRegisterEqual(self.MIPS.a1 , 0xffffffffffffffff, "-0.5 did not round down to -1")NEWLINENEWLINE @attr('float64')NEWLINE def test_raw_fpu_floor_l_s_d64_3(self):NEWLINE '''Test single precision floor of -0.25'''NEWLINE self.assertRegisterEqual(self.MIPS.a2, 0xffffffffffffffff, "-0.25 did not round down to -1")NEWLINENEWLINE @attr('float64')NEWLINE def test_raw_fpu_floor_l_s_d64_4(self):NEWLINE '''Test single precision floor of 0.5'''NEWLINE self.assertRegisterEqual(self.MIPS.a3, 0, "0.5 did not round down to 0")NEWLINENEWLINE @attr('float64')NEWLINE def test_raw_fpu_floor_l_s_d64_5(self):NEWLINE '''Test single precision floor of 1.5'''NEWLINE self.assertRegisterEqual(self.MIPS.a4, 1, "1.5 did not round down to 1")NEWLINE
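# The BERI assertions above rely on floor rounding toward negative infinity and onNEWLINE# -1 being read back as the unsigned 64-bit pattern 0xffffffffffffffff. A host-sideNEWLINE# sketch of the same expectations in plain Python (not part of the beritest harness):NEWLINEimport mathNEWLINENEWLINEMASK64 = (1 << 64) - 1NEWLINENEWLINEfor value, expected in [(-0.75, -1), (-0.5, -1), (-0.25, -1), (0.5, 0), (1.5, 1)]:NEWLINE    result = int(math.floor(value))NEWLINE    assert result == expected, "floor(%r) != %d" % (value, expected)NEWLINE    # Registers compare as unsigned 64-bit words, hence 0xffffffffffffffff for -1.NEWLINE    assert (result & MASK64) == (expected & MASK64)NEWLINE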
from django.contrib.contenttypes.models import ContentTypeNEWLINEimport jsonNEWLINENEWLINEfrom django.http import Http404, HttpResponseNEWLINEfrom django.contrib import messagesNEWLINEfrom django.contrib.auth import get_user_modelNEWLINEfrom django.contrib.auth.decorators import login_required, user_passes_testNEWLINEfrom django.core.urlresolvers import reverseNEWLINEfrom django.shortcuts import get_object_or_404, redirect, renderNEWLINEfrom guardian.decorators import permission_requiredNEWLINENEWLINEfrom guardian.shortcuts import get_objects_for_userNEWLINENEWLINEfrom account.models import DepartmentGroupNEWLINEfrom backend.tasks import TestConnectionTaskNEWLINEfrom event.models import NotificationPreferencesNEWLINEfrom .models import Application, Department, Environment, Server, ServerRoleNEWLINEfrom task.models import ExecutionNEWLINENEWLINENEWLINE@login_requiredNEWLINEdef index(request):NEWLINE    data = {}NEWLINE    executions = Execution.objects.filter(task__application__department_id=request.current_department_id)NEWLINE    if not executions.count():NEWLINE        return redirect(reverse('first_steps_page'))NEWLINE    return render(request, 'page/index.html', data)NEWLINENEWLINENEWLINE@permission_required('core.view_application', (Application, 'id', 'application_id'))NEWLINEdef application_page(request, application_id):NEWLINE    data = {}NEWLINE    data['application'] = get_object_or_404(Application, pk=application_id)NEWLINE    return render(request, 'page/application.html', data)NEWLINENEWLINENEWLINE@permission_required('core.view_environment', (Environment, 'id', 'environment_id'))NEWLINEdef environment_page(request, environment_id):NEWLINE    data = {}NEWLINE    data['environment'] = get_object_or_404(Environment, pk=environment_id)NEWLINE    data['servers'] = list(Server.objects.filter(environment_id=environment_id).prefetch_related('roles'))NEWLINE    return render(request, 'page/environment.html', data)NEWLINENEWLINENEWLINE@permission_required('core.view_environment', (Environment, 'servers__id', 'server_id'))NEWLINEdef server_test(request, server_id):NEWLINE    data = {}NEWLINE    data['server'] = get_object_or_404(Server, pk=server_id)NEWLINE    data['task_id'] = TestConnectionTask().delay(server_id).idNEWLINE    return render(request, 'partial/server_test.html', data)NEWLINENEWLINENEWLINE@login_requiredNEWLINEdef server_test_ajax(request, task_id):NEWLINE    data = {}NEWLINE    task = TestConnectionTask().AsyncResult(task_id)NEWLINE    if task.status == 'SUCCESS':NEWLINE        status, output = task.get()NEWLINE        data['status'] = statusNEWLINE        data['output'] = outputNEWLINE    elif task.status == 'FAILURE':  # Celery reports failed tasks as 'FAILURE', not 'FAILED'NEWLINE        data['status'] = FalseNEWLINE    else:NEWLINE        data['status'] = NoneNEWLINE    return HttpResponse(json.dumps(data), content_type="application/json")NEWLINENEWLINENEWLINE@login_requiredNEWLINEdef first_steps_page(request):NEWLINE    data = {}NEWLINE    return render(request, 'page/first_steps.html', data)NEWLINENEWLINENEWLINE@login_requiredNEWLINEdef settings_page(request, section='user', subsection='profile'):NEWLINE    data = {}NEWLINE    data['section'] = sectionNEWLINE    data['subsection'] = subsectionNEWLINE    data['department'] = Department(pk=request.current_department_id)NEWLINE    data['on_settings'] = TrueNEWLINE    handler = '_settings_%s_%s' % (section, subsection)NEWLINE    if section == 'system' and request.user.is_superuser is not True:NEWLINE        return redirect('index')NEWLINE    if section == 'department' and not request.user.has_perm('core.change_department', obj=data['department']):NEWLINE        return redirect('index')NEWLINE    if handler in globals():NEWLINE        
data = globals()[handler](request, data)NEWLINE else:NEWLINE raise Http404NEWLINE return render(request, 'page/settings.html', data)NEWLINENEWLINENEWLINEdef _settings_account_profile(request, data):NEWLINE data['subsection_template'] = 'partial/account_profile.html'NEWLINE from account.forms import account_create_formNEWLINE form = account_create_form('user_profile', request, request.user.id)NEWLINE form.fields['email'].widget.attrs['readonly'] = TrueNEWLINE data['form'] = formNEWLINE if request.method == 'POST':NEWLINE if form.is_valid():NEWLINE form.save()NEWLINE data['user'] = form.instanceNEWLINE messages.success(request, 'Saved')NEWLINE return dataNEWLINENEWLINENEWLINEdef _settings_account_password(request, data):NEWLINE data['subsection_template'] = 'partial/account_password.html'NEWLINE from account.forms import account_create_formNEWLINE form = account_create_form('user_password', request, request.user.id)NEWLINE data['form'] = formNEWLINE if request.method == 'POST':NEWLINE if form.is_valid():NEWLINE user = form.save(commit=False)NEWLINE user.set_password(user.password)NEWLINE user.save()NEWLINE data['user'] = form.instanceNEWLINE messages.success(request, 'Saved')NEWLINE return dataNEWLINENEWLINENEWLINEdef _settings_account_notifications(request, data):NEWLINE data['subsection_template'] = 'partial/account_notifications.html'NEWLINE data['applications'] = get_objects_for_user(request.user, 'core.view_application')NEWLINE content_type = ContentType.objects.get_for_model(Application)NEWLINE if request.method == 'POST':NEWLINE for application in data['applications']:NEWLINE key = 'notification[%s]' % application.idNEWLINE notification, created = NotificationPreferences.objects.get_or_create(NEWLINE user=request.user,NEWLINE event_type='ExecutionFinish',NEWLINE content_type=content_type,NEWLINE object_id=application.id)NEWLINE if notification.is_active != (key in request.POST):NEWLINE notification.is_active = key in request.POSTNEWLINE notification.save()NEWLINE messages.success(request, 'Saved')NEWLINE data['notifications'] = NotificationPreferences.objects.filter(NEWLINE user=request.user,NEWLINE event_type='ExecutionFinish',NEWLINE content_type=content_type.id).values_list('object_id', 'is_active')NEWLINE data['notifications'] = dict(data['notifications'])NEWLINE return dataNEWLINENEWLINENEWLINEdef _settings_department_applications(request, data):NEWLINE data['subsection_template'] = 'partial/application_list.html'NEWLINE data['applications'] = Application.objects.filter(department_id=request.current_department_id)NEWLINE data['empty'] = not bool(data['applications'].count())NEWLINE return dataNEWLINENEWLINENEWLINEdef _settings_department_users(request, data):NEWLINE data['subsection_template'] = 'partial/user_list.html'NEWLINE from guardian.shortcuts import get_users_with_permsNEWLINE department = Department.objects.get(pk=request.current_department_id)NEWLINE data['users'] = get_users_with_perms(department).prefetch_related('groups__departmentgroup').order_by('name')NEWLINE data['department_user_list'] = TrueNEWLINE data['form_name'] = 'user'NEWLINE return dataNEWLINENEWLINENEWLINEdef _settings_department_groups(request, data):NEWLINE data['subsection_template'] = 'partial/group_list.html'NEWLINE data['groups'] = DepartmentGroup.objects.filter(department_id=request.current_department_id)NEWLINE return dataNEWLINENEWLINENEWLINEdef _settings_department_serverroles(request, data):NEWLINE data['subsection_template'] = 'partial/serverrole_list.html'NEWLINE data['serverroles'] = 
ServerRole.objects.filter(department_id=request.current_department_id)NEWLINE    data['empty'] = not bool(data['serverroles'].count())NEWLINE    return dataNEWLINENEWLINENEWLINE@user_passes_test(lambda u: u.is_superuser)NEWLINEdef _settings_system_departments(request, data):NEWLINE    data['subsection_template'] = 'partial/department_list.html'NEWLINE    data['departments'] = Department.objects.all()NEWLINE    return dataNEWLINENEWLINENEWLINE@user_passes_test(lambda u: u.is_superuser)NEWLINEdef _settings_system_users(request, data):NEWLINE    data['subsection_template'] = 'partial/user_list.html'NEWLINE    data['users'] = get_user_model().objects.exclude(id=-1).prefetch_related('groups__departmentgroup__department').order_by('name')NEWLINE    data['form_name'] = 'usersystem'NEWLINE    return dataNEWLINENEWLINENEWLINEdef department_switch(request, id):NEWLINE    department = get_object_or_404(Department, pk=id)NEWLINE    if request.user.has_perm('core.view_department', department):NEWLINE        request.session['current_department_id'] = int(id)NEWLINE    else:NEWLINE        messages.error(request, 'Access forbidden')NEWLINE    return redirect('index')NEWLINENEWLINENEWLINEdef handle_403(request):NEWLINE    messages.error(request, 'Access forbidden')NEWLINE    return redirect('index')
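# settings_page() above routes by building a handler name, '_settings_<section>_<subsection>',NEWLINE# and looking it up in globals() (404 when missing), so adding a settings page is just definingNEWLINE# a matching module-level function. A hypothetical sketch that would sit in the views moduleNEWLINE# above (where Environment is already imported) -- the template path and theNEWLINE# 'application__department_id' filter are illustrative assumptions, not part of the project:NEWLINEdef _settings_department_environments(request, data):NEWLINE    data['subsection_template'] = 'partial/environment_list.html'NEWLINE    data['environments'] = Environment.objects.filter(NEWLINE        application__department_id=request.current_department_id)NEWLINE    data['empty'] = not data['environments'].exists()NEWLINE    return dataNEWLINE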
from nose import SkipTestNEWLINEfrom nose.tools import eq_NEWLINEfrom pyquery import PyQuery as pqNEWLINENEWLINEfrom kitsune.gallery.models import Image, VideoNEWLINEfrom kitsune.gallery.tests import image, videoNEWLINEfrom kitsune.sumo.helpers import urlparamsNEWLINEfrom kitsune.sumo.tests import TestCase, get, LocalizingClient, postNEWLINEfrom kitsune.sumo.urlresolvers import reverseNEWLINEfrom kitsune.users.tests import userNEWLINENEWLINENEWLINEclass GalleryPageCase(TestCase):NEWLINE def tearDown(self):NEWLINE Image.objects.all().delete()NEWLINE super(GalleryPageCase, self).tearDown()NEWLINENEWLINE def test_gallery_images(self):NEWLINE """Test that all images show up on images gallery page.NEWLINENEWLINE Also, Make sure they don't show up on videos page.NEWLINENEWLINE """NEWLINE img = image()NEWLINE response = get(self.client, 'gallery.gallery', args=['image'])NEWLINE eq_(200, response.status_code)NEWLINE doc = pq(response.content)NEWLINE imgs = doc('#media-list li img')NEWLINE eq_(1, len(imgs))NEWLINE eq_(img.thumbnail_url_if_set(), imgs[0].attrib['src'])NEWLINENEWLINE def test_gallery_locale(self):NEWLINE """Test that images only show for their set locale."""NEWLINE image(locale='es')NEWLINE url = reverse('gallery.gallery', args=['image'])NEWLINE response = self.client.get(url, follow=True)NEWLINE eq_(200, response.status_code)NEWLINE doc = pq(response.content)NEWLINE imgs = doc('#media-list li img')NEWLINE eq_(0, len(imgs))NEWLINENEWLINE locale_url = reverse('gallery.gallery', locale='es',NEWLINE args=['image'])NEWLINE response = self.client.get(locale_url, follow=True)NEWLINE eq_(200, response.status_code)NEWLINE doc = pq(response.content)NEWLINE imgs = doc('#media-list li img')NEWLINE eq_(1, len(imgs))NEWLINENEWLINENEWLINEclass GalleryAsyncCase(TestCase):NEWLINE def tearDown(self):NEWLINE Image.objects.all().delete()NEWLINE super(GalleryAsyncCase, self).tearDown()NEWLINENEWLINE def test_gallery_image_list(self):NEWLINE """Test for ajax endpoint without search parameter."""NEWLINE img = image()NEWLINE url = urlparams(reverse('gallery.async'), type='image')NEWLINE response = self.client.get(url, follow=True)NEWLINE eq_(200, response.status_code)NEWLINE doc = pq(response.content)NEWLINE imgs = doc('#media-list li img')NEWLINE eq_(1, len(imgs))NEWLINE eq_(img.thumbnail_url_if_set(), imgs[0].attrib['src'])NEWLINENEWLINE def test_gallery_image_search(self):NEWLINE """Test for ajax endpoint with search parameter."""NEWLINE img = image()NEWLINE url = urlparams(reverse('gallery.async'), type='image', q='foobar')NEWLINE response = self.client.get(url, follow=True)NEWLINE eq_(200, response.status_code)NEWLINE doc = pq(response.content)NEWLINE imgs = doc('#media-list li img')NEWLINE eq_(0, len(imgs))NEWLINENEWLINE url = urlparams(reverse('gallery.async'), type='image', q=img.title)NEWLINE response = self.client.get(url, follow=True)NEWLINE eq_(200, response.status_code)NEWLINE doc = pq(response.content)NEWLINE imgs = doc('#media-list li img')NEWLINE eq_(1, len(imgs))NEWLINE eq_(img.thumbnail_url_if_set(), imgs[0].attrib['src'])NEWLINENEWLINENEWLINEclass GalleryUploadTestCase(TestCase):NEWLINE client_class = LocalizingClientNEWLINENEWLINE def setUp(self):NEWLINE super(GalleryUploadTestCase, self).setUp()NEWLINENEWLINE self.u = user(save=True)NEWLINE self.client.login(username=self.u.username, password='testpass')NEWLINENEWLINE def tearDown(self):NEWLINE Image.objects.all().delete()NEWLINE Video.objects.all().delete()NEWLINE super(GalleryUploadTestCase, self).tearDown()NEWLINENEWLINE def 
test_image_draft_shows(self):NEWLINE        """The image draft is loaded for this user."""NEWLINE        image(is_draft=True, creator=self.u)NEWLINE        response = get(self.client, 'gallery.gallery', args=['image'])NEWLINE        eq_(200, response.status_code)NEWLINE        doc = pq(response.content)NEWLINE        assert doc('.file.preview img').attr('src').endswith('098f6b.jpg')NEWLINE        eq_(1, doc('.file.preview img').length)NEWLINENEWLINE    def test_image_draft_post(self):NEWLINE        """Posting to the page saves the field values for the image draft."""NEWLINE        image(is_draft=True, creator=self.u)NEWLINE        response = post(self.client, 'gallery.gallery',NEWLINE                        {'description': '??', 'title': 'test'}, args=['image'])NEWLINE        eq_(200, response.status_code)NEWLINE        doc = pq(response.content)NEWLINE        # The draft form is re-rendered with the posted field values.NEWLINE        eq_('??', doc('#gallery-upload-modal textarea').html())NEWLINE        eq_('test', doc('#gallery-upload-modal input[name="title"]').val())NEWLINENEWLINE    def test_video_draft_post(self):NEWLINE        """Posting to the page saves the field values for the video draft."""NEWLINE        video(is_draft=True, creator=self.u)NEWLINE        response = post(self.client, 'gallery.gallery',NEWLINE                        {'title': 'zTestz'}, args=['image'])NEWLINE        eq_(200, response.status_code)NEWLINE        doc = pq(response.content)NEWLINE        # Preview for all 3 video formats: flv, ogv, webmNEWLINE        eq_('zTestz', doc('#gallery-upload-modal input[name="title"]').val())NEWLINENEWLINE    def test_modal_locale_selected(self):NEWLINE        """Locale value is selected for upload modal."""NEWLINE        response = get(self.client, 'gallery.gallery', args=['image'],NEWLINE                       locale='fr')NEWLINE        doc = pq(response.content)NEWLINE        eq_('fr',NEWLINE            doc('#gallery-upload-image option[selected="selected"]').val())NEWLINENEWLINE    def test_invalid_messages(self):NEWLINE        # TODO(paul) POSTing invalid data shows error messages and pre-fillsNEWLINE        raise SkipTest('Not implemented')NEWLINENEWLINENEWLINEclass MediaPageCase(TestCase):NEWLINE    def tearDown(self):NEWLINE        Image.objects.all().delete()NEWLINE        super(MediaPageCase, self).tearDown()NEWLINENEWLINE    def test_image_media_page(self):NEWLINE        """Test the media page."""NEWLINE        img = image()NEWLINE        response = self.client.get(img.get_absolute_url(), follow=True)NEWLINE        eq_(200, response.status_code)NEWLINE        doc = pq(response.content)NEWLINE        eq_(img.title, doc('h1').text())NEWLINE        eq_(img.description, doc('#media-object div.description').text())NEWLINE        eq_(img.file.url, doc('#media-view img')[0].attrib['src'])NEWLINE
# coding: utf-8NEWLINENEWLINE"""NEWLINE    Cost ManagementNEWLINENEWLINE    The API for Project Koku and OpenShift cost management. You can find out more about Cost Management at [https://github.com/project-koku/](https://github.com/project-koku/).  # noqa: E501NEWLINENEWLINE    The version of the OpenAPI document: 1.0.0NEWLINE    Generated by: https://openapi-generator.techNEWLINE"""NEWLINENEWLINENEWLINEfrom __future__ import absolute_importNEWLINENEWLINEimport unittestNEWLINEimport datetimeNEWLINENEWLINEimport openapi_clientNEWLINEfrom openapi_client.models.cost_model_resp_providers import CostModelRespProviders  # noqa: E501NEWLINEfrom openapi_client.rest import ApiExceptionNEWLINENEWLINEclass TestCostModelRespProviders(unittest.TestCase):NEWLINE    """CostModelRespProviders unit test stubs"""NEWLINENEWLINE    def setUp(self):NEWLINE        passNEWLINENEWLINE    def tearDown(self):NEWLINE        passNEWLINENEWLINE    def make_instance(self, include_optional):NEWLINE        """Test CostModelRespProvidersNEWLINE            include_optional is a boolean, when False only requiredNEWLINE            params are included, when True both required andNEWLINE            optional params are included """NEWLINE        # model = openapi_client.models.cost_model_resp_providers.CostModelRespProviders()  # noqa: E501NEWLINE        if include_optional :NEWLINE            return CostModelRespProviders(NEWLINE                uuid = 'e5ff62e7-e6d6-5513-5532-45fe72792dae', NEWLINE                name = 'provider'NEWLINE            )NEWLINE        else :NEWLINE            return CostModelRespProviders(NEWLINE        )NEWLINENEWLINE    def testCostModelRespProviders(self):NEWLINE        """Test CostModelRespProviders"""NEWLINE        inst_req_only = self.make_instance(include_optional=False)NEWLINE        inst_req_and_optional = self.make_instance(include_optional=True)NEWLINENEWLINENEWLINEif __name__ == '__main__':NEWLINE    unittest.main()NEWLINE
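# The generated stub above only exercises construction. A slightly fuller check --NEWLINE# assuming the to_dict() helper that openapi-generator's python templates attach toNEWLINE# every generated model -- could round-trip the optional fields:NEWLINEdef check_round_trip():NEWLINE    from openapi_client.models.cost_model_resp_providers import CostModelRespProvidersNEWLINENEWLINE    inst = CostModelRespProviders(uuid='e5ff62e7-e6d6-5513-5532-45fe72792dae',NEWLINE                                  name='provider')NEWLINE    data = inst.to_dict()NEWLINE    assert data.get('uuid') == inst.uuidNEWLINE    assert data.get('name') == inst.nameNEWLINE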
import jsonNEWLINENEWLINENEWLINEdef dummy_function():NEWLINE    return []NEWLINENEWLINENEWLINEdef test_norun():NEWLINE    # The original body was the bare words "this shall not run" (a SyntaxError);NEWLINE    # keep the intent but make the file importable by failing explicitly.NEWLINE    raise RuntimeError('this shall not run')NEWLINENEWLINEif __name__ == '__main__':NEWLINE    test_norun()NEWLINE