Dataset columns (type and value/length range):

Column                      Type     Range
Unnamed: 0                  int64    0 to 10k
repository_name             string   lengths 7 to 54
func_path_in_repository     string   lengths 5 to 223
func_name                   string   lengths 1 to 134
whole_func_string           string   lengths 100 to 30.3k
language                    string   1 distinct value
func_code_string            string   lengths 100 to 30.3k
func_code_tokens            string   lengths 138 to 33.2k
func_documentation_string   string   lengths 1 to 15k
func_documentation_tokens   string   lengths 5 to 5.14k
split_name                  string   1 distinct value
func_code_url               string   lengths 91 to 315
200
alerta/alerta
alerta/models/key.py
ApiKey.find_all
def find_all(query: Query=None) -> List['ApiKey']: """ List all API keys. """ return [ApiKey.from_db(key) for key in db.get_keys(query)]
python
def find_all(query: Query=None) -> List['ApiKey']: """ List all API keys. """ return [ApiKey.from_db(key) for key in db.get_keys(query)]
['def', 'find_all', '(', 'query', ':', 'Query', '=', 'None', ')', '->', 'List', '[', "'ApiKey'", ']', ':', 'return', '[', 'ApiKey', '.', 'from_db', '(', 'key', ')', 'for', 'key', 'in', 'db', '.', 'get_keys', '(', 'query', ')', ']']
List all API keys.
['List', 'all', 'API', 'keys', '.']
train
https://github.com/alerta/alerta/blob/6478d6addc217c96a4a6688fab841035bef134e1/alerta/models/key.py#L120-L124
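A minimal standalone sketch of the pattern in the record above: map raw backend rows to model instances with a classmethod and a list comprehension. The FakeDB class and its rows are hypothetical stand-ins, not alerta's actual storage backend.

```python
from dataclasses import dataclass
from typing import List, Optional


class FakeDB:
    """Hypothetical backend returning plain dicts."""
    _rows = [{"id": "k1", "user": "alice"}, {"id": "k2", "user": "bob"}]

    def get_keys(self, query: Optional[dict] = None) -> List[dict]:
        return list(self._rows)


db = FakeDB()


@dataclass
class ApiKey:
    id: str
    user: str

    @classmethod
    def from_db(cls, doc: dict) -> "ApiKey":
        return cls(id=doc["id"], user=doc["user"])

    @staticmethod
    def find_all(query: Optional[dict] = None) -> List["ApiKey"]:
        # same shape as the record above: one model instance per backend row
        return [ApiKey.from_db(key) for key in db.get_keys(query)]


print(ApiKey.find_all())  # [ApiKey(id='k1', ...), ApiKey(id='k2', ...)]
```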
201
fastai/fastai
fastai/utils/ipython.py
is_in_ipython
def is_in_ipython(): "Is the code running in the ipython environment (jupyter including)" program_name = os.path.basename(os.getenv('_', '')) if ('jupyter-notebook' in program_name or # jupyter-notebook 'ipython' in program_name or # ipython 'JPY_PARENT_PID' in os.environ): # ipython-notebook return True else: return False
python
def is_in_ipython(): "Is the code running in the ipython environment (jupyter including)" program_name = os.path.basename(os.getenv('_', '')) if ('jupyter-notebook' in program_name or # jupyter-notebook 'ipython' in program_name or # ipython 'JPY_PARENT_PID' in os.environ): # ipython-notebook return True else: return False
['def', 'is_in_ipython', '(', ')', ':', 'program_name', '=', 'os', '.', 'path', '.', 'basename', '(', 'os', '.', 'getenv', '(', "'_'", ',', "''", ')', ')', 'if', '(', "'jupyter-notebook'", 'in', 'program_name', 'or', '# jupyter-notebook', "'ipython'", 'in', 'program_name', 'or', '# ipython', "'JPY_PARENT_PID'", 'in', 'os', '.', 'environ', ')', ':', '# ipython-notebook', 'return', 'True', 'else', ':', 'return', 'False']
Is the code running in the ipython environment (jupyter including)
['Is', 'the', 'code', 'running', 'in', 'the', 'ipython', 'environment', '(', 'jupyter', 'including', ')']
train
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/utils/ipython.py#L5-L15
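A self-contained variant of the environment check above; the fallback branch at the end is an illustrative usage, not fastai's code.

```python
import os


def is_in_ipython() -> bool:
    # same heuristics as the record above: process name or Jupyter parent PID
    program_name = os.path.basename(os.getenv('_', ''))
    return ('jupyter-notebook' in program_name
            or 'ipython' in program_name
            or 'JPY_PARENT_PID' in os.environ)


if is_in_ipython():
    print("running under IPython/Jupyter: rich output is available")
else:
    print("plain console: fall back to text-only progress reporting")
```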
202
spyder-ide/spyder
spyder/plugins/variableexplorer/widgets/dataframeeditor.py
DataFrameModel.header
def header(self, axis, x, level=0): """ Return the values of the labels for the header of columns or rows. The value corresponds to the header of column or row x in the given level. """ ax = self._axis(axis) return ax.values[x] if not hasattr(ax, 'levels') \ else ax.values[x][level]
python
def header(self, axis, x, level=0): """ Return the values of the labels for the header of columns or rows. The value corresponds to the header of column or row x in the given level. """ ax = self._axis(axis) return ax.values[x] if not hasattr(ax, 'levels') \ else ax.values[x][level]
['def', 'header', '(', 'self', ',', 'axis', ',', 'x', ',', 'level', '=', '0', ')', ':', 'ax', '=', 'self', '.', '_axis', '(', 'axis', ')', 'return', 'ax', '.', 'values', '[', 'x', ']', 'if', 'not', 'hasattr', '(', 'ax', ',', "'levels'", ')', 'else', 'ax', '.', 'values', '[', 'x', ']', '[', 'level', ']']
Return the values of the labels for the header of columns or rows. The value corresponds to the header of column or row x in the given level.
['Return', 'the', 'values', 'of', 'the', 'labels', 'for', 'the', 'header', 'of', 'columns', 'or', 'rows', '.', 'The', 'value', 'corresponds', 'to', 'the', 'header', 'of', 'column', 'or', 'row', 'x', 'in', 'the', 'given', 'level', '.']
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/dataframeeditor.py#L194-L203
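A sketch of the Index / MultiIndex distinction the header() method above relies on: a flat pandas Index has no `levels` attribute, while a MultiIndex does, and its values are tuples that can be indexed per level. Assumes pandas is installed; the example labels are made up.

```python
import pandas as pd

flat = pd.Index(["a", "b", "c"])
multi = pd.MultiIndex.from_tuples([("x", 1), ("x", 2), ("y", 1)])

print(hasattr(flat, "levels"))    # False -> flat.values[0] is the label itself
print(hasattr(multi, "levels"))   # True  -> multi.values[0] is a tuple
print(flat.values[0])             # 'a'
print(multi.values[0][1])         # 1, the label at level 1 of row 0
```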
203
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
MAVLink.gps_raw_int_encode
def gps_raw_int_encode(self, time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible): ''' The global position, as returned by the Global Positioning System (GPS). This is NOT the global position estimate of the system, but rather a RAW sensor value. See message GLOBAL_POSITION for the global position estimate. Coordinate frame is right-handed, Z-axis up (GPS frame). time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) fix_type : See the GPS_FIX_TYPE enum. (uint8_t) lat : Latitude (WGS84), in degrees * 1E7 (int32_t) lon : Longitude (WGS84), in degrees * 1E7 (int32_t) alt : Altitude (AMSL, NOT WGS84), in meters * 1000 (positive for up). Note that virtually all GPS modules provide the AMSL altitude in addition to the WGS84 altitude. (int32_t) eph : GPS HDOP horizontal dilution of position (unitless). If unknown, set to: UINT16_MAX (uint16_t) epv : GPS VDOP vertical dilution of position (unitless). If unknown, set to: UINT16_MAX (uint16_t) vel : GPS ground speed (m/s * 100). If unknown, set to: UINT16_MAX (uint16_t) cog : Course over ground (NOT heading, but direction of movement) in degrees * 100, 0.0..359.99 degrees. If unknown, set to: UINT16_MAX (uint16_t) satellites_visible : Number of satellites visible. If unknown, set to 255 (uint8_t) ''' return MAVLink_gps_raw_int_message(time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible)
python
def gps_raw_int_encode(self, time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible): ''' The global position, as returned by the Global Positioning System (GPS). This is NOT the global position estimate of the system, but rather a RAW sensor value. See message GLOBAL_POSITION for the global position estimate. Coordinate frame is right-handed, Z-axis up (GPS frame). time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) fix_type : See the GPS_FIX_TYPE enum. (uint8_t) lat : Latitude (WGS84), in degrees * 1E7 (int32_t) lon : Longitude (WGS84), in degrees * 1E7 (int32_t) alt : Altitude (AMSL, NOT WGS84), in meters * 1000 (positive for up). Note that virtually all GPS modules provide the AMSL altitude in addition to the WGS84 altitude. (int32_t) eph : GPS HDOP horizontal dilution of position (unitless). If unknown, set to: UINT16_MAX (uint16_t) epv : GPS VDOP vertical dilution of position (unitless). If unknown, set to: UINT16_MAX (uint16_t) vel : GPS ground speed (m/s * 100). If unknown, set to: UINT16_MAX (uint16_t) cog : Course over ground (NOT heading, but direction of movement) in degrees * 100, 0.0..359.99 degrees. If unknown, set to: UINT16_MAX (uint16_t) satellites_visible : Number of satellites visible. If unknown, set to 255 (uint8_t) ''' return MAVLink_gps_raw_int_message(time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible)
['def', 'gps_raw_int_encode', '(', 'self', ',', 'time_usec', ',', 'fix_type', ',', 'lat', ',', 'lon', ',', 'alt', ',', 'eph', ',', 'epv', ',', 'vel', ',', 'cog', ',', 'satellites_visible', ')', ':', 'return', 'MAVLink_gps_raw_int_message', '(', 'time_usec', ',', 'fix_type', ',', 'lat', ',', 'lon', ',', 'alt', ',', 'eph', ',', 'epv', ',', 'vel', ',', 'cog', ',', 'satellites_visible', ')']
The global position, as returned by the Global Positioning System (GPS). This is NOT the global position estimate of the system, but rather a RAW sensor value. See message GLOBAL_POSITION for the global position estimate. Coordinate frame is right-handed, Z-axis up (GPS frame). time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) fix_type : See the GPS_FIX_TYPE enum. (uint8_t) lat : Latitude (WGS84), in degrees * 1E7 (int32_t) lon : Longitude (WGS84), in degrees * 1E7 (int32_t) alt : Altitude (AMSL, NOT WGS84), in meters * 1000 (positive for up). Note that virtually all GPS modules provide the AMSL altitude in addition to the WGS84 altitude. (int32_t) eph : GPS HDOP horizontal dilution of position (unitless). If unknown, set to: UINT16_MAX (uint16_t) epv : GPS VDOP vertical dilution of position (unitless). If unknown, set to: UINT16_MAX (uint16_t) vel : GPS ground speed (m/s * 100). If unknown, set to: UINT16_MAX (uint16_t) cog : Course over ground (NOT heading, but direction of movement) in degrees * 100, 0.0..359.99 degrees. If unknown, set to: UINT16_MAX (uint16_t) satellites_visible : Number of satellites visible. If unknown, set to 255 (uint8_t)
['The', 'global', 'position', 'as', 'returned', 'by', 'the', 'Global', 'Positioning', 'System', '(', 'GPS', ')', '.', 'This', 'is', 'NOT', 'the', 'global', 'position', 'estimate', 'of', 'the', 'system', 'but', 'rather', 'a', 'RAW', 'sensor', 'value', '.', 'See', 'message', 'GLOBAL_POSITION', 'for', 'the', 'global', 'position', 'estimate', '.', 'Coordinate', 'frame', 'is', 'right', '-', 'handed', 'Z', '-', 'axis', 'up', '(', 'GPS', 'frame', ')', '.']
train
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L8913-L8934
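An illustrative conversion of physical GPS values into the scaled integer fields described in the GPS_RAW_INT docstring above (degrees * 1E7, millimetres, cm/s, centidegrees). Plain arithmetic only; no pymavlink message objects are built, and the fix values are made up.

```python
lat_deg, lon_deg = 52.379189, 4.899431   # hypothetical fix
alt_m, speed_ms, course_deg = 11.2, 3.57, 123.4

fields = {
    "lat": int(round(lat_deg * 1e7)),    # int32, degrees * 1E7
    "lon": int(round(lon_deg * 1e7)),    # int32, degrees * 1E7
    "alt": int(round(alt_m * 1000)),     # int32, millimetres AMSL
    "vel": int(round(speed_ms * 100)),   # uint16, cm/s
    "cog": int(round(course_deg * 100)), # uint16, centidegrees
    "eph": 65535,                        # UINT16_MAX: HDOP unknown
    "satellites_visible": 255,           # 255: count unknown
}
print(fields)
```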
204
numberoverzero/bloop
bloop/transactions.py
ReadTransaction.load
def load(self, *objs) -> "ReadTransaction": """ Add one or more objects to be loaded in this transaction. At most 10 items can be loaded in the same transaction. All objects will be loaded each time you call commit(). :param objs: Objects to add to the set that are loaded in this transaction. :return: this transaction for chaining :raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded. """ self._extend([TxItem.new("get", obj) for obj in objs]) return self
python
def load(self, *objs) -> "ReadTransaction": """ Add one or more objects to be loaded in this transaction. At most 10 items can be loaded in the same transaction. All objects will be loaded each time you call commit(). :param objs: Objects to add to the set that are loaded in this transaction. :return: this transaction for chaining :raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded. """ self._extend([TxItem.new("get", obj) for obj in objs]) return self
['def', 'load', '(', 'self', ',', '*', 'objs', ')', '->', '"ReadTransaction"', ':', 'self', '.', '_extend', '(', '[', 'TxItem', '.', 'new', '(', '"get"', ',', 'obj', ')', 'for', 'obj', 'in', 'objs', ']', ')', 'return', 'self']
Add one or more objects to be loaded in this transaction. At most 10 items can be loaded in the same transaction. All objects will be loaded each time you call commit(). :param objs: Objects to add to the set that are loaded in this transaction. :return: this transaction for chaining :raises bloop.exceptions.MissingObjects: if one or more objects aren't loaded.
['Add', 'one', 'or', 'more', 'objects', 'to', 'be', 'loaded', 'in', 'this', 'transaction', '.']
train
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/transactions.py#L267-L280
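A minimal sketch of the fluent "return self" chaining pattern used by ReadTransaction.load() above. TxItem and the 10-item cap mirror the docstring, but the class here is a simplified stand-in, not bloop itself.

```python
from typing import Any, List, NamedTuple


class TxItem(NamedTuple):
    op: str
    obj: Any


class ReadTransaction:
    MAX_ITEMS = 10  # the docstring above caps a read transaction at 10 items

    def __init__(self) -> None:
        self._items: List[TxItem] = []

    def _extend(self, items: List[TxItem]) -> None:
        if len(self._items) + len(items) > self.MAX_ITEMS:
            raise RuntimeError("cannot load more than 10 items per transaction")
        self._items.extend(items)

    def load(self, *objs: Any) -> "ReadTransaction":
        self._extend([TxItem("get", obj) for obj in objs])
        return self  # returning self enables tx.load(a).load(b) chaining


tx = ReadTransaction().load("user#1").load("user#2", "user#3")
print(len(tx._items))  # 3
```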
205
gwpy/gwpy
gwpy/spectrogram/spectrogram.py
Spectrogram.crop_frequencies
def crop_frequencies(self, low=None, high=None, copy=False): """Crop this `Spectrogram` to the specified frequencies Parameters ---------- low : `float` lower frequency bound for cropped `Spectrogram` high : `float` upper frequency bound for cropped `Spectrogram` copy : `bool` if `False` return a view of the original data, otherwise create a fresh memory copy Returns ------- spec : `Spectrogram` A new `Spectrogram` with a subset of data from the frequency axis """ if low is not None: low = units.Quantity(low, self._default_yunit) if high is not None: high = units.Quantity(high, self._default_yunit) # check low frequency if low is not None and low == self.f0: low = None elif low is not None and low < self.f0: warnings.warn('Spectrogram.crop_frequencies given low frequency ' 'cutoff below f0 of the input Spectrogram. Low ' 'frequency crop will have no effect.') # check high frequency if high is not None and high.value == self.band[1]: high = None elif high is not None and high.value > self.band[1]: warnings.warn('Spectrogram.crop_frequencies given high frequency ' 'cutoff above cutoff of the input Spectrogram. High ' 'frequency crop will have no effect.') # find low index if low is None: idx0 = None else: idx0 = int(float(low.value - self.f0.value) // self.df.value) # find high index if high is None: idx1 = None else: idx1 = int(float(high.value - self.f0.value) // self.df.value) # crop if copy: return self[:, idx0:idx1].copy() return self[:, idx0:idx1]
python
def crop_frequencies(self, low=None, high=None, copy=False): """Crop this `Spectrogram` to the specified frequencies Parameters ---------- low : `float` lower frequency bound for cropped `Spectrogram` high : `float` upper frequency bound for cropped `Spectrogram` copy : `bool` if `False` return a view of the original data, otherwise create a fresh memory copy Returns ------- spec : `Spectrogram` A new `Spectrogram` with a subset of data from the frequency axis """ if low is not None: low = units.Quantity(low, self._default_yunit) if high is not None: high = units.Quantity(high, self._default_yunit) # check low frequency if low is not None and low == self.f0: low = None elif low is not None and low < self.f0: warnings.warn('Spectrogram.crop_frequencies given low frequency ' 'cutoff below f0 of the input Spectrogram. Low ' 'frequency crop will have no effect.') # check high frequency if high is not None and high.value == self.band[1]: high = None elif high is not None and high.value > self.band[1]: warnings.warn('Spectrogram.crop_frequencies given high frequency ' 'cutoff above cutoff of the input Spectrogram. High ' 'frequency crop will have no effect.') # find low index if low is None: idx0 = None else: idx0 = int(float(low.value - self.f0.value) // self.df.value) # find high index if high is None: idx1 = None else: idx1 = int(float(high.value - self.f0.value) // self.df.value) # crop if copy: return self[:, idx0:idx1].copy() return self[:, idx0:idx1]
['def', 'crop_frequencies', '(', 'self', ',', 'low', '=', 'None', ',', 'high', '=', 'None', ',', 'copy', '=', 'False', ')', ':', 'if', 'low', 'is', 'not', 'None', ':', 'low', '=', 'units', '.', 'Quantity', '(', 'low', ',', 'self', '.', '_default_yunit', ')', 'if', 'high', 'is', 'not', 'None', ':', 'high', '=', 'units', '.', 'Quantity', '(', 'high', ',', 'self', '.', '_default_yunit', ')', '# check low frequency', 'if', 'low', 'is', 'not', 'None', 'and', 'low', '==', 'self', '.', 'f0', ':', 'low', '=', 'None', 'elif', 'low', 'is', 'not', 'None', 'and', 'low', '<', 'self', '.', 'f0', ':', 'warnings', '.', 'warn', '(', "'Spectrogram.crop_frequencies given low frequency '", "'cutoff below f0 of the input Spectrogram. Low '", "'frequency crop will have no effect.'", ')', '# check high frequency', 'if', 'high', 'is', 'not', 'None', 'and', 'high', '.', 'value', '==', 'self', '.', 'band', '[', '1', ']', ':', 'high', '=', 'None', 'elif', 'high', 'is', 'not', 'None', 'and', 'high', '.', 'value', '>', 'self', '.', 'band', '[', '1', ']', ':', 'warnings', '.', 'warn', '(', "'Spectrogram.crop_frequencies given high frequency '", "'cutoff above cutoff of the input Spectrogram. High '", "'frequency crop will have no effect.'", ')', '# find low index', 'if', 'low', 'is', 'None', ':', 'idx0', '=', 'None', 'else', ':', 'idx0', '=', 'int', '(', 'float', '(', 'low', '.', 'value', '-', 'self', '.', 'f0', '.', 'value', ')', '//', 'self', '.', 'df', '.', 'value', ')', '# find high index', 'if', 'high', 'is', 'None', ':', 'idx1', '=', 'None', 'else', ':', 'idx1', '=', 'int', '(', 'float', '(', 'high', '.', 'value', '-', 'self', '.', 'f0', '.', 'value', ')', '//', 'self', '.', 'df', '.', 'value', ')', '# crop', 'if', 'copy', ':', 'return', 'self', '[', ':', ',', 'idx0', ':', 'idx1', ']', '.', 'copy', '(', ')', 'return', 'self', '[', ':', ',', 'idx0', ':', 'idx1', ']']
Crop this `Spectrogram` to the specified frequencies Parameters ---------- low : `float` lower frequency bound for cropped `Spectrogram` high : `float` upper frequency bound for cropped `Spectrogram` copy : `bool` if `False` return a view of the original data, otherwise create a fresh memory copy Returns ------- spec : `Spectrogram` A new `Spectrogram` with a subset of data from the frequency axis
['Crop', 'this', 'Spectrogram', 'to', 'the', 'specified', 'frequencies']
train
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/spectrogram/spectrogram.py#L541-L591
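A standalone sketch of the frequency-to-column-index arithmetic used by crop_frequencies() above: idx = (f - f0) // df, then slice the frequency axis. Plain numpy with made-up bin parameters; no gwpy objects involved.

```python
import numpy as np

f0, df = 0.0, 0.5                # first bin and bin width in Hz
spec = np.random.rand(20, 128)   # 20 time steps x 128 frequency bins

low, high = 8.0, 32.0            # requested crop band in Hz
idx0 = int((low - f0) // df)     # 16
idx1 = int((high - f0) // df)    # 64

cropped = spec[:, idx0:idx1]     # a view, like copy=False above
print(cropped.shape)             # (20, 48)
```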
206
PmagPy/PmagPy
SPD/lib/lib_curvature.py
LMA
def LMA(XY,ParIni): """ input: list of x and y values [[x_1, y_1], [x_2, y_2], ....], and a tuple containing an initial guess (a, b, r) which is acquired by using an algebraic circle fit (TaubinSVD) output: a, b, r. a and b are the center of the fitting circle, and r is the radius % Geometric circle fit (minimizing orthogonal distances) % based on the Levenberg-Marquardt scheme in the % "algebraic parameters" A,B,C,D with constraint B*B+C*C-4*A*D=1 % N. Chernov and C. Lesort, "Least squares fitting of circles", % J. Math. Imag. Vision, Vol. 23, 239-251 (2005) """ factorUp=10 factorDown=0.04 lambda0=0.01 epsilon=0.000001 IterMAX = 50 AdjustMax = 20 Xshift=0 Yshift=0 dX=1 dY=0; n = len(XY); # number of data points anew = ParIni[0] + Xshift bnew = ParIni[1] + Yshift Anew = old_div(1.,(2.*ParIni[2])) aabb = anew*anew + bnew*bnew Fnew = (aabb - ParIni[2]*ParIni[2])*Anew Tnew = numpy.arccos(old_div(-anew,numpy.sqrt(aabb))) if bnew > 0: Tnew = 2*numpy.pi - Tnew VarNew = VarCircle(XY,ParIni) VarLambda = lambda0; finish = 0; for it in range(0,IterMAX): Aold = Anew Fold = Fnew Told = Tnew VarOld = VarNew H = numpy.sqrt(1+4*Aold*Fold); aold = -H*numpy.cos(Told)/(Aold+Aold) - Xshift; bold = -H*numpy.sin(Told)/(Aold+Aold) - Yshift; Rold = old_div(1,abs(Aold+Aold)); DD = 1 + 4*Aold*Fold; D = numpy.sqrt(DD); CT = numpy.cos(Told); ST = numpy.sin(Told); H11=0; H12=0; H13=0; H22=0; H23=0; H33=0; F1=0; F2=0; F3=0; for i in range(0,n): Xi = XY[i,0] + Xshift; Yi = XY[i,1] + Yshift; Zi = Xi*Xi + Yi*Yi; Ui = Xi*CT + Yi*ST; Vi =-Xi*ST + Yi*CT; ADF = Aold*Zi + D*Ui + Fold; SQ = numpy.sqrt(4*Aold*ADF + 1); DEN = SQ + 1; Gi = 2*ADF/DEN; FACT = 2/DEN*(1 - Aold*Gi/SQ); DGDAi = FACT*(Zi + 2*Fold*Ui/D) - Gi*Gi/SQ; DGDFi = FACT*(2*Aold*Ui/D + 1); DGDTi = FACT*D*Vi; H11 = H11 + DGDAi*DGDAi; H12 = H12 + DGDAi*DGDFi; H13 = H13 + DGDAi*DGDTi; H22 = H22 + DGDFi*DGDFi; H23 = H23 + DGDFi*DGDTi; H33 = H33 + DGDTi*DGDTi; F1 = F1 + Gi*DGDAi; F2 = F2 + Gi*DGDFi; F3 = F3 + Gi*DGDTi; for adjust in range(1,AdjustMax): # Cholesly decomposition G11 = numpy.sqrt(H11 + VarLambda); G12 = old_div(H12,G11) G13 = old_div(H13,G11) G22 = numpy.sqrt(H22 + VarLambda - G12*G12); G23 = old_div((H23 - G12*G13),G22); G33 = numpy.sqrt(H33 + VarLambda - G13*G13 - G23*G23); D1 = old_div(F1,G11); D2 = old_div((F2 - G12*D1),G22); D3 = old_div((F3 - G13*D1 - G23*D2),G33); dT = old_div(D3,G33); dF = old_div((D2 - G23*dT),G22) dA = old_div((D1 - G12*dF - G13*dT),G11) # updating the parameters Anew = Aold - dA; Fnew = Fold - dF; Tnew = Told - dT; if 1+4*Anew*Fnew < epsilon and VarLambda>1: Xshift = Xshift + dX; Yshift = Yshift + dY; H = numpy.sqrt(1+4*Aold*Fold); aTemp = -H*numpy.cos(Told)/(Aold+Aold) + dX; bTemp = -H*numpy.sin(Told)/(Aold+Aold) + dY; rTemp = old_div(1,abs(Aold+Aold)); Anew = old_div(1,(rTemp + rTemp)); aabb = aTemp*aTemp + bTemp*bTemp; Fnew = (aabb - rTemp*rTemp)*Anew; Tnew = numpy.arccos(old_div(-aTemp,numpy.sqrt(aabb))); if bTemp > 0: Tnew = 2*numpy.pi - Tnew; VarNew = VarOld; break; if 1+4*Anew*Fnew < epsilon: VarLambda = VarLambda * factorUp; continue; DD = 1 + 4*Anew*Fnew; D = numpy.sqrt(DD); CT = numpy.cos(Tnew); ST = numpy.sin(Tnew); GG = 0; for i in range(0, n): Xi = XY[i,0] + Xshift; Yi = XY[i,1] + Yshift; Zi = Xi*Xi + Yi*Yi; Ui = Xi*CT + Yi*ST; ADF = Anew*Zi + D*Ui + Fnew; SQ = numpy.sqrt(4*Anew*ADF + 1); DEN = SQ + 1; Gi = 2*ADF/DEN; GG = GG + Gi*Gi; VarNew = old_div(GG,(n-3)); H = numpy.sqrt(1+4*Anew*Fnew); anew = -H*numpy.cos(Tnew)/(Anew+Anew) - Xshift; bnew = -H*numpy.sin(Tnew)/(Anew+Anew) - Yshift; Rnew = 
old_div(1,abs(Anew+Anew)); if VarNew <= VarOld: progress = old_div((abs(anew-aold) + abs(bnew-bold) + abs(Rnew-Rold)),(Rnew+Rold)); if progress < epsilon: Aold = Anew; Fold = Fnew; Told = Tnew; VarOld = VarNew # %#ok<NASGU> finish = 1; break; VarLambda = VarLambda * factorDown break else: # % no improvement VarLambda = VarLambda * factorUp; continue; if finish == 1: break H = numpy.sqrt(1+4*Aold*Fold); result_a = -H*numpy.cos(Told)/(Aold+Aold) - Xshift; result_b = -H*numpy.sin(Told)/(Aold+Aold) - Yshift; result_r = old_div(1,abs(Aold+Aold)); return result_a, result_b, result_r
python
def LMA(XY,ParIni): """ input: list of x and y values [[x_1, y_1], [x_2, y_2], ....], and a tuple containing an initial guess (a, b, r) which is acquired by using an algebraic circle fit (TaubinSVD) output: a, b, r. a and b are the center of the fitting circle, and r is the radius % Geometric circle fit (minimizing orthogonal distances) % based on the Levenberg-Marquardt scheme in the % "algebraic parameters" A,B,C,D with constraint B*B+C*C-4*A*D=1 % N. Chernov and C. Lesort, "Least squares fitting of circles", % J. Math. Imag. Vision, Vol. 23, 239-251 (2005) """ factorUp=10 factorDown=0.04 lambda0=0.01 epsilon=0.000001 IterMAX = 50 AdjustMax = 20 Xshift=0 Yshift=0 dX=1 dY=0; n = len(XY); # number of data points anew = ParIni[0] + Xshift bnew = ParIni[1] + Yshift Anew = old_div(1.,(2.*ParIni[2])) aabb = anew*anew + bnew*bnew Fnew = (aabb - ParIni[2]*ParIni[2])*Anew Tnew = numpy.arccos(old_div(-anew,numpy.sqrt(aabb))) if bnew > 0: Tnew = 2*numpy.pi - Tnew VarNew = VarCircle(XY,ParIni) VarLambda = lambda0; finish = 0; for it in range(0,IterMAX): Aold = Anew Fold = Fnew Told = Tnew VarOld = VarNew H = numpy.sqrt(1+4*Aold*Fold); aold = -H*numpy.cos(Told)/(Aold+Aold) - Xshift; bold = -H*numpy.sin(Told)/(Aold+Aold) - Yshift; Rold = old_div(1,abs(Aold+Aold)); DD = 1 + 4*Aold*Fold; D = numpy.sqrt(DD); CT = numpy.cos(Told); ST = numpy.sin(Told); H11=0; H12=0; H13=0; H22=0; H23=0; H33=0; F1=0; F2=0; F3=0; for i in range(0,n): Xi = XY[i,0] + Xshift; Yi = XY[i,1] + Yshift; Zi = Xi*Xi + Yi*Yi; Ui = Xi*CT + Yi*ST; Vi =-Xi*ST + Yi*CT; ADF = Aold*Zi + D*Ui + Fold; SQ = numpy.sqrt(4*Aold*ADF + 1); DEN = SQ + 1; Gi = 2*ADF/DEN; FACT = 2/DEN*(1 - Aold*Gi/SQ); DGDAi = FACT*(Zi + 2*Fold*Ui/D) - Gi*Gi/SQ; DGDFi = FACT*(2*Aold*Ui/D + 1); DGDTi = FACT*D*Vi; H11 = H11 + DGDAi*DGDAi; H12 = H12 + DGDAi*DGDFi; H13 = H13 + DGDAi*DGDTi; H22 = H22 + DGDFi*DGDFi; H23 = H23 + DGDFi*DGDTi; H33 = H33 + DGDTi*DGDTi; F1 = F1 + Gi*DGDAi; F2 = F2 + Gi*DGDFi; F3 = F3 + Gi*DGDTi; for adjust in range(1,AdjustMax): # Cholesly decomposition G11 = numpy.sqrt(H11 + VarLambda); G12 = old_div(H12,G11) G13 = old_div(H13,G11) G22 = numpy.sqrt(H22 + VarLambda - G12*G12); G23 = old_div((H23 - G12*G13),G22); G33 = numpy.sqrt(H33 + VarLambda - G13*G13 - G23*G23); D1 = old_div(F1,G11); D2 = old_div((F2 - G12*D1),G22); D3 = old_div((F3 - G13*D1 - G23*D2),G33); dT = old_div(D3,G33); dF = old_div((D2 - G23*dT),G22) dA = old_div((D1 - G12*dF - G13*dT),G11) # updating the parameters Anew = Aold - dA; Fnew = Fold - dF; Tnew = Told - dT; if 1+4*Anew*Fnew < epsilon and VarLambda>1: Xshift = Xshift + dX; Yshift = Yshift + dY; H = numpy.sqrt(1+4*Aold*Fold); aTemp = -H*numpy.cos(Told)/(Aold+Aold) + dX; bTemp = -H*numpy.sin(Told)/(Aold+Aold) + dY; rTemp = old_div(1,abs(Aold+Aold)); Anew = old_div(1,(rTemp + rTemp)); aabb = aTemp*aTemp + bTemp*bTemp; Fnew = (aabb - rTemp*rTemp)*Anew; Tnew = numpy.arccos(old_div(-aTemp,numpy.sqrt(aabb))); if bTemp > 0: Tnew = 2*numpy.pi - Tnew; VarNew = VarOld; break; if 1+4*Anew*Fnew < epsilon: VarLambda = VarLambda * factorUp; continue; DD = 1 + 4*Anew*Fnew; D = numpy.sqrt(DD); CT = numpy.cos(Tnew); ST = numpy.sin(Tnew); GG = 0; for i in range(0, n): Xi = XY[i,0] + Xshift; Yi = XY[i,1] + Yshift; Zi = Xi*Xi + Yi*Yi; Ui = Xi*CT + Yi*ST; ADF = Anew*Zi + D*Ui + Fnew; SQ = numpy.sqrt(4*Anew*ADF + 1); DEN = SQ + 1; Gi = 2*ADF/DEN; GG = GG + Gi*Gi; VarNew = old_div(GG,(n-3)); H = numpy.sqrt(1+4*Anew*Fnew); anew = -H*numpy.cos(Tnew)/(Anew+Anew) - Xshift; bnew = -H*numpy.sin(Tnew)/(Anew+Anew) - Yshift; Rnew = 
old_div(1,abs(Anew+Anew)); if VarNew <= VarOld: progress = old_div((abs(anew-aold) + abs(bnew-bold) + abs(Rnew-Rold)),(Rnew+Rold)); if progress < epsilon: Aold = Anew; Fold = Fnew; Told = Tnew; VarOld = VarNew # %#ok<NASGU> finish = 1; break; VarLambda = VarLambda * factorDown break else: # % no improvement VarLambda = VarLambda * factorUp; continue; if finish == 1: break H = numpy.sqrt(1+4*Aold*Fold); result_a = -H*numpy.cos(Told)/(Aold+Aold) - Xshift; result_b = -H*numpy.sin(Told)/(Aold+Aold) - Yshift; result_r = old_div(1,abs(Aold+Aold)); return result_a, result_b, result_r
['def', 'LMA', '(', 'XY', ',', 'ParIni', ')', ':', 'factorUp', '=', '10', 'factorDown', '=', '0.04', 'lambda0', '=', '0.01', 'epsilon', '=', '0.000001', 'IterMAX', '=', '50', 'AdjustMax', '=', '20', 'Xshift', '=', '0', 'Yshift', '=', '0', 'dX', '=', '1', 'dY', '=', '0', 'n', '=', 'len', '(', 'XY', ')', '# number of data points', 'anew', '=', 'ParIni', '[', '0', ']', '+', 'Xshift', 'bnew', '=', 'ParIni', '[', '1', ']', '+', 'Yshift', 'Anew', '=', 'old_div', '(', '1.', ',', '(', '2.', '*', 'ParIni', '[', '2', ']', ')', ')', 'aabb', '=', 'anew', '*', 'anew', '+', 'bnew', '*', 'bnew', 'Fnew', '=', '(', 'aabb', '-', 'ParIni', '[', '2', ']', '*', 'ParIni', '[', '2', ']', ')', '*', 'Anew', 'Tnew', '=', 'numpy', '.', 'arccos', '(', 'old_div', '(', '-', 'anew', ',', 'numpy', '.', 'sqrt', '(', 'aabb', ')', ')', ')', 'if', 'bnew', '>', '0', ':', 'Tnew', '=', '2', '*', 'numpy', '.', 'pi', '-', 'Tnew', 'VarNew', '=', 'VarCircle', '(', 'XY', ',', 'ParIni', ')', 'VarLambda', '=', 'lambda0', 'finish', '=', '0', 'for', 'it', 'in', 'range', '(', '0', ',', 'IterMAX', ')', ':', 'Aold', '=', 'Anew', 'Fold', '=', 'Fnew', 'Told', '=', 'Tnew', 'VarOld', '=', 'VarNew', 'H', '=', 'numpy', '.', 'sqrt', '(', '1', '+', '4', '*', 'Aold', '*', 'Fold', ')', 'aold', '=', '-', 'H', '*', 'numpy', '.', 'cos', '(', 'Told', ')', '/', '(', 'Aold', '+', 'Aold', ')', '-', 'Xshift', 'bold', '=', '-', 'H', '*', 'numpy', '.', 'sin', '(', 'Told', ')', '/', '(', 'Aold', '+', 'Aold', ')', '-', 'Yshift', 'Rold', '=', 'old_div', '(', '1', ',', 'abs', '(', 'Aold', '+', 'Aold', ')', ')', 'DD', '=', '1', '+', '4', '*', 'Aold', '*', 'Fold', 'D', '=', 'numpy', '.', 'sqrt', '(', 'DD', ')', 'CT', '=', 'numpy', '.', 'cos', '(', 'Told', ')', 'ST', '=', 'numpy', '.', 'sin', '(', 'Told', ')', 'H11', '=', '0', 'H12', '=', '0', 'H13', '=', '0', 'H22', '=', '0', 'H23', '=', '0', 'H33', '=', '0', 'F1', '=', '0', 'F2', '=', '0', 'F3', '=', '0', 'for', 'i', 'in', 'range', '(', '0', ',', 'n', ')', ':', 'Xi', '=', 'XY', '[', 'i', ',', '0', ']', '+', 'Xshift', 'Yi', '=', 'XY', '[', 'i', ',', '1', ']', '+', 'Yshift', 'Zi', '=', 'Xi', '*', 'Xi', '+', 'Yi', '*', 'Yi', 'Ui', '=', 'Xi', '*', 'CT', '+', 'Yi', '*', 'ST', 'Vi', '=', '-', 'Xi', '*', 'ST', '+', 'Yi', '*', 'CT', 'ADF', '=', 'Aold', '*', 'Zi', '+', 'D', '*', 'Ui', '+', 'Fold', 'SQ', '=', 'numpy', '.', 'sqrt', '(', '4', '*', 'Aold', '*', 'ADF', '+', '1', ')', 'DEN', '=', 'SQ', '+', '1', 'Gi', '=', '2', '*', 'ADF', '/', 'DEN', 'FACT', '=', '2', '/', 'DEN', '*', '(', '1', '-', 'Aold', '*', 'Gi', '/', 'SQ', ')', 'DGDAi', '=', 'FACT', '*', '(', 'Zi', '+', '2', '*', 'Fold', '*', 'Ui', '/', 'D', ')', '-', 'Gi', '*', 'Gi', '/', 'SQ', 'DGDFi', '=', 'FACT', '*', '(', '2', '*', 'Aold', '*', 'Ui', '/', 'D', '+', '1', ')', 'DGDTi', '=', 'FACT', '*', 'D', '*', 'Vi', 'H11', '=', 'H11', '+', 'DGDAi', '*', 'DGDAi', 'H12', '=', 'H12', '+', 'DGDAi', '*', 'DGDFi', 'H13', '=', 'H13', '+', 'DGDAi', '*', 'DGDTi', 'H22', '=', 'H22', '+', 'DGDFi', '*', 'DGDFi', 'H23', '=', 'H23', '+', 'DGDFi', '*', 'DGDTi', 'H33', '=', 'H33', '+', 'DGDTi', '*', 'DGDTi', 'F1', '=', 'F1', '+', 'Gi', '*', 'DGDAi', 'F2', '=', 'F2', '+', 'Gi', '*', 'DGDFi', 'F3', '=', 'F3', '+', 'Gi', '*', 'DGDTi', 'for', 'adjust', 'in', 'range', '(', '1', ',', 'AdjustMax', ')', ':', '# Cholesly decomposition ', 'G11', '=', 'numpy', '.', 'sqrt', '(', 'H11', '+', 'VarLambda', ')', 'G12', '=', 'old_div', '(', 'H12', ',', 'G11', ')', 'G13', '=', 'old_div', '(', 'H13', ',', 'G11', ')', 'G22', '=', 'numpy', '.', 'sqrt', '(', 'H22', '+', 'VarLambda', '-', 'G12', '*', 
'G12', ')', 'G23', '=', 'old_div', '(', '(', 'H23', '-', 'G12', '*', 'G13', ')', ',', 'G22', ')', 'G33', '=', 'numpy', '.', 'sqrt', '(', 'H33', '+', 'VarLambda', '-', 'G13', '*', 'G13', '-', 'G23', '*', 'G23', ')', 'D1', '=', 'old_div', '(', 'F1', ',', 'G11', ')', 'D2', '=', 'old_div', '(', '(', 'F2', '-', 'G12', '*', 'D1', ')', ',', 'G22', ')', 'D3', '=', 'old_div', '(', '(', 'F3', '-', 'G13', '*', 'D1', '-', 'G23', '*', 'D2', ')', ',', 'G33', ')', 'dT', '=', 'old_div', '(', 'D3', ',', 'G33', ')', 'dF', '=', 'old_div', '(', '(', 'D2', '-', 'G23', '*', 'dT', ')', ',', 'G22', ')', 'dA', '=', 'old_div', '(', '(', 'D1', '-', 'G12', '*', 'dF', '-', 'G13', '*', 'dT', ')', ',', 'G11', ')', '# updating the parameters', 'Anew', '=', 'Aold', '-', 'dA', 'Fnew', '=', 'Fold', '-', 'dF', 'Tnew', '=', 'Told', '-', 'dT', 'if', '1', '+', '4', '*', 'Anew', '*', 'Fnew', '<', 'epsilon', 'and', 'VarLambda', '>', '1', ':', 'Xshift', '=', 'Xshift', '+', 'dX', 'Yshift', '=', 'Yshift', '+', 'dY', 'H', '=', 'numpy', '.', 'sqrt', '(', '1', '+', '4', '*', 'Aold', '*', 'Fold', ')', 'aTemp', '=', '-', 'H', '*', 'numpy', '.', 'cos', '(', 'Told', ')', '/', '(', 'Aold', '+', 'Aold', ')', '+', 'dX', 'bTemp', '=', '-', 'H', '*', 'numpy', '.', 'sin', '(', 'Told', ')', '/', '(', 'Aold', '+', 'Aold', ')', '+', 'dY', 'rTemp', '=', 'old_div', '(', '1', ',', 'abs', '(', 'Aold', '+', 'Aold', ')', ')', 'Anew', '=', 'old_div', '(', '1', ',', '(', 'rTemp', '+', 'rTemp', ')', ')', 'aabb', '=', 'aTemp', '*', 'aTemp', '+', 'bTemp', '*', 'bTemp', 'Fnew', '=', '(', 'aabb', '-', 'rTemp', '*', 'rTemp', ')', '*', 'Anew', 'Tnew', '=', 'numpy', '.', 'arccos', '(', 'old_div', '(', '-', 'aTemp', ',', 'numpy', '.', 'sqrt', '(', 'aabb', ')', ')', ')', 'if', 'bTemp', '>', '0', ':', 'Tnew', '=', '2', '*', 'numpy', '.', 'pi', '-', 'Tnew', 'VarNew', '=', 'VarOld', 'break', 'if', '1', '+', '4', '*', 'Anew', '*', 'Fnew', '<', 'epsilon', ':', 'VarLambda', '=', 'VarLambda', '*', 'factorUp', 'continue', 'DD', '=', '1', '+', '4', '*', 'Anew', '*', 'Fnew', 'D', '=', 'numpy', '.', 'sqrt', '(', 'DD', ')', 'CT', '=', 'numpy', '.', 'cos', '(', 'Tnew', ')', 'ST', '=', 'numpy', '.', 'sin', '(', 'Tnew', ')', 'GG', '=', '0', 'for', 'i', 'in', 'range', '(', '0', ',', 'n', ')', ':', 'Xi', '=', 'XY', '[', 'i', ',', '0', ']', '+', 'Xshift', 'Yi', '=', 'XY', '[', 'i', ',', '1', ']', '+', 'Yshift', 'Zi', '=', 'Xi', '*', 'Xi', '+', 'Yi', '*', 'Yi', 'Ui', '=', 'Xi', '*', 'CT', '+', 'Yi', '*', 'ST', 'ADF', '=', 'Anew', '*', 'Zi', '+', 'D', '*', 'Ui', '+', 'Fnew', 'SQ', '=', 'numpy', '.', 'sqrt', '(', '4', '*', 'Anew', '*', 'ADF', '+', '1', ')', 'DEN', '=', 'SQ', '+', '1', 'Gi', '=', '2', '*', 'ADF', '/', 'DEN', 'GG', '=', 'GG', '+', 'Gi', '*', 'Gi', 'VarNew', '=', 'old_div', '(', 'GG', ',', '(', 'n', '-', '3', ')', ')', 'H', '=', 'numpy', '.', 'sqrt', '(', '1', '+', '4', '*', 'Anew', '*', 'Fnew', ')', 'anew', '=', '-', 'H', '*', 'numpy', '.', 'cos', '(', 'Tnew', ')', '/', '(', 'Anew', '+', 'Anew', ')', '-', 'Xshift', 'bnew', '=', '-', 'H', '*', 'numpy', '.', 'sin', '(', 'Tnew', ')', '/', '(', 'Anew', '+', 'Anew', ')', '-', 'Yshift', 'Rnew', '=', 'old_div', '(', '1', ',', 'abs', '(', 'Anew', '+', 'Anew', ')', ')', 'if', 'VarNew', '<=', 'VarOld', ':', 'progress', '=', 'old_div', '(', '(', 'abs', '(', 'anew', '-', 'aold', ')', '+', 'abs', '(', 'bnew', '-', 'bold', ')', '+', 'abs', '(', 'Rnew', '-', 'Rold', ')', ')', ',', '(', 'Rnew', '+', 'Rold', ')', ')', 'if', 'progress', '<', 'epsilon', ':', 'Aold', '=', 'Anew', 'Fold', '=', 'Fnew', 'Told', '=', 'Tnew', 'VarOld', '=', 
'VarNew', '# %#ok<NASGU> ', 'finish', '=', '1', 'break', 'VarLambda', '=', 'VarLambda', '*', 'factorDown', 'break', 'else', ':', '# % no improvement ', 'VarLambda', '=', 'VarLambda', '*', 'factorUp', 'continue', 'if', 'finish', '==', '1', ':', 'break', 'H', '=', 'numpy', '.', 'sqrt', '(', '1', '+', '4', '*', 'Aold', '*', 'Fold', ')', 'result_a', '=', '-', 'H', '*', 'numpy', '.', 'cos', '(', 'Told', ')', '/', '(', 'Aold', '+', 'Aold', ')', '-', 'Xshift', 'result_b', '=', '-', 'H', '*', 'numpy', '.', 'sin', '(', 'Told', ')', '/', '(', 'Aold', '+', 'Aold', ')', '-', 'Yshift', 'result_r', '=', 'old_div', '(', '1', ',', 'abs', '(', 'Aold', '+', 'Aold', ')', ')', 'return', 'result_a', ',', 'result_b', ',', 'result_r']
input: list of x and y values [[x_1, y_1], [x_2, y_2], ....], and a tuple containing an initial guess (a, b, r) which is acquired by using an algebraic circle fit (TaubinSVD) output: a, b, r. a and b are the center of the fitting circle, and r is the radius % Geometric circle fit (minimizing orthogonal distances) % based on the Levenberg-Marquardt scheme in the % "algebraic parameters" A,B,C,D with constraint B*B+C*C-4*A*D=1 % N. Chernov and C. Lesort, "Least squares fitting of circles", % J. Math. Imag. Vision, Vol. 23, 239-251 (2005)
['input', ':', 'list', 'of', 'x', 'and', 'y', 'values', '[[', 'x_1', 'y_1', ']', '[', 'x_2', 'y_2', ']', '....', ']', 'and', 'a', 'tuple', 'containing', 'an', 'initial', 'guess', '(', 'a', 'b', 'r', ')', 'which', 'is', 'acquired', 'by', 'using', 'an', 'algebraic', 'circle', 'fit', '(', 'TaubinSVD', ')', 'output', ':', 'a', 'b', 'r', '.', 'a', 'and', 'b', 'are', 'the', 'center', 'of', 'the', 'fitting', 'circle', 'and', 'r', 'is', 'the', 'radius', '%', 'Geometric', 'circle', 'fit', '(', 'minimizing', 'orthogonal', 'distances', ')', '%', 'based', 'on', 'the', 'Levenberg', '-', 'Marquardt', 'scheme', 'in', 'the', '%', 'algebraic', 'parameters', 'A', 'B', 'C', 'D', 'with', 'constraint', 'B', '*', 'B', '+', 'C', '*', 'C', '-', '4', '*', 'A', '*', 'D', '=', '1', '%', 'N', '.', 'Chernov', 'and', 'C', '.', 'Lesort', 'Least', 'squares', 'fitting', 'of', 'circles', '%', 'J', '.', 'Math', '.', 'Imag', '.', 'Vision', 'Vol', '.', '23', '239', '-', '251', '(', '2005', ')']
train
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/SPD/lib/lib_curvature.py#L104-L293
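A small sketch of how a geometric circle fit such as LMA() above is usually assessed: the residual of each point is its distance to the fitted centre minus the radius. The points and fitted (a, b, r) values here are made up for illustration.

```python
import numpy as np

XY = np.array([[1.0, 0.1], [0.0, 1.0], [-1.0, 0.0], [0.05, -1.0]])
a, b, r = 0.01, 0.02, 1.0  # hypothetical (centre_x, centre_y, radius)

# orthogonal distance of each point from the fitted circle
residuals = np.hypot(XY[:, 0] - a, XY[:, 1] - b) - r
print(np.sqrt(np.mean(residuals ** 2)))  # RMS misfit of the fit
```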
207
RJT1990/pyflux
pyflux/ssm/dynlin.py
DynReg._forecast_model
def _forecast_model(self,beta,Z,h): """ Creates forecasted states and variances Parameters ---------- beta : np.ndarray Contains untransformed starting values for latent variables Returns ---------- a : np.ndarray Forecasted states P : np.ndarray Variance of forecasted states """ T, _, R, Q, H = self._ss_matrices(beta) return dl_univariate_kalman_fcst(self.data,Z,H,T,Q,R,0.0,h)
python
def _forecast_model(self,beta,Z,h): """ Creates forecasted states and variances Parameters ---------- beta : np.ndarray Contains untransformed starting values for latent variables Returns ---------- a : np.ndarray Forecasted states P : np.ndarray Variance of forecasted states """ T, _, R, Q, H = self._ss_matrices(beta) return dl_univariate_kalman_fcst(self.data,Z,H,T,Q,R,0.0,h)
['def', '_forecast_model', '(', 'self', ',', 'beta', ',', 'Z', ',', 'h', ')', ':', 'T', ',', '_', ',', 'R', ',', 'Q', ',', 'H', '=', 'self', '.', '_ss_matrices', '(', 'beta', ')', 'return', 'dl_univariate_kalman_fcst', '(', 'self', '.', 'data', ',', 'Z', ',', 'H', ',', 'T', ',', 'Q', ',', 'R', ',', '0.0', ',', 'h', ')']
Creates forecasted states and variances Parameters ---------- beta : np.ndarray Contains untransformed starting values for latent variables Returns ---------- a : np.ndarray Forecasted states P : np.ndarray Variance of forecasted states
['Creates', 'forecasted', 'states', 'and', 'variances']
train
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ssm/dynlin.py#L74-L92
208
jwhitlock/drf-cached-instances
drf_cached_instances/cache.py
BaseCache.field_date_to_json
def field_date_to_json(self, day): """Convert a date to a date triple.""" if isinstance(day, six.string_types): day = parse_date(day) return [day.year, day.month, day.day] if day else None
python
def field_date_to_json(self, day): """Convert a date to a date triple.""" if isinstance(day, six.string_types): day = parse_date(day) return [day.year, day.month, day.day] if day else None
['def', 'field_date_to_json', '(', 'self', ',', 'day', ')', ':', 'if', 'isinstance', '(', 'day', ',', 'six', '.', 'string_types', ')', ':', 'day', '=', 'parse_date', '(', 'day', ')', 'return', '[', 'day', '.', 'year', ',', 'day', '.', 'month', ',', 'day', '.', 'day', ']', 'if', 'day', 'else', 'None']
Convert a date to a date triple.
['Convert', 'a', 'date', 'to', 'a', 'date', 'triple', '.']
train
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/cache.py#L238-L242
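A standalone sketch of the date to [year, month, day] triple conversion done by field_date_to_json() above, using only the standard library in place of six and Django's parse_date.

```python
import datetime


def date_to_triple(day):
    if isinstance(day, str):
        day = datetime.date.fromisoformat(day)
    return [day.year, day.month, day.day] if day else None


print(date_to_triple("2024-03-17"))                # [2024, 3, 17]
print(date_to_triple(datetime.date(2024, 3, 17)))  # [2024, 3, 17]
print(date_to_triple(None))                        # None
```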
209
ethpm/py-ethpm
ethpm/dependencies.py
Dependencies.items
def items(self) -> Tuple[Tuple[str, "Package"], ...]: # type: ignore """ Return an iterable containing package name and corresponding `Package` instance that are available. """ item_dict = { name: self.build_dependencies.get(name) for name in self.build_dependencies } return tuple(item_dict.items())
python
def items(self) -> Tuple[Tuple[str, "Package"], ...]: # type: ignore """ Return an iterable containing package name and corresponding `Package` instance that are available. """ item_dict = { name: self.build_dependencies.get(name) for name in self.build_dependencies } return tuple(item_dict.items())
['def', 'items', '(', 'self', ')', '->', 'Tuple', '[', 'Tuple', '[', 'str', ',', '"Package"', ']', ',', '...', ']', ':', '# type: ignore', 'item_dict', '=', '{', 'name', ':', 'self', '.', 'build_dependencies', '.', 'get', '(', 'name', ')', 'for', 'name', 'in', 'self', '.', 'build_dependencies', '}', 'return', 'tuple', '(', 'item_dict', '.', 'items', '(', ')', ')']
Return an iterable containing package name and corresponding `Package` instance that are available.
['Return', 'an', 'iterable', 'containing', 'package', 'name', 'and', 'corresponding', 'Package', 'instance', 'that', 'are', 'available', '.']
train
https://github.com/ethpm/py-ethpm/blob/81ed58d7c636fe00c6770edeb0401812b1a5e8fc/ethpm/dependencies.py#L28-L36
210
Esri/ArcREST
src/arcrest/manageorg/_content.py
User.createService
def createService(self, createServiceParameter, description=None, tags="Feature Service", snippet=None): """ The Create Service operation allows users to create a hosted feature service. You can use the API to create an empty hosted feaure service from feature service metadata JSON. Inputs: createServiceParameter - create service object """ url = "%s/createService" % self.location val = createServiceParameter.value params = { "f" : "json", "outputType" : "featureService", "createParameters" : json.dumps(val), "tags" : tags } if snippet is not None: params['snippet'] = snippet if description is not None: params['description'] = description res = self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_port=self._proxy_port, proxy_url=self._proxy_url) if 'id' in res or \ 'serviceItemId' in res: if 'id' in res: url = "%s/items/%s" % (self.location, res['id']) else: url = "%s/items/%s" % (self.location, res['serviceItemId']) return UserItem(url=url, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port) return res
python
def createService(self, createServiceParameter, description=None, tags="Feature Service", snippet=None): """ The Create Service operation allows users to create a hosted feature service. You can use the API to create an empty hosted feaure service from feature service metadata JSON. Inputs: createServiceParameter - create service object """ url = "%s/createService" % self.location val = createServiceParameter.value params = { "f" : "json", "outputType" : "featureService", "createParameters" : json.dumps(val), "tags" : tags } if snippet is not None: params['snippet'] = snippet if description is not None: params['description'] = description res = self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_port=self._proxy_port, proxy_url=self._proxy_url) if 'id' in res or \ 'serviceItemId' in res: if 'id' in res: url = "%s/items/%s" % (self.location, res['id']) else: url = "%s/items/%s" % (self.location, res['serviceItemId']) return UserItem(url=url, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port) return res
['def', 'createService', '(', 'self', ',', 'createServiceParameter', ',', 'description', '=', 'None', ',', 'tags', '=', '"Feature Service"', ',', 'snippet', '=', 'None', ')', ':', 'url', '=', '"%s/createService"', '%', 'self', '.', 'location', 'val', '=', 'createServiceParameter', '.', 'value', 'params', '=', '{', '"f"', ':', '"json"', ',', '"outputType"', ':', '"featureService"', ',', '"createParameters"', ':', 'json', '.', 'dumps', '(', 'val', ')', ',', '"tags"', ':', 'tags', '}', 'if', 'snippet', 'is', 'not', 'None', ':', 'params', '[', "'snippet'", ']', '=', 'snippet', 'if', 'description', 'is', 'not', 'None', ':', 'params', '[', "'description'", ']', '=', 'description', 'res', '=', 'self', '.', '_post', '(', 'url', '=', 'url', ',', 'param_dict', '=', 'params', ',', 'securityHandler', '=', 'self', '.', '_securityHandler', ',', 'proxy_port', '=', 'self', '.', '_proxy_port', ',', 'proxy_url', '=', 'self', '.', '_proxy_url', ')', 'if', "'id'", 'in', 'res', 'or', "'serviceItemId'", 'in', 'res', ':', 'if', "'id'", 'in', 'res', ':', 'url', '=', '"%s/items/%s"', '%', '(', 'self', '.', 'location', ',', 'res', '[', "'id'", ']', ')', 'else', ':', 'url', '=', '"%s/items/%s"', '%', '(', 'self', '.', 'location', ',', 'res', '[', "'serviceItemId'", ']', ')', 'return', 'UserItem', '(', 'url', '=', 'url', ',', 'securityHandler', '=', 'self', '.', '_securityHandler', ',', 'proxy_url', '=', 'self', '.', '_proxy_url', ',', 'proxy_port', '=', 'self', '.', '_proxy_port', ')', 'return', 'res']
The Create Service operation allows users to create a hosted feature service. You can use the API to create an empty hosted feaure service from feature service metadata JSON. Inputs: createServiceParameter - create service object
['The', 'Create', 'Service', 'operation', 'allows', 'users', 'to', 'create', 'a', 'hosted', 'feature', 'service', '.', 'You', 'can', 'use', 'the', 'API', 'to', 'create', 'an', 'empty', 'hosted', 'feaure', 'service', 'from', 'feature', 'service', 'metadata', 'JSON', '.']
train
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageorg/_content.py#L2515-L2554
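A sketch of the request-payload assembly done by createService() above: optional fields are added only when supplied, and the create parameters are JSON-encoded into a single form field. The endpoint URL and parameter values are made up, and no HTTP request is sent here.

```python
import json

create_parameters = {"name": "parcels", "maxRecordCount": 1000}  # hypothetical
snippet, description, tags = None, "Parcel layer", "Feature Service"

params = {
    "f": "json",
    "outputType": "featureService",
    "createParameters": json.dumps(create_parameters),
    "tags": tags,
}
# optional fields only go into the payload when they were actually provided
if snippet is not None:
    params["snippet"] = snippet
if description is not None:
    params["description"] = description

url = "https://example.org/sharing/rest/content/users/me/createService"
print(url, params)
```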
211
JasonKessler/scattertext
scattertext/categoryprojector/CategoryProjector.py
CategoryProjectorBase.project_with_metadata
def project_with_metadata(self, term_doc_mat, x_dim=0, y_dim=1): ''' Returns a projection of the :param term_doc_mat: a TermDocMatrix :return: CategoryProjection ''' return self._project_category_corpus(self._get_category_metadata_corpus_and_replace_terms(term_doc_mat), x_dim, y_dim)
python
def project_with_metadata(self, term_doc_mat, x_dim=0, y_dim=1): ''' Returns a projection of the :param term_doc_mat: a TermDocMatrix :return: CategoryProjection ''' return self._project_category_corpus(self._get_category_metadata_corpus_and_replace_terms(term_doc_mat), x_dim, y_dim)
['def', 'project_with_metadata', '(', 'self', ',', 'term_doc_mat', ',', 'x_dim', '=', '0', ',', 'y_dim', '=', '1', ')', ':', 'return', 'self', '.', '_project_category_corpus', '(', 'self', '.', '_get_category_metadata_corpus_and_replace_terms', '(', 'term_doc_mat', ')', ',', 'x_dim', ',', 'y_dim', ')']
Returns a projection of the :param term_doc_mat: a TermDocMatrix :return: CategoryProjection
['Returns', 'a', 'projection', 'of', 'the']
train
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/categoryprojector/CategoryProjector.py#L41-L49
212
mingchen/django-cas-ng
django_cas_ng/backends.py
CASBackend.get_user_id
def get_user_id(self, attributes): """ For use when CAS_CREATE_USER_WITH_ID is True. Will raise ImproperlyConfigured exceptions when a user_id cannot be accessed. This is important because we shouldn't create Users with automatically assigned ids if we are trying to keep User primary key's in sync. """ if not attributes: raise ImproperlyConfigured("CAS_CREATE_USER_WITH_ID is True, but " "no attributes were provided") user_id = attributes.get('id') if not user_id: raise ImproperlyConfigured("CAS_CREATE_USER_WITH_ID is True, but " "`'id'` is not part of attributes.") return user_id
python
def get_user_id(self, attributes): """ For use when CAS_CREATE_USER_WITH_ID is True. Will raise ImproperlyConfigured exceptions when a user_id cannot be accessed. This is important because we shouldn't create Users with automatically assigned ids if we are trying to keep User primary key's in sync. """ if not attributes: raise ImproperlyConfigured("CAS_CREATE_USER_WITH_ID is True, but " "no attributes were provided") user_id = attributes.get('id') if not user_id: raise ImproperlyConfigured("CAS_CREATE_USER_WITH_ID is True, but " "`'id'` is not part of attributes.") return user_id
['def', 'get_user_id', '(', 'self', ',', 'attributes', ')', ':', 'if', 'not', 'attributes', ':', 'raise', 'ImproperlyConfigured', '(', '"CAS_CREATE_USER_WITH_ID is True, but "', '"no attributes were provided"', ')', 'user_id', '=', 'attributes', '.', 'get', '(', "'id'", ')', 'if', 'not', 'user_id', ':', 'raise', 'ImproperlyConfigured', '(', '"CAS_CREATE_USER_WITH_ID is True, but "', '"`\'id\'` is not part of attributes."', ')', 'return', 'user_id']
For use when CAS_CREATE_USER_WITH_ID is True. Will raise ImproperlyConfigured exceptions when a user_id cannot be accessed. This is important because we shouldn't create Users with automatically assigned ids if we are trying to keep User primary key's in sync.
['For', 'use', 'when', 'CAS_CREATE_USER_WITH_ID', 'is', 'True', '.', 'Will', 'raise', 'ImproperlyConfigured', 'exceptions', 'when', 'a', 'user_id', 'cannot', 'be', 'accessed', '.', 'This', 'is', 'important', 'because', 'we', 'shouldn', 't', 'create', 'Users', 'with', 'automatically', 'assigned', 'ids', 'if', 'we', 'are', 'trying', 'to', 'keep', 'User', 'primary', 'key', 's', 'in', 'sync', '.']
train
https://github.com/mingchen/django-cas-ng/blob/202ca92cd770d9679bfe4e9e20b41fd19b81c311/django_cas_ng/backends.py#L136-L153
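A standalone sketch of the defensive attribute lookup in get_user_id() above; ValueError stands in for Django's ImproperlyConfigured, which is not imported here.

```python
def get_user_id(attributes):
    if not attributes:
        raise ValueError("CAS_CREATE_USER_WITH_ID is True, but no attributes were provided")
    user_id = attributes.get("id")
    if not user_id:
        raise ValueError("CAS_CREATE_USER_WITH_ID is True, but 'id' is not part of attributes.")
    return user_id


print(get_user_id({"id": 42, "username": "alice"}))  # 42
```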
213
apache/spark
python/pyspark/mllib/random.py
RandomRDDs.uniformVectorRDD
def uniformVectorRDD(sc, numRows, numCols, numPartitions=None, seed=None): """ Generates an RDD comprised of vectors containing i.i.d. samples drawn from the uniform distribution U(0.0, 1.0). :param sc: SparkContext used to create the RDD. :param numRows: Number of Vectors in the RDD. :param numCols: Number of elements in each Vector. :param numPartitions: Number of partitions in the RDD. :param seed: Seed for the RNG that generates the seed for the generator in each partition. :return: RDD of Vector with vectors containing i.i.d samples ~ `U(0.0, 1.0)`. >>> import numpy as np >>> mat = np.matrix(RandomRDDs.uniformVectorRDD(sc, 10, 10).collect()) >>> mat.shape (10, 10) >>> mat.max() <= 1.0 and mat.min() >= 0.0 True >>> RandomRDDs.uniformVectorRDD(sc, 10, 10, 4).getNumPartitions() 4 """ return callMLlibFunc("uniformVectorRDD", sc._jsc, numRows, numCols, numPartitions, seed)
python
def uniformVectorRDD(sc, numRows, numCols, numPartitions=None, seed=None): """ Generates an RDD comprised of vectors containing i.i.d. samples drawn from the uniform distribution U(0.0, 1.0). :param sc: SparkContext used to create the RDD. :param numRows: Number of Vectors in the RDD. :param numCols: Number of elements in each Vector. :param numPartitions: Number of partitions in the RDD. :param seed: Seed for the RNG that generates the seed for the generator in each partition. :return: RDD of Vector with vectors containing i.i.d samples ~ `U(0.0, 1.0)`. >>> import numpy as np >>> mat = np.matrix(RandomRDDs.uniformVectorRDD(sc, 10, 10).collect()) >>> mat.shape (10, 10) >>> mat.max() <= 1.0 and mat.min() >= 0.0 True >>> RandomRDDs.uniformVectorRDD(sc, 10, 10, 4).getNumPartitions() 4 """ return callMLlibFunc("uniformVectorRDD", sc._jsc, numRows, numCols, numPartitions, seed)
['def', 'uniformVectorRDD', '(', 'sc', ',', 'numRows', ',', 'numCols', ',', 'numPartitions', '=', 'None', ',', 'seed', '=', 'None', ')', ':', 'return', 'callMLlibFunc', '(', '"uniformVectorRDD"', ',', 'sc', '.', '_jsc', ',', 'numRows', ',', 'numCols', ',', 'numPartitions', ',', 'seed', ')']
Generates an RDD comprised of vectors containing i.i.d. samples drawn from the uniform distribution U(0.0, 1.0). :param sc: SparkContext used to create the RDD. :param numRows: Number of Vectors in the RDD. :param numCols: Number of elements in each Vector. :param numPartitions: Number of partitions in the RDD. :param seed: Seed for the RNG that generates the seed for the generator in each partition. :return: RDD of Vector with vectors containing i.i.d samples ~ `U(0.0, 1.0)`. >>> import numpy as np >>> mat = np.matrix(RandomRDDs.uniformVectorRDD(sc, 10, 10).collect()) >>> mat.shape (10, 10) >>> mat.max() <= 1.0 and mat.min() >= 0.0 True >>> RandomRDDs.uniformVectorRDD(sc, 10, 10, 4).getNumPartitions() 4
['Generates', 'an', 'RDD', 'comprised', 'of', 'vectors', 'containing', 'i', '.', 'i', '.', 'd', '.', 'samples', 'drawn', 'from', 'the', 'uniform', 'distribution', 'U', '(', '0', '.', '0', '1', '.', '0', ')', '.']
train
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/mllib/random.py#L230-L251
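A local numpy analogue of the distributed uniformVectorRDD() call above: a numRows x numCols matrix of i.i.d. U(0, 1) samples. This reproduces the doctest's shape and range checks without a SparkContext.

```python
import numpy as np

rng = np.random.default_rng(seed=42)
mat = rng.uniform(0.0, 1.0, size=(10, 10))   # 10 vectors of 10 uniform samples

print(mat.shape)                              # (10, 10)
print(mat.max() <= 1.0 and mat.min() >= 0.0)  # True
```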
214
etcher-be/epab
epab/utils/_repo.py
Repo.changed_files
def changed_files(self) -> typing.List[str]: """ :return: changed files :rtype: list of str """ changed_files: typing.List[str] = [x.a_path for x in self.repo.index.diff(None)] LOGGER.debug('changed files: %s', changed_files) return changed_files
python
def changed_files(self) -> typing.List[str]: """ :return: changed files :rtype: list of str """ changed_files: typing.List[str] = [x.a_path for x in self.repo.index.diff(None)] LOGGER.debug('changed files: %s', changed_files) return changed_files
['def', 'changed_files', '(', 'self', ')', '->', 'typing', '.', 'List', '[', 'str', ']', ':', 'changed_files', ':', 'typing', '.', 'List', '[', 'str', ']', '=', '[', 'x', '.', 'a_path', 'for', 'x', 'in', 'self', '.', 'repo', '.', 'index', '.', 'diff', '(', 'None', ')', ']', 'LOGGER', '.', 'debug', '(', "'changed files: %s'", ',', 'changed_files', ')', 'return', 'changed_files']
:return: changed files :rtype: list of str
[':', 'return', ':', 'changed', 'files', ':', 'rtype', ':', 'list', 'of', 'str']
train
https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/utils/_repo.py#L230-L237
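A hedged sketch of listing unstaged changes with GitPython, which is what self.repo.index.diff(None) in changed_files() above relies on; it assumes GitPython is installed and the current directory is a git repository.

```python
import git  # GitPython

repo = git.Repo(".")  # assumes "." is inside a git working tree
changed = [d.a_path for d in repo.index.diff(None)]  # modified but not staged
print("changed files:", changed)
```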
215
ui/django-post_office
post_office/models.py
get_upload_path
def get_upload_path(instance, filename): """Overriding to store the original filename""" if not instance.name: instance.name = filename # set original filename date = timezone.now().date() filename = '{name}.{ext}'.format(name=uuid4().hex, ext=filename.split('.')[-1]) return os.path.join('post_office_attachments', str(date.year), str(date.month), str(date.day), filename)
python
def get_upload_path(instance, filename): """Overriding to store the original filename""" if not instance.name: instance.name = filename # set original filename date = timezone.now().date() filename = '{name}.{ext}'.format(name=uuid4().hex, ext=filename.split('.')[-1]) return os.path.join('post_office_attachments', str(date.year), str(date.month), str(date.day), filename)
['def', 'get_upload_path', '(', 'instance', ',', 'filename', ')', ':', 'if', 'not', 'instance', '.', 'name', ':', 'instance', '.', 'name', '=', 'filename', '# set original filename', 'date', '=', 'timezone', '.', 'now', '(', ')', '.', 'date', '(', ')', 'filename', '=', "'{name}.{ext}'", '.', 'format', '(', 'name', '=', 'uuid4', '(', ')', '.', 'hex', ',', 'ext', '=', 'filename', '.', 'split', '(', "'.'", ')', '[', '-', '1', ']', ')', 'return', 'os', '.', 'path', '.', 'join', '(', "'post_office_attachments'", ',', 'str', '(', 'date', '.', 'year', ')', ',', 'str', '(', 'date', '.', 'month', ')', ',', 'str', '(', 'date', '.', 'day', ')', ',', 'filename', ')']
Overriding to store the original filename
['Overriding', 'to', 'store', 'the', 'original', 'filename']
train
https://github.com/ui/django-post_office/blob/03e1ffb69829b475402f0f3ecd9f8a90af7da4bd/post_office/models.py#L274-L283
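A standalone sketch of the upload-path scheme used by get_upload_path() above: keep the original name elsewhere and store the file under a uuid-based name inside a year/month/day directory. Django's timezone helper is replaced by a plain date argument.

```python
import os
from datetime import date
from uuid import uuid4


def build_upload_path(filename: str, today: date) -> str:
    ext = filename.split(".")[-1]
    stored_name = "{name}.{ext}".format(name=uuid4().hex, ext=ext)
    return os.path.join("post_office_attachments",
                        str(today.year), str(today.month), str(today.day),
                        stored_name)


print(build_upload_path("report.pdf", date(2024, 3, 17)))
# post_office_attachments/2024/3/17/<32-hex-chars>.pdf
```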
216
django-fluent/django-fluent-blogs
fluent_blogs/sitemaps.py
AuthorArchiveSitemap.lastmod
def lastmod(self, author): """Return the last modification of the entry.""" lastitems = EntryModel.objects.published().order_by('-modification_date').filter(author=author).only('modification_date') return lastitems[0].modification_date
python
def lastmod(self, author): """Return the last modification of the entry.""" lastitems = EntryModel.objects.published().order_by('-modification_date').filter(author=author).only('modification_date') return lastitems[0].modification_date
['def', 'lastmod', '(', 'self', ',', 'author', ')', ':', 'lastitems', '=', 'EntryModel', '.', 'objects', '.', 'published', '(', ')', '.', 'order_by', '(', "'-modification_date'", ')', '.', 'filter', '(', 'author', '=', 'author', ')', '.', 'only', '(', "'modification_date'", ')', 'return', 'lastitems', '[', '0', ']', '.', 'modification_date']
Return the last modification of the entry.
['Return', 'the', 'last', 'modification', 'of', 'the', 'entry', '.']
train
https://github.com/django-fluent/django-fluent-blogs/blob/86b148549a010eaca9a2ea987fe43be250e06c50/fluent_blogs/sitemaps.py#L60-L63
217
watson-developer-cloud/python-sdk
ibm_watson/speech_to_text_v1.py
Grammars._to_dict
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'grammars') and self.grammars is not None: _dict['grammars'] = [x._to_dict() for x in self.grammars] return _dict
python
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'grammars') and self.grammars is not None: _dict['grammars'] = [x._to_dict() for x in self.grammars] return _dict
['def', '_to_dict', '(', 'self', ')', ':', '_dict', '=', '{', '}', 'if', 'hasattr', '(', 'self', ',', "'grammars'", ')', 'and', 'self', '.', 'grammars', 'is', 'not', 'None', ':', '_dict', '[', "'grammars'", ']', '=', '[', 'x', '.', '_to_dict', '(', ')', 'for', 'x', 'in', 'self', '.', 'grammars', ']', 'return', '_dict']
Return a json dictionary representing this model.
['Return', 'a', 'json', 'dictionary', 'representing', 'this', 'model', '.']
train
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/speech_to_text_v1.py#L3846-L3851
218
mpg-age-bioinformatics/AGEpy
AGEpy/gtf.py
GetTransPosition
def GetTransPosition(df,field,dic,refCol="transcript_id"): """ Maps a genome position to transcript positon" :param df: a Pandas dataframe :param field: the head of the column containing the genomic position :param dic: a dictionary containing for each transcript the respective bases eg. {ENST23923910:'234,235,236,1021,..'} :param refCol: header of the reference column with IDs, eg. 'transcript_id' :returns: position on transcript """ try: gen=str(int(df[field])) transid=df[refCol] bases=dic.get(transid).split(",") bases=bases.index(str(gen))+1 except: bases=np.nan return bases
python
def GetTransPosition(df,field,dic,refCol="transcript_id"): """ Maps a genome position to transcript positon" :param df: a Pandas dataframe :param field: the head of the column containing the genomic position :param dic: a dictionary containing for each transcript the respective bases eg. {ENST23923910:'234,235,236,1021,..'} :param refCol: header of the reference column with IDs, eg. 'transcript_id' :returns: position on transcript """ try: gen=str(int(df[field])) transid=df[refCol] bases=dic.get(transid).split(",") bases=bases.index(str(gen))+1 except: bases=np.nan return bases
['def', 'GetTransPosition', '(', 'df', ',', 'field', ',', 'dic', ',', 'refCol', '=', '"transcript_id"', ')', ':', 'try', ':', 'gen', '=', 'str', '(', 'int', '(', 'df', '[', 'field', ']', ')', ')', 'transid', '=', 'df', '[', 'refCol', ']', 'bases', '=', 'dic', '.', 'get', '(', 'transid', ')', '.', 'split', '(', '","', ')', 'bases', '=', 'bases', '.', 'index', '(', 'str', '(', 'gen', ')', ')', '+', '1', 'except', ':', 'bases', '=', 'np', '.', 'nan', 'return', 'bases']
Maps a genome position to transcript positon" :param df: a Pandas dataframe :param field: the head of the column containing the genomic position :param dic: a dictionary containing for each transcript the respective bases eg. {ENST23923910:'234,235,236,1021,..'} :param refCol: header of the reference column with IDs, eg. 'transcript_id' :returns: position on transcript
['Maps', 'a', 'genome', 'position', 'to', 'transcript', 'position']
train
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/gtf.py#L163-L181
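A minimal usage sketch for GetTransPosition above: the function expects a single DataFrame row (so it is naturally used with DataFrame.apply along axis=1) plus the name of the genomic-position column and a transcript-to-bases dictionary. Everything below the imports is invented toy data for illustration, and the import path simply mirrors the record's file path.

.. code-block:: python

    import pandas as pd
    from AGEpy.gtf import GetTransPosition

    # toy mapping: transcript id -> comma-separated genomic positions of its bases
    trans_bases = {"ENST0001": "100,101,102,200,201"}

    df = pd.DataFrame({"transcript_id": ["ENST0001", "ENST0001"],
                       "genomic_pos": [200, 999]})

    # applied row-wise; positions absent from the transcript come back as NaN
    df["transcript_pos"] = df.apply(GetTransPosition,
                                    args=("genomic_pos", trans_bases),
                                    axis=1)
    # genomic position 200 is the 4th listed base of ENST0001 -> 4; 999 -> NaN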
219
teepark/greenhouse
greenhouse/emulation/__init__.py
patched_context
def patched_context(*module_names, **kwargs): """apply emulation patches only for a specific context :param module_names: var-args for the modules to patch, as in :func:`patch` :param local: if True, unpatching is done on every switch-out, and re-patching on every switch-in, so that they are only applied for the one coroutine :returns: a contextmanager that patches on ``__enter__`` and unpatches on ``__exit__`` """ local = kwargs.pop('local', False) if kwargs: raise TypeError("patched_context() got an unexpected keyword " + "argument %r" % kwargs.keys()[0]) patch(*module_names) if local: @scheduler.local_incoming_hook @scheduler.local_outgoing_hook def hook(direction, target): {1: patch, 2: unpatch}[direction](*module_names) yield unpatch(*module_names) if local: scheduler.remove_local_incoming_hook(hook) scheduler.remove_local_outgoing_hook(hook)
python
def patched_context(*module_names, **kwargs): """apply emulation patches only for a specific context :param module_names: var-args for the modules to patch, as in :func:`patch` :param local: if True, unpatching is done on every switch-out, and re-patching on every switch-in, so that they are only applied for the one coroutine :returns: a contextmanager that patches on ``__enter__`` and unpatches on ``__exit__`` """ local = kwargs.pop('local', False) if kwargs: raise TypeError("patched_context() got an unexpected keyword " + "argument %r" % kwargs.keys()[0]) patch(*module_names) if local: @scheduler.local_incoming_hook @scheduler.local_outgoing_hook def hook(direction, target): {1: patch, 2: unpatch}[direction](*module_names) yield unpatch(*module_names) if local: scheduler.remove_local_incoming_hook(hook) scheduler.remove_local_outgoing_hook(hook)
['def', 'patched_context', '(', '*', 'module_names', ',', '*', '*', 'kwargs', ')', ':', 'local', '=', 'kwargs', '.', 'pop', '(', "'local'", ',', 'False', ')', 'if', 'kwargs', ':', 'raise', 'TypeError', '(', '"patched_context() got an unexpected keyword "', '+', '"argument %r"', '%', 'kwargs', '.', 'keys', '(', ')', '[', '0', ']', ')', 'patch', '(', '*', 'module_names', ')', 'if', 'local', ':', '@', 'scheduler', '.', 'local_incoming_hook', '@', 'scheduler', '.', 'local_outgoing_hook', 'def', 'hook', '(', 'direction', ',', 'target', ')', ':', '{', '1', ':', 'patch', ',', '2', ':', 'unpatch', '}', '[', 'direction', ']', '(', '*', 'module_names', ')', 'yield', 'unpatch', '(', '*', 'module_names', ')', 'if', 'local', ':', 'scheduler', '.', 'remove_local_incoming_hook', '(', 'hook', ')', 'scheduler', '.', 'remove_local_outgoing_hook', '(', 'hook', ')']
apply emulation patches only for a specific context :param module_names: var-args for the modules to patch, as in :func:`patch` :param local: if True, unpatching is done on every switch-out, and re-patching on every switch-in, so that they are only applied for the one coroutine :returns: a contextmanager that patches on ``__enter__`` and unpatches on ``__exit__``
['apply', 'emulation', 'patches', 'only', 'for', 'a', 'specific', 'context']
train
https://github.com/teepark/greenhouse/blob/8fd1be4f5443ba090346b5ec82fdbeb0a060d956/greenhouse/emulation/__init__.py#L76-L105
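patched_context is a generator that patches the named modules before its yield and unpatches after, so it is meant to be driven as a context manager (presumably wrapped with contextlib.contextmanager elsewhere in greenhouse.emulation; that wrapper is not shown in this record). A hedged usage sketch, with arbitrary module names:

.. code-block:: python

    from greenhouse import emulation

    # Apply the socket/time patches only inside this block; local=True also
    # re-applies them on every switch-in of this coroutine and removes them
    # on every switch-out, so other coroutines keep the unpatched modules.
    with emulation.patched_context("socket", "time", local=True):
        run_blocking_io()   # placeholder for code that should see the patches
    # on exit the original module contents are restored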
220
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_lag.py
brocade_lag.get_portchannel_info_by_intf_output_lacp_oper_key
def get_portchannel_info_by_intf_output_lacp_oper_key(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_portchannel_info_by_intf = ET.Element("get_portchannel_info_by_intf") config = get_portchannel_info_by_intf output = ET.SubElement(get_portchannel_info_by_intf, "output") lacp = ET.SubElement(output, "lacp") oper_key = ET.SubElement(lacp, "oper-key") oper_key.text = kwargs.pop('oper_key') callback = kwargs.pop('callback', self._callback) return callback(config)
python
def get_portchannel_info_by_intf_output_lacp_oper_key(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_portchannel_info_by_intf = ET.Element("get_portchannel_info_by_intf") config = get_portchannel_info_by_intf output = ET.SubElement(get_portchannel_info_by_intf, "output") lacp = ET.SubElement(output, "lacp") oper_key = ET.SubElement(lacp, "oper-key") oper_key.text = kwargs.pop('oper_key') callback = kwargs.pop('callback', self._callback) return callback(config)
['def', 'get_portchannel_info_by_intf_output_lacp_oper_key', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'get_portchannel_info_by_intf', '=', 'ET', '.', 'Element', '(', '"get_portchannel_info_by_intf"', ')', 'config', '=', 'get_portchannel_info_by_intf', 'output', '=', 'ET', '.', 'SubElement', '(', 'get_portchannel_info_by_intf', ',', '"output"', ')', 'lacp', '=', 'ET', '.', 'SubElement', '(', 'output', ',', '"lacp"', ')', 'oper_key', '=', 'ET', '.', 'SubElement', '(', 'lacp', ',', '"oper-key"', ')', 'oper_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'oper_key'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_lag.py#L422-L434
221
mitsei/dlkit
dlkit/handcar/learning/managers.py
LearningManager.get_activity_search_session_for_objective_bank
def get_activity_search_session_for_objective_bank(self, objective_bank_id=None): """Gets the OsidSession associated with the activity search service for the given objective bank. arg: objectiveBankId (osid.id.Id): the Id of the objective bank return: (osid.learning.ActivitySearchSession) - an ActivitySearchSession raise: NotFound - objectiveBankId not found raise: NullArgument - objectiveBankId is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_activity_search() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_activity_search() and supports_visible_federation() are true. """ if not objective_bank_id: raise NullArgument if not self.supports_activity_search(): raise Unimplemented() try: from . import sessions except ImportError: raise OperationFailed() try: session = sessions.ActivitySearchSession(objective_bank_id, runtime=self._runtime) except AttributeError: raise OperationFailed() return session
python
def get_activity_search_session_for_objective_bank(self, objective_bank_id=None): """Gets the OsidSession associated with the activity search service for the given objective bank. arg: objectiveBankId (osid.id.Id): the Id of the objective bank return: (osid.learning.ActivitySearchSession) - an ActivitySearchSession raise: NotFound - objectiveBankId not found raise: NullArgument - objectiveBankId is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_activity_search() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_activity_search() and supports_visible_federation() are true. """ if not objective_bank_id: raise NullArgument if not self.supports_activity_search(): raise Unimplemented() try: from . import sessions except ImportError: raise OperationFailed() try: session = sessions.ActivitySearchSession(objective_bank_id, runtime=self._runtime) except AttributeError: raise OperationFailed() return session
['def', 'get_activity_search_session_for_objective_bank', '(', 'self', ',', 'objective_bank_id', '=', 'None', ')', ':', 'if', 'not', 'objective_bank_id', ':', 'raise', 'NullArgument', 'if', 'not', 'self', '.', 'supports_activity_search', '(', ')', ':', 'raise', 'Unimplemented', '(', ')', 'try', ':', 'from', '.', 'import', 'sessions', 'except', 'ImportError', ':', 'raise', 'OperationFailed', '(', ')', 'try', ':', 'session', '=', 'sessions', '.', 'ActivitySearchSession', '(', 'objective_bank_id', ',', 'runtime', '=', 'self', '.', '_runtime', ')', 'except', 'AttributeError', ':', 'raise', 'OperationFailed', '(', ')', 'return', 'session']
Gets the OsidSession associated with the activity search service for the given objective bank. arg: objectiveBankId (osid.id.Id): the Id of the objective bank return: (osid.learning.ActivitySearchSession) - an ActivitySearchSession raise: NotFound - objectiveBankId not found raise: NullArgument - objectiveBankId is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_activity_search() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_activity_search() and supports_visible_federation() are true.
['Gets', 'the', 'OsidSession', 'associated', 'with', 'the', 'activity', 'search', 'service', 'for', 'the', 'given', 'objective', 'bank', '.']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/learning/managers.py#L1439-L1469
222
python273/telegraph
telegraph/api.py
Telegraph.edit_account_info
def edit_account_info(self, short_name=None, author_name=None, author_url=None): """ Update information about a Telegraph account. Pass only the parameters that you want to edit :param short_name: Account name, helps users with several accounts remember which they are currently using. Displayed to the user above the "Edit/Publish" button on Telegra.ph, other users don't see this name :param author_name: Default author name used when creating new articles :param author_url: Default profile link, opened when users click on the author's name below the title. Can be any link, not necessarily to a Telegram profile or channels """ return self._telegraph.method('editAccountInfo', values={ 'short_name': short_name, 'author_name': author_name, 'author_url': author_url })
python
def edit_account_info(self, short_name=None, author_name=None, author_url=None): """ Update information about a Telegraph account. Pass only the parameters that you want to edit :param short_name: Account name, helps users with several accounts remember which they are currently using. Displayed to the user above the "Edit/Publish" button on Telegra.ph, other users don't see this name :param author_name: Default author name used when creating new articles :param author_url: Default profile link, opened when users click on the author's name below the title. Can be any link, not necessarily to a Telegram profile or channels """ return self._telegraph.method('editAccountInfo', values={ 'short_name': short_name, 'author_name': author_name, 'author_url': author_url })
['def', 'edit_account_info', '(', 'self', ',', 'short_name', '=', 'None', ',', 'author_name', '=', 'None', ',', 'author_url', '=', 'None', ')', ':', 'return', 'self', '.', '_telegraph', '.', 'method', '(', "'editAccountInfo'", ',', 'values', '=', '{', "'short_name'", ':', 'short_name', ',', "'author_name'", ':', 'author_name', ',', "'author_url'", ':', 'author_url', '}', ')']
Update information about a Telegraph account. Pass only the parameters that you want to edit :param short_name: Account name, helps users with several accounts remember which they are currently using. Displayed to the user above the "Edit/Publish" button on Telegra.ph, other users don't see this name :param author_name: Default author name used when creating new articles :param author_url: Default profile link, opened when users click on the author's name below the title. Can be any link, not necessarily to a Telegram profile or channels
['Update', 'information', 'about', 'a', 'Telegraph', 'account', '.', 'Pass', 'only', 'the', 'parameters', 'that', 'you', 'want', 'to', 'edit']
train
https://github.com/python273/telegraph/blob/6d45cd6bbae4fdbd85b48ce32626f3c66e9e5ddc/telegraph/api.py#L86-L107
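edit_account_info forwards only the three optional fields to the editAccountInfo method, so parameters left at None are simply not changed. A usage sketch, assuming the enclosing class is constructed as telegraph.Telegraph(access_token=...) (the constructor itself is not part of this record):

.. code-block:: python

    from telegraph import Telegraph

    tg = Telegraph(access_token="my-access-token")   # placeholder token
    # change only the default author name; short_name and author_url stay as-is
    tg.edit_account_info(author_name="New Author Name")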
223
pyviz/holoviews
holoviews/core/spaces.py
HoloMap.split_overlays
def split_overlays(self): "Deprecated method to split overlays inside the HoloMap." if util.config.future_deprecations: self.param.warning("split_overlays is deprecated and is now " "a private method.") return self._split_overlays()
python
def split_overlays(self): "Deprecated method to split overlays inside the HoloMap." if util.config.future_deprecations: self.param.warning("split_overlays is deprecated and is now " "a private method.") return self._split_overlays()
['def', 'split_overlays', '(', 'self', ')', ':', 'if', 'util', '.', 'config', '.', 'future_deprecations', ':', 'self', '.', 'param', '.', 'warning', '(', '"split_overlays is deprecated and is now "', '"a private method."', ')', 'return', 'self', '.', '_split_overlays', '(', ')']
Deprecated method to split overlays inside the HoloMap.
['Deprecated', 'method', 'to', 'split', 'overlays', 'inside', 'the', 'HoloMap', '.']
train
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/spaces.py#L144-L149
224
manns/pyspread
pyspread/src/lib/vlc.py
libvlc_video_get_spu
def libvlc_video_get_spu(p_mi): '''Get current video subtitle. @param p_mi: the media player. @return: the video subtitle selected, or -1 if none. ''' f = _Cfunctions.get('libvlc_video_get_spu', None) or \ _Cfunction('libvlc_video_get_spu', ((1,),), None, ctypes.c_int, MediaPlayer) return f(p_mi)
python
def libvlc_video_get_spu(p_mi): '''Get current video subtitle. @param p_mi: the media player. @return: the video subtitle selected, or -1 if none. ''' f = _Cfunctions.get('libvlc_video_get_spu', None) or \ _Cfunction('libvlc_video_get_spu', ((1,),), None, ctypes.c_int, MediaPlayer) return f(p_mi)
['def', 'libvlc_video_get_spu', '(', 'p_mi', ')', ':', 'f', '=', '_Cfunctions', '.', 'get', '(', "'libvlc_video_get_spu'", ',', 'None', ')', 'or', '_Cfunction', '(', "'libvlc_video_get_spu'", ',', '(', '(', '1', ',', ')', ',', ')', ',', 'None', ',', 'ctypes', '.', 'c_int', ',', 'MediaPlayer', ')', 'return', 'f', '(', 'p_mi', ')']
Get current video subtitle. @param p_mi: the media player. @return: the video subtitle selected, or -1 if none.
['Get', 'current', 'video', 'subtitle', '.']
train
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L5696-L5704
225
rckclmbr/pyportify
pyportify/pkcs1/primes.py
is_prime
def is_prime(n, rnd=default_pseudo_random, k=DEFAULT_ITERATION, algorithm=None): '''Test if n is a prime number m - the integer to test rnd - the random number generator to use for the probalistic primality algorithms, k - the number of iterations to use for the probabilistic primality algorithms, algorithm - the primality algorithm to use, default is Miller-Rabin. The gmpy implementation is used if gmpy is installed. Return value: True is n seems prime, False otherwise. ''' if algorithm is None: algorithm = PRIME_ALGO if algorithm == 'gmpy-miller-rabin': if not gmpy: raise NotImplementedError return gmpy.is_prime(n, k) elif algorithm == 'miller-rabin': # miller rabin probability of primality is 1/4**k return miller_rabin(n, k, rnd=rnd) elif algorithm == 'solovay-strassen': # for jacobi it's 1/2**k return randomized_primality_testing(n, rnd=rnd, k=k*2) else: raise NotImplementedError
python
def is_prime(n, rnd=default_pseudo_random, k=DEFAULT_ITERATION, algorithm=None): '''Test if n is a prime number m - the integer to test rnd - the random number generator to use for the probalistic primality algorithms, k - the number of iterations to use for the probabilistic primality algorithms, algorithm - the primality algorithm to use, default is Miller-Rabin. The gmpy implementation is used if gmpy is installed. Return value: True is n seems prime, False otherwise. ''' if algorithm is None: algorithm = PRIME_ALGO if algorithm == 'gmpy-miller-rabin': if not gmpy: raise NotImplementedError return gmpy.is_prime(n, k) elif algorithm == 'miller-rabin': # miller rabin probability of primality is 1/4**k return miller_rabin(n, k, rnd=rnd) elif algorithm == 'solovay-strassen': # for jacobi it's 1/2**k return randomized_primality_testing(n, rnd=rnd, k=k*2) else: raise NotImplementedError
['def', 'is_prime', '(', 'n', ',', 'rnd', '=', 'default_pseudo_random', ',', 'k', '=', 'DEFAULT_ITERATION', ',', 'algorithm', '=', 'None', ')', ':', 'if', 'algorithm', 'is', 'None', ':', 'algorithm', '=', 'PRIME_ALGO', 'if', 'algorithm', '==', "'gmpy-miller-rabin'", ':', 'if', 'not', 'gmpy', ':', 'raise', 'NotImplementedError', 'return', 'gmpy', '.', 'is_prime', '(', 'n', ',', 'k', ')', 'elif', 'algorithm', '==', "'miller-rabin'", ':', '# miller rabin probability of primality is 1/4**k', 'return', 'miller_rabin', '(', 'n', ',', 'k', ',', 'rnd', '=', 'rnd', ')', 'elif', 'algorithm', '==', "'solovay-strassen'", ':', "# for jacobi it's 1/2**k", 'return', 'randomized_primality_testing', '(', 'n', ',', 'rnd', '=', 'rnd', ',', 'k', '=', 'k', '*', '2', ')', 'else', ':', 'raise', 'NotImplementedError']
Test if n is a prime number n - the integer to test rnd - the random number generator to use for the probabilistic primality algorithms, k - the number of iterations to use for the probabilistic primality algorithms, algorithm - the primality algorithm to use, default is Miller-Rabin. The gmpy implementation is used if gmpy is installed. Return value: True if n seems prime, False otherwise.
['Test', 'if', 'n', 'is', 'a', 'prime', 'number']
train
https://github.com/rckclmbr/pyportify/blob/696a1caad8a47b191f3bec44cc8fc3c437779512/pyportify/pkcs1/primes.py#L19-L47
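A short usage sketch for is_prime above. The default path is the Miller-Rabin test with DEFAULT_ITERATION rounds (per the in-code comment, a false-positive probability of at most 1/4**k), 'gmpy-miller-rabin' requires gmpy, and any other algorithm name raises NotImplementedError. The import path below follows the record's file path.

.. code-block:: python

    from pyportify.pkcs1 import primes

    print(primes.is_prime(104729))        # True  (the 10000th prime)
    print(primes.is_prime(104730))        # False (even, hence composite)
    # more rounds shrink the already-small false-positive probability
    print(primes.is_prime(2**127 - 1, k=40))                        # True
    # pick the Solovay-Strassen test explicitly
    print(primes.is_prime(104729, algorithm="solovay-strassen"))    # True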
226
d0c-s4vage/pfp
pfp/native/compat_tools.py
FindAll
def FindAll(params, ctxt, scope, stream, coord, interp): """ This function converts the argument data into a set of hex bytes and then searches the current file for all occurrences of those bytes. data may be any of the basic types or an array of one of the types. If data is an array of signed bytes, it is assumed to be a null-terminated string. To search for an array of hex bytes, create an unsigned char array and fill it with the target value. If the type being search for is a string, the matchcase and wholeworld arguments can be used to control the search (see Using Find for more information). method controls which search method is used from the following options: FINDMETHOD_NORMAL=0 - a normal search FINDMETHOD_WILDCARDS=1 - when searching for strings use wildcards '*' or '?' FINDMETHOD_REGEX=2 - when searching for strings use Regular Expressions wildcardMatchLength indicates the maximum number of characters a '*' can match when searching using wildcards. If the target is a float or double, the tolerance argument indicates that values that are only off by the tolerance value still match. If dir is 1 the find direction is down and if dir is 0 the find direction is up. start and size can be used to limit the area of the file that is searched. start is the starting byte address in the file where the search will begin and size is the number of bytes after start that will be searched. If size is zero, the file will be searched from start to the end of the file. The return value is a TFindResults structure. This structure contains a count variable indicating the number of matches, and a start array holding an array of starting positions, plus a size array which holds an array of target lengths. For example, use the following code to find all occurrences of the ASCII string "Test" in a file: """ matches_iter = _find_helper(params, ctxt, scope, stream, coord, interp) matches = list(matches_iter) types = interp.get_types() res = types.TFindResults() res.count = len(matches) # python3 map doesn't return a list starts = list(map(lambda m: m.start()+FIND_MATCHES_START_OFFSET, matches)) res.start = starts # python3 map doesn't return a list sizes = list(map(lambda m: m.end()-m.start(), matches)) res.size = sizes return res
python
def FindAll(params, ctxt, scope, stream, coord, interp): """ This function converts the argument data into a set of hex bytes and then searches the current file for all occurrences of those bytes. data may be any of the basic types or an array of one of the types. If data is an array of signed bytes, it is assumed to be a null-terminated string. To search for an array of hex bytes, create an unsigned char array and fill it with the target value. If the type being search for is a string, the matchcase and wholeworld arguments can be used to control the search (see Using Find for more information). method controls which search method is used from the following options: FINDMETHOD_NORMAL=0 - a normal search FINDMETHOD_WILDCARDS=1 - when searching for strings use wildcards '*' or '?' FINDMETHOD_REGEX=2 - when searching for strings use Regular Expressions wildcardMatchLength indicates the maximum number of characters a '*' can match when searching using wildcards. If the target is a float or double, the tolerance argument indicates that values that are only off by the tolerance value still match. If dir is 1 the find direction is down and if dir is 0 the find direction is up. start and size can be used to limit the area of the file that is searched. start is the starting byte address in the file where the search will begin and size is the number of bytes after start that will be searched. If size is zero, the file will be searched from start to the end of the file. The return value is a TFindResults structure. This structure contains a count variable indicating the number of matches, and a start array holding an array of starting positions, plus a size array which holds an array of target lengths. For example, use the following code to find all occurrences of the ASCII string "Test" in a file: """ matches_iter = _find_helper(params, ctxt, scope, stream, coord, interp) matches = list(matches_iter) types = interp.get_types() res = types.TFindResults() res.count = len(matches) # python3 map doesn't return a list starts = list(map(lambda m: m.start()+FIND_MATCHES_START_OFFSET, matches)) res.start = starts # python3 map doesn't return a list sizes = list(map(lambda m: m.end()-m.start(), matches)) res.size = sizes return res
['def', 'FindAll', '(', 'params', ',', 'ctxt', ',', 'scope', ',', 'stream', ',', 'coord', ',', 'interp', ')', ':', 'matches_iter', '=', '_find_helper', '(', 'params', ',', 'ctxt', ',', 'scope', ',', 'stream', ',', 'coord', ',', 'interp', ')', 'matches', '=', 'list', '(', 'matches_iter', ')', 'types', '=', 'interp', '.', 'get_types', '(', ')', 'res', '=', 'types', '.', 'TFindResults', '(', ')', 'res', '.', 'count', '=', 'len', '(', 'matches', ')', "# python3 map doesn't return a list", 'starts', '=', 'list', '(', 'map', '(', 'lambda', 'm', ':', 'm', '.', 'start', '(', ')', '+', 'FIND_MATCHES_START_OFFSET', ',', 'matches', ')', ')', 'res', '.', 'start', '=', 'starts', "# python3 map doesn't return a list", 'sizes', '=', 'list', '(', 'map', '(', 'lambda', 'm', ':', 'm', '.', 'end', '(', ')', '-', 'm', '.', 'start', '(', ')', ',', 'matches', ')', ')', 'res', '.', 'size', '=', 'sizes', 'return', 'res']
This function converts the argument data into a set of hex bytes and then searches the current file for all occurrences of those bytes. data may be any of the basic types or an array of one of the types. If data is an array of signed bytes, it is assumed to be a null-terminated string. To search for an array of hex bytes, create an unsigned char array and fill it with the target value. If the type being search for is a string, the matchcase and wholeworld arguments can be used to control the search (see Using Find for more information). method controls which search method is used from the following options: FINDMETHOD_NORMAL=0 - a normal search FINDMETHOD_WILDCARDS=1 - when searching for strings use wildcards '*' or '?' FINDMETHOD_REGEX=2 - when searching for strings use Regular Expressions wildcardMatchLength indicates the maximum number of characters a '*' can match when searching using wildcards. If the target is a float or double, the tolerance argument indicates that values that are only off by the tolerance value still match. If dir is 1 the find direction is down and if dir is 0 the find direction is up. start and size can be used to limit the area of the file that is searched. start is the starting byte address in the file where the search will begin and size is the number of bytes after start that will be searched. If size is zero, the file will be searched from start to the end of the file. The return value is a TFindResults structure. This structure contains a count variable indicating the number of matches, and a start array holding an array of starting positions, plus a size array which holds an array of target lengths. For example, use the following code to find all occurrences of the ASCII string "Test" in a file:
['This', 'function', 'converts', 'the', 'argument', 'data', 'into', 'a', 'set', 'of', 'hex', 'bytes', 'and', 'then', 'searches', 'the', 'current', 'file', 'for', 'all', 'occurrences', 'of', 'those', 'bytes', '.', 'data', 'may', 'be', 'any', 'of', 'the', 'basic', 'types', 'or', 'an', 'array', 'of', 'one', 'of', 'the', 'types', '.', 'If', 'data', 'is', 'an', 'array', 'of', 'signed', 'bytes', 'it', 'is', 'assumed', 'to', 'be', 'a', 'null', '-', 'terminated', 'string', '.', 'To', 'search', 'for', 'an', 'array', 'of', 'hex', 'bytes', 'create', 'an', 'unsigned', 'char', 'array', 'and', 'fill', 'it', 'with', 'the', 'target', 'value', '.', 'If', 'the', 'type', 'being', 'search', 'for', 'is', 'a', 'string', 'the', 'matchcase', 'and', 'wholeworld', 'arguments', 'can', 'be', 'used', 'to', 'control', 'the', 'search', '(', 'see', 'Using', 'Find', 'for', 'more', 'information', ')', '.', 'method', 'controls', 'which', 'search', 'method', 'is', 'used', 'from', 'the', 'following', 'options', ':']
train
https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/native/compat_tools.py#L537-L575
227
radjkarl/imgProcessor
imgProcessor/equations/vignetting.py
vignetting
def vignetting(xy, f=100, alpha=0, rot=0, tilt=0, cx=50, cy=50): ''' Vignetting equation using the KANG-WEISS-MODEL see http://research.microsoft.com/en-us/um/people/sbkang/publications/eccv00.pdf f - focal length alpha - coefficient in the geometric vignetting factor tilt - tilt angle of a planar scene rot - rotation angle of a planar scene cx - image center, x cy - image center, y ''' x, y = xy # distance to image center: dist = ((x - cx)**2 + (y - cy)**2)**0.5 # OFF_AXIS ILLUMINATION FACTOR: A = 1.0 / (1 + (dist / f)**2)**2 # GEOMETRIC FACTOR: if alpha != 0: G = (1 - alpha * dist) else: G = 1 # TILT FACTOR: if tilt != 0: T = tiltFactor((x, y), f, tilt, rot, (cy, cx)) else: T = 1 return A * G * T
python
def vignetting(xy, f=100, alpha=0, rot=0, tilt=0, cx=50, cy=50): ''' Vignetting equation using the KANG-WEISS-MODEL see http://research.microsoft.com/en-us/um/people/sbkang/publications/eccv00.pdf f - focal length alpha - coefficient in the geometric vignetting factor tilt - tilt angle of a planar scene rot - rotation angle of a planar scene cx - image center, x cy - image center, y ''' x, y = xy # distance to image center: dist = ((x - cx)**2 + (y - cy)**2)**0.5 # OFF_AXIS ILLUMINATION FACTOR: A = 1.0 / (1 + (dist / f)**2)**2 # GEOMETRIC FACTOR: if alpha != 0: G = (1 - alpha * dist) else: G = 1 # TILT FACTOR: if tilt != 0: T = tiltFactor((x, y), f, tilt, rot, (cy, cx)) else: T = 1 return A * G * T
['def', 'vignetting', '(', 'xy', ',', 'f', '=', '100', ',', 'alpha', '=', '0', ',', 'rot', '=', '0', ',', 'tilt', '=', '0', ',', 'cx', '=', '50', ',', 'cy', '=', '50', ')', ':', 'x', ',', 'y', '=', 'xy', '# distance to image center:\r', 'dist', '=', '(', '(', 'x', '-', 'cx', ')', '**', '2', '+', '(', 'y', '-', 'cy', ')', '**', '2', ')', '**', '0.5', '# OFF_AXIS ILLUMINATION FACTOR:\r', 'A', '=', '1.0', '/', '(', '1', '+', '(', 'dist', '/', 'f', ')', '**', '2', ')', '**', '2', '# GEOMETRIC FACTOR:\r', 'if', 'alpha', '!=', '0', ':', 'G', '=', '(', '1', '-', 'alpha', '*', 'dist', ')', 'else', ':', 'G', '=', '1', '# TILT FACTOR:\r', 'if', 'tilt', '!=', '0', ':', 'T', '=', 'tiltFactor', '(', '(', 'x', ',', 'y', ')', ',', 'f', ',', 'tilt', ',', 'rot', ',', '(', 'cy', ',', 'cx', ')', ')', 'else', ':', 'T', '=', '1', 'return', 'A', '*', 'G', '*', 'T']
Vignetting equation using the KANG-WEISS-MODEL see http://research.microsoft.com/en-us/um/people/sbkang/publications/eccv00.pdf f - focal length alpha - coefficient in the geometric vignetting factor tilt - tilt angle of a planar scene rot - rotation angle of a planar scene cx - image center, x cy - image center, y
['Vignetting', 'equation', 'using', 'the', 'KANG', '-', 'WEISS', '-', 'MODEL', 'see', 'http', ':', '//', 'research', '.', 'microsoft', '.', 'com', '/', 'en', '-', 'us', '/', 'um', '/', 'people', '/', 'sbkang', '/', 'publications', '/', 'eccv00', '.', 'pdf', 'f', '-', 'focal', 'length', 'alpha', '-', 'coefficient', 'in', 'the', 'geometric', 'vignetting', 'factor', 'tilt', '-', 'tilt', 'angle', 'of', 'a', 'planar', 'scene', 'rot', '-', 'rotation', 'angle', 'of', 'a', 'planar', 'scene', 'cx', '-', 'image', 'center', 'x', 'cy', '-', 'image', 'center', 'y']
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/equations/vignetting.py#L9-L37
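Written out, the factors computed by vignetting above are (a direct transcription of the code, with r the distance of pixel (x, y) from the image centre (c_x, c_y)):

.. math::

    r = \sqrt{(x - c_x)^2 + (y - c_y)^2}, \qquad
    A = \frac{1}{\left(1 + (r/f)^2\right)^2}, \qquad
    G = 1 - \alpha r, \qquad
    V(x, y) = A \, G \, T

where A is the off-axis illumination factor, G the geometric factor (1 when alpha = 0), and T the tilt factor from tiltFactor() (1 when tilt = 0).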
228
corpusops/pdbclone
lib/pdb_clone/pdb.py
Pdb.do_debug
def do_debug(self, arg): """debug code Enter a recursive debugger that steps through the code argument (which is an arbitrary expression or statement to be executed in the current environment). """ self.settrace(False) globals = self.curframe.f_globals locals = self.get_locals(self.curframe) p = Pdb(self.completekey, self.stdin, self.stdout, debug=True) p.prompt = "(%s) " % self.prompt.strip() self.message("ENTERING RECURSIVE DEBUGGER") sys.call_tracing(p.run, (arg, globals, locals)) self.message("LEAVING RECURSIVE DEBUGGER") self.settrace(True) self.lastcmd = p.lastcmd
python
def do_debug(self, arg): """debug code Enter a recursive debugger that steps through the code argument (which is an arbitrary expression or statement to be executed in the current environment). """ self.settrace(False) globals = self.curframe.f_globals locals = self.get_locals(self.curframe) p = Pdb(self.completekey, self.stdin, self.stdout, debug=True) p.prompt = "(%s) " % self.prompt.strip() self.message("ENTERING RECURSIVE DEBUGGER") sys.call_tracing(p.run, (arg, globals, locals)) self.message("LEAVING RECURSIVE DEBUGGER") self.settrace(True) self.lastcmd = p.lastcmd
['def', 'do_debug', '(', 'self', ',', 'arg', ')', ':', 'self', '.', 'settrace', '(', 'False', ')', 'globals', '=', 'self', '.', 'curframe', '.', 'f_globals', 'locals', '=', 'self', '.', 'get_locals', '(', 'self', '.', 'curframe', ')', 'p', '=', 'Pdb', '(', 'self', '.', 'completekey', ',', 'self', '.', 'stdin', ',', 'self', '.', 'stdout', ',', 'debug', '=', 'True', ')', 'p', '.', 'prompt', '=', '"(%s) "', '%', 'self', '.', 'prompt', '.', 'strip', '(', ')', 'self', '.', 'message', '(', '"ENTERING RECURSIVE DEBUGGER"', ')', 'sys', '.', 'call_tracing', '(', 'p', '.', 'run', ',', '(', 'arg', ',', 'globals', ',', 'locals', ')', ')', 'self', '.', 'message', '(', '"LEAVING RECURSIVE DEBUGGER"', ')', 'self', '.', 'settrace', '(', 'True', ')', 'self', '.', 'lastcmd', '=', 'p', '.', 'lastcmd']
debug code Enter a recursive debugger that steps through the code argument (which is an arbitrary expression or statement to be executed in the current environment).
['debug', 'code', 'Enter', 'a', 'recursive', 'debugger', 'that', 'steps', 'through', 'the', 'code', 'argument', '(', 'which', 'is', 'an', 'arbitrary', 'expression', 'or', 'statement', 'to', 'be', 'executed', 'in', 'the', 'current', 'environment', ')', '.']
train
https://github.com/corpusops/pdbclone/blob/f781537c243a4874b246d43dbdef8c4279f0094d/lib/pdb_clone/pdb.py#L1320-L1335
229
mastro35/flows
flows/Actions/InputTailAction.py
TailAction.flush_buffer
def flush_buffer(self): ''' Flush the buffer of the tail ''' if len(self.buffer) > 0: return_value = ''.join(self.buffer) self.buffer.clear() self.send_message(return_value) self.last_flush_date = datetime.datetime.now()
python
def flush_buffer(self): ''' Flush the buffer of the tail ''' if len(self.buffer) > 0: return_value = ''.join(self.buffer) self.buffer.clear() self.send_message(return_value) self.last_flush_date = datetime.datetime.now()
['def', 'flush_buffer', '(', 'self', ')', ':', 'if', 'len', '(', 'self', '.', 'buffer', ')', '>', '0', ':', 'return_value', '=', "''", '.', 'join', '(', 'self', '.', 'buffer', ')', 'self', '.', 'buffer', '.', 'clear', '(', ')', 'self', '.', 'send_message', '(', 'return_value', ')', 'self', '.', 'last_flush_date', '=', 'datetime', '.', 'datetime', '.', 'now', '(', ')']
Flush the buffer of the tail
['Flush', 'the', 'buffer', 'of', 'the', 'tail']
train
https://github.com/mastro35/flows/blob/05e488385673a69597b5b39c7728795aa4d5eb18/flows/Actions/InputTailAction.py#L64-L70
230
urtdevs/yaurtww
yaurtww/manifest.py
Manifest._get_url
def _get_url(self, filename): """ Returns url for cdn.urbanterror.info to pass to _not_wget(). http://cdn.urbanterror.info/urt/<major_ver_without_.>/<release_num>-<magic_number>/q3ut4/<filename> """ return self.cdn_url.format(self.mver, self.relnum, filename)
python
def _get_url(self, filename): """ Returns url for cdn.urbanterror.info to pass to _not_wget(). http://cdn.urbanterror.info/urt/<major_ver_without_.>/<release_num>-<magic_number>/q3ut4/<filename> """ return self.cdn_url.format(self.mver, self.relnum, filename)
['def', '_get_url', '(', 'self', ',', 'filename', ')', ':', 'return', 'self', '.', 'cdn_url', '.', 'format', '(', 'self', '.', 'mver', ',', 'self', '.', 'relnum', ',', 'filename', ')']
Returns url for cdn.urbanterror.info to pass to _not_wget(). http://cdn.urbanterror.info/urt/<major_ver_without_.>/<release_num>-<magic_number>/q3ut4/<filename>
['Returns', 'url', 'for', 'cdn', '.', 'urbanterror', '.', 'info', 'to', 'pass', 'to', '_not_wget', '()', '.']
train
https://github.com/urtdevs/yaurtww/blob/842fbd1fb5d32c2be89df471591b70c767aebd14/yaurtww/manifest.py#L25-L31
231
mapbox/mapbox-cli-py
mapboxcli/scripts/directions.py
directions
def directions(ctx, features, profile, alternatives, geometries, overview, steps, continue_straight, waypoint_snapping, annotations, language, output): """The Mapbox Directions API will show you how to get where you're going. mapbox directions "[0, 0]" "[1, 1]" An access token is required. See "mapbox --help". """ access_token = (ctx.obj and ctx.obj.get("access_token")) or None service = mapbox.Directions(access_token=access_token) # The Directions SDK expects False to be # a bool, not a str. if overview == "False": overview = False # When using waypoint snapping, the # Directions SDK expects features to be # a list, not a generator. if waypoint_snapping is not None: features = list(features) if annotations: annotations = annotations.split(",") stdout = click.open_file(output, "w") try: res = service.directions( features, profile=profile, alternatives=alternatives, geometries=geometries, overview=overview, steps=steps, continue_straight=continue_straight, waypoint_snapping=waypoint_snapping, annotations=annotations, language=language ) except mapbox.errors.ValidationError as exc: raise click.BadParameter(str(exc)) if res.status_code == 200: if geometries == "geojson": click.echo(json.dumps(res.geojson()), file=stdout) else: click.echo(res.text, file=stdout) else: raise MapboxCLIException(res.text.strip())
python
def directions(ctx, features, profile, alternatives, geometries, overview, steps, continue_straight, waypoint_snapping, annotations, language, output): """The Mapbox Directions API will show you how to get where you're going. mapbox directions "[0, 0]" "[1, 1]" An access token is required. See "mapbox --help". """ access_token = (ctx.obj and ctx.obj.get("access_token")) or None service = mapbox.Directions(access_token=access_token) # The Directions SDK expects False to be # a bool, not a str. if overview == "False": overview = False # When using waypoint snapping, the # Directions SDK expects features to be # a list, not a generator. if waypoint_snapping is not None: features = list(features) if annotations: annotations = annotations.split(",") stdout = click.open_file(output, "w") try: res = service.directions( features, profile=profile, alternatives=alternatives, geometries=geometries, overview=overview, steps=steps, continue_straight=continue_straight, waypoint_snapping=waypoint_snapping, annotations=annotations, language=language ) except mapbox.errors.ValidationError as exc: raise click.BadParameter(str(exc)) if res.status_code == 200: if geometries == "geojson": click.echo(json.dumps(res.geojson()), file=stdout) else: click.echo(res.text, file=stdout) else: raise MapboxCLIException(res.text.strip())
['def', 'directions', '(', 'ctx', ',', 'features', ',', 'profile', ',', 'alternatives', ',', 'geometries', ',', 'overview', ',', 'steps', ',', 'continue_straight', ',', 'waypoint_snapping', ',', 'annotations', ',', 'language', ',', 'output', ')', ':', 'access_token', '=', '(', 'ctx', '.', 'obj', 'and', 'ctx', '.', 'obj', '.', 'get', '(', '"access_token"', ')', ')', 'or', 'None', 'service', '=', 'mapbox', '.', 'Directions', '(', 'access_token', '=', 'access_token', ')', '# The Directions SDK expects False to be', '# a bool, not a str.', 'if', 'overview', '==', '"False"', ':', 'overview', '=', 'False', '# When using waypoint snapping, the ', '# Directions SDK expects features to be ', '# a list, not a generator.', 'if', 'waypoint_snapping', 'is', 'not', 'None', ':', 'features', '=', 'list', '(', 'features', ')', 'if', 'annotations', ':', 'annotations', '=', 'annotations', '.', 'split', '(', '","', ')', 'stdout', '=', 'click', '.', 'open_file', '(', 'output', ',', '"w"', ')', 'try', ':', 'res', '=', 'service', '.', 'directions', '(', 'features', ',', 'profile', '=', 'profile', ',', 'alternatives', '=', 'alternatives', ',', 'geometries', '=', 'geometries', ',', 'overview', '=', 'overview', ',', 'steps', '=', 'steps', ',', 'continue_straight', '=', 'continue_straight', ',', 'waypoint_snapping', '=', 'waypoint_snapping', ',', 'annotations', '=', 'annotations', ',', 'language', '=', 'language', ')', 'except', 'mapbox', '.', 'errors', '.', 'ValidationError', 'as', 'exc', ':', 'raise', 'click', '.', 'BadParameter', '(', 'str', '(', 'exc', ')', ')', 'if', 'res', '.', 'status_code', '==', '200', ':', 'if', 'geometries', '==', '"geojson"', ':', 'click', '.', 'echo', '(', 'json', '.', 'dumps', '(', 'res', '.', 'geojson', '(', ')', ')', ',', 'file', '=', 'stdout', ')', 'else', ':', 'click', '.', 'echo', '(', 'res', '.', 'text', ',', 'file', '=', 'stdout', ')', 'else', ':', 'raise', 'MapboxCLIException', '(', 'res', '.', 'text', '.', 'strip', '(', ')', ')']
The Mapbox Directions API will show you how to get where you're going. mapbox directions "[0, 0]" "[1, 1]" An access token is required. See "mapbox --help".
['The', 'Mapbox', 'Directions', 'API', 'will', 'show', 'you', 'how', 'to', 'get', 'where', 'you', 're', 'going', '.']
train
https://github.com/mapbox/mapbox-cli-py/blob/b75544a2f83a4fda79d78b5673058e16e64a4f6d/mapboxcli/scripts/directions.py#L163-L218
232
vatlab/SoS
src/sos/actions.py
stop_if
def stop_if(expr, msg='', no_output=False): '''Abort the execution of the current step or loop and yield an warning message `msg` if `expr` is False ''' if expr: raise StopInputGroup(msg=msg, keep_output=not no_output) return 0
python
def stop_if(expr, msg='', no_output=False): '''Abort the execution of the current step or loop and yield an warning message `msg` if `expr` is False ''' if expr: raise StopInputGroup(msg=msg, keep_output=not no_output) return 0
['def', 'stop_if', '(', 'expr', ',', 'msg', '=', "''", ',', 'no_output', '=', 'False', ')', ':', 'if', 'expr', ':', 'raise', 'StopInputGroup', '(', 'msg', '=', 'msg', ',', 'keep_output', '=', 'not', 'no_output', ')', 'return', '0']
Abort the execution of the current step or loop and yield a warning message `msg` if `expr` is True
['Abort', 'the', 'execution', 'of', 'the', 'current', 'step', 'or', 'loop', 'and', 'yield', 'a', 'warning', 'message', 'msg', 'if', 'expr', 'is', 'True']
train
https://github.com/vatlab/SoS/blob/6b60ed0770916d135e17322e469520d778e9d4e7/src/sos/actions.py#L682-L687
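A small usage sketch for stop_if above: as the body shows, the current input group is aborted when `expr` evaluates truthy, and no_output=False (the default) keeps whatever output was already produced. The file-size check and variable name below are illustrative only.

.. code-block:: python

    import os

    # inside a SoS step: skip this substep (and drop its output) when the
    # file it is working on is empty; 'input_file' is a placeholder name
    stop_if(os.path.getsize(input_file) == 0,
            msg="empty input, skipping",
            no_output=True)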
233
gwastro/pycbc
pycbc/io/live.py
SingleCoincForGraceDB.save
def save(self, filename): """Write this trigger to gracedb compatible xml format Parameters ---------- filename: str Name of file to write to disk. """ gz = filename.endswith('.gz') ligolw_utils.write_filename(self.outdoc, filename, gz=gz)
python
def save(self, filename): """Write this trigger to gracedb compatible xml format Parameters ---------- filename: str Name of file to write to disk. """ gz = filename.endswith('.gz') ligolw_utils.write_filename(self.outdoc, filename, gz=gz)
['def', 'save', '(', 'self', ',', 'filename', ')', ':', 'gz', '=', 'filename', '.', 'endswith', '(', "'.gz'", ')', 'ligolw_utils', '.', 'write_filename', '(', 'self', '.', 'outdoc', ',', 'filename', ',', 'gz', '=', 'gz', ')']
Write this trigger to gracedb compatible xml format Parameters ---------- filename: str Name of file to write to disk.
['Write', 'this', 'trigger', 'to', 'gracedb', 'compatible', 'xml', 'format']
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/io/live.py#L257-L266
234
pantsbuild/pants
src/python/pants/backend/jvm/ivy_utils.py
IvyUtils.generate_fetch_ivy
def generate_fetch_ivy(cls, jars, ivyxml, confs, resolve_hash_name): """Generates an ivy xml with all jars marked as intransitive using the all conflict manager.""" org = IvyUtils.INTERNAL_ORG_NAME name = resolve_hash_name extra_configurations = [conf for conf in confs if conf and conf != 'default'] # Use org name _and_ rev so that we can have dependencies with different versions. This will # allow for batching fetching if we want to do that. jars_by_key = OrderedDict() for jar in jars: jars_by_key.setdefault((jar.org, jar.name, jar.rev), []).append(jar) dependencies = [cls._generate_fetch_jar_template(_jars) for _jars in jars_by_key.values()] template_data = TemplateData(org=org, module=name, extra_configurations=extra_configurations, dependencies=dependencies) template_relpath = os.path.join('templates', 'ivy_utils', 'ivy_fetch.xml.mustache') cls._write_ivy_xml_file(ivyxml, template_data, template_relpath)
python
def generate_fetch_ivy(cls, jars, ivyxml, confs, resolve_hash_name): """Generates an ivy xml with all jars marked as intransitive using the all conflict manager.""" org = IvyUtils.INTERNAL_ORG_NAME name = resolve_hash_name extra_configurations = [conf for conf in confs if conf and conf != 'default'] # Use org name _and_ rev so that we can have dependencies with different versions. This will # allow for batching fetching if we want to do that. jars_by_key = OrderedDict() for jar in jars: jars_by_key.setdefault((jar.org, jar.name, jar.rev), []).append(jar) dependencies = [cls._generate_fetch_jar_template(_jars) for _jars in jars_by_key.values()] template_data = TemplateData(org=org, module=name, extra_configurations=extra_configurations, dependencies=dependencies) template_relpath = os.path.join('templates', 'ivy_utils', 'ivy_fetch.xml.mustache') cls._write_ivy_xml_file(ivyxml, template_data, template_relpath)
['def', 'generate_fetch_ivy', '(', 'cls', ',', 'jars', ',', 'ivyxml', ',', 'confs', ',', 'resolve_hash_name', ')', ':', 'org', '=', 'IvyUtils', '.', 'INTERNAL_ORG_NAME', 'name', '=', 'resolve_hash_name', 'extra_configurations', '=', '[', 'conf', 'for', 'conf', 'in', 'confs', 'if', 'conf', 'and', 'conf', '!=', "'default'", ']', '# Use org name _and_ rev so that we can have dependencies with different versions. This will', '# allow for batching fetching if we want to do that.', 'jars_by_key', '=', 'OrderedDict', '(', ')', 'for', 'jar', 'in', 'jars', ':', 'jars_by_key', '.', 'setdefault', '(', '(', 'jar', '.', 'org', ',', 'jar', '.', 'name', ',', 'jar', '.', 'rev', ')', ',', '[', ']', ')', '.', 'append', '(', 'jar', ')', 'dependencies', '=', '[', 'cls', '.', '_generate_fetch_jar_template', '(', '_jars', ')', 'for', '_jars', 'in', 'jars_by_key', '.', 'values', '(', ')', ']', 'template_data', '=', 'TemplateData', '(', 'org', '=', 'org', ',', 'module', '=', 'name', ',', 'extra_configurations', '=', 'extra_configurations', ',', 'dependencies', '=', 'dependencies', ')', 'template_relpath', '=', 'os', '.', 'path', '.', 'join', '(', "'templates'", ',', "'ivy_utils'", ',', "'ivy_fetch.xml.mustache'", ')', 'cls', '.', '_write_ivy_xml_file', '(', 'ivyxml', ',', 'template_data', ',', 'template_relpath', ')']
Generates an ivy xml with all jars marked as intransitive using the all conflict manager.
['Generates', 'an', 'ivy', 'xml', 'with', 'all', 'jars', 'marked', 'as', 'intransitive', 'using', 'the', 'all', 'conflict', 'manager', '.']
train
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/ivy_utils.py#L995-L1017
235
openmicroanalysis/pyxray
pyxray/base.py
_Database.get_default_reference
def get_default_reference(self, method): """ Returns the default reference for a method. :arg method: name of a method :type method: :class:`str` :return: reference :rtype: :class:`Reference <pyxray.descriptor.Reference>` or :class:`str` """ if method not in self._available_methods: raise ValueError('Unknown method: {0}'.format(method)) return self._default_references.get(method)
python
def get_default_reference(self, method): """ Returns the default reference for a method. :arg method: name of a method :type method: :class:`str` :return: reference :rtype: :class:`Reference <pyxray.descriptor.Reference>` or :class:`str` """ if method not in self._available_methods: raise ValueError('Unknown method: {0}'.format(method)) return self._default_references.get(method)
['def', 'get_default_reference', '(', 'self', ',', 'method', ')', ':', 'if', 'method', 'not', 'in', 'self', '.', '_available_methods', ':', 'raise', 'ValueError', '(', "'Unknown method: {0}'", '.', 'format', '(', 'method', ')', ')', 'return', 'self', '.', '_default_references', '.', 'get', '(', 'method', ')']
Returns the default reference for a method. :arg method: name of a method :type method: :class:`str` :return: reference :rtype: :class:`Reference <pyxray.descriptor.Reference>` or :class:`str`
['Returns', 'the', 'default', 'reference', 'for', 'a', 'method', '.']
train
https://github.com/openmicroanalysis/pyxray/blob/cae89677f00ebcc0952f94d1ab70e6b35e1a51e9/pyxray/base.py#L87-L99
236
ninuxorg/nodeshot
nodeshot/interop/sync/synchronizers/cnml.py
Cnml.parse
def parse(self): """ parse data """ url = self.config.get('url') self.cnml = CNMLParser(url) self.parsed_data = self.cnml.getNodes()
python
def parse(self): """ parse data """ url = self.config.get('url') self.cnml = CNMLParser(url) self.parsed_data = self.cnml.getNodes()
['def', 'parse', '(', 'self', ')', ':', 'url', '=', 'self', '.', 'config', '.', 'get', '(', "'url'", ')', 'self', '.', 'cnml', '=', 'CNMLParser', '(', 'url', ')', 'self', '.', 'parsed_data', '=', 'self', '.', 'cnml', '.', 'getNodes', '(', ')']
parse data
['parse', 'data']
train
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/interop/sync/synchronizers/cnml.py#L130-L134
237
saltstack/salt
salt/cloud/clouds/opennebula.py
vm_info
def vm_info(name, call=None): ''' Retrieves information for a given virtual machine. A VM name must be supplied. .. versionadded:: 2016.3.0 name The name of the VM for which to gather information. CLI Example: .. code-block:: bash salt-cloud -a vm_info my-vm ''' if call != 'action': raise SaltCloudSystemExit( 'The vm_info action must be called with -a or --action.' ) server, user, password = _get_xml_rpc() auth = ':'.join([user, password]) vm_id = int(get_vm_id(kwargs={'name': name})) response = server.one.vm.info(auth, vm_id) if response[0] is False: return response[1] else: info = {} tree = _get_xml(response[1]) info[tree.find('NAME').text] = _xml_to_dict(tree) return info
python
def vm_info(name, call=None): ''' Retrieves information for a given virtual machine. A VM name must be supplied. .. versionadded:: 2016.3.0 name The name of the VM for which to gather information. CLI Example: .. code-block:: bash salt-cloud -a vm_info my-vm ''' if call != 'action': raise SaltCloudSystemExit( 'The vm_info action must be called with -a or --action.' ) server, user, password = _get_xml_rpc() auth = ':'.join([user, password]) vm_id = int(get_vm_id(kwargs={'name': name})) response = server.one.vm.info(auth, vm_id) if response[0] is False: return response[1] else: info = {} tree = _get_xml(response[1]) info[tree.find('NAME').text] = _xml_to_dict(tree) return info
['def', 'vm_info', '(', 'name', ',', 'call', '=', 'None', ')', ':', 'if', 'call', '!=', "'action'", ':', 'raise', 'SaltCloudSystemExit', '(', "'The vm_info action must be called with -a or --action.'", ')', 'server', ',', 'user', ',', 'password', '=', '_get_xml_rpc', '(', ')', 'auth', '=', "':'", '.', 'join', '(', '[', 'user', ',', 'password', ']', ')', 'vm_id', '=', 'int', '(', 'get_vm_id', '(', 'kwargs', '=', '{', "'name'", ':', 'name', '}', ')', ')', 'response', '=', 'server', '.', 'one', '.', 'vm', '.', 'info', '(', 'auth', ',', 'vm_id', ')', 'if', 'response', '[', '0', ']', 'is', 'False', ':', 'return', 'response', '[', '1', ']', 'else', ':', 'info', '=', '{', '}', 'tree', '=', '_get_xml', '(', 'response', '[', '1', ']', ')', 'info', '[', 'tree', '.', 'find', '(', "'NAME'", ')', '.', 'text', ']', '=', '_xml_to_dict', '(', 'tree', ')', 'return', 'info']
Retrieves information for a given virtual machine. A VM name must be supplied. .. versionadded:: 2016.3.0 name The name of the VM for which to gather information. CLI Example: .. code-block:: bash salt-cloud -a vm_info my-vm
['Retrieves', 'information', 'for', 'a', 'given', 'virtual', 'machine', '.', 'A', 'VM', 'name', 'must', 'be', 'supplied', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/opennebula.py#L3373-L3404
238
thespacedoctor/transientNamer
transientNamer/search.py
search.sources
def sources( self): """*The results of the search returned as a python list of dictionaries* **Usage:** .. code-block:: python sources = tns.sources """ sourceResultsList = [] sourceResultsList[:] = [dict(l) for l in self.sourceResultsList] return sourceResultsList
python
def sources( self): """*The results of the search returned as a python list of dictionaries* **Usage:** .. code-block:: python sources = tns.sources """ sourceResultsList = [] sourceResultsList[:] = [dict(l) for l in self.sourceResultsList] return sourceResultsList
['def', 'sources', '(', 'self', ')', ':', 'sourceResultsList', '=', '[', ']', 'sourceResultsList', '[', ':', ']', '=', '[', 'dict', '(', 'l', ')', 'for', 'l', 'in', 'self', '.', 'sourceResultsList', ']', 'return', 'sourceResultsList']
*The results of the search returned as a python list of dictionaries* **Usage:** .. code-block:: python sources = tns.sources
['*', 'The', 'results', 'of', 'the', 'search', 'returned', 'as', 'a', 'python', 'list', 'of', 'dictionaries', '*']
train
https://github.com/thespacedoctor/transientNamer/blob/39be410c84275ed4669632f5df67e728d66a318f/transientNamer/search.py#L152-L164
239
google/grr
grr/server/grr_response_server/export.py
ArtifactFilesDownloaderResultConverter.GetExportedResult
def GetExportedResult(self, original_result, converter, metadata=None, token=None): """Converts original result via given converter..""" exported_results = list( converter.Convert( metadata or ExportedMetadata(), original_result, token=token)) if not exported_results: raise ExportError("Got 0 exported result when a single one " "was expected.") if len(exported_results) > 1: raise ExportError("Got > 1 exported results when a single " "one was expected, seems like a logical bug.") return exported_results[0]
python
def GetExportedResult(self, original_result, converter, metadata=None, token=None): """Converts original result via given converter..""" exported_results = list( converter.Convert( metadata or ExportedMetadata(), original_result, token=token)) if not exported_results: raise ExportError("Got 0 exported result when a single one " "was expected.") if len(exported_results) > 1: raise ExportError("Got > 1 exported results when a single " "one was expected, seems like a logical bug.") return exported_results[0]
['def', 'GetExportedResult', '(', 'self', ',', 'original_result', ',', 'converter', ',', 'metadata', '=', 'None', ',', 'token', '=', 'None', ')', ':', 'exported_results', '=', 'list', '(', 'converter', '.', 'Convert', '(', 'metadata', 'or', 'ExportedMetadata', '(', ')', ',', 'original_result', ',', 'token', '=', 'token', ')', ')', 'if', 'not', 'exported_results', ':', 'raise', 'ExportError', '(', '"Got 0 exported result when a single one "', '"was expected."', ')', 'if', 'len', '(', 'exported_results', ')', '>', '1', ':', 'raise', 'ExportError', '(', '"Got > 1 exported results when a single "', '"one was expected, seems like a logical bug."', ')', 'return', 'exported_results', '[', '0', ']']
Converts original result via given converter..
['Converts', 'original', 'result', 'via', 'given', 'converter', '..']
train
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/export.py#L1366-L1385
240
mozilla-releng/scriptworker
scriptworker/artifacts.py
retry_create_artifact
async def retry_create_artifact(*args, **kwargs): """Retry create_artifact() calls. Args: *args: the args to pass on to create_artifact **kwargs: the args to pass on to create_artifact """ await retry_async( create_artifact, retry_exceptions=( ScriptWorkerRetryException, aiohttp.ClientError ), args=args, kwargs=kwargs )
python
async def retry_create_artifact(*args, **kwargs): """Retry create_artifact() calls. Args: *args: the args to pass on to create_artifact **kwargs: the args to pass on to create_artifact """ await retry_async( create_artifact, retry_exceptions=( ScriptWorkerRetryException, aiohttp.ClientError ), args=args, kwargs=kwargs )
['async', 'def', 'retry_create_artifact', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'await', 'retry_async', '(', 'create_artifact', ',', 'retry_exceptions', '=', '(', 'ScriptWorkerRetryException', ',', 'aiohttp', '.', 'ClientError', ')', ',', 'args', '=', 'args', ',', 'kwargs', '=', 'kwargs', ')']
Retry create_artifact() calls. Args: *args: the args to pass on to create_artifact **kwargs: the args to pass on to create_artifact
['Retry', 'create_artifact', '()', 'calls', '.']
train
https://github.com/mozilla-releng/scriptworker/blob/8e97bbd83b9b578565ec57904c966dd6ae4ef0ae/scriptworker/artifacts.py#L133-L149
241
treyhunner/django-simple-history
simple_history/models.py
HistoricalRecords.create_history_model
def create_history_model(self, model, inherited): """ Creates a historical model to associate with the model provided. """ attrs = { "__module__": self.module, "_history_excluded_fields": self.excluded_fields, } app_module = "%s.models" % model._meta.app_label if inherited: # inherited use models module attrs["__module__"] = model.__module__ elif model.__module__ != self.module: # registered under different app attrs["__module__"] = self.module elif app_module != self.module: # Abuse an internal API because the app registry is loading. app = apps.app_configs[model._meta.app_label] models_module = app.name attrs["__module__"] = models_module fields = self.copy_fields(model) attrs.update(fields) attrs.update(self.get_extra_fields(model, fields)) # type in python2 wants str as a first argument attrs.update(Meta=type(str("Meta"), (), self.get_meta_options(model))) if self.table_name is not None: attrs["Meta"].db_table = self.table_name # Set as the default then check for overrides name = self.get_history_model_name(model) registered_models[model._meta.db_table] = model return python_2_unicode_compatible(type(str(name), self.bases, attrs))
python
def create_history_model(self, model, inherited): """ Creates a historical model to associate with the model provided. """ attrs = { "__module__": self.module, "_history_excluded_fields": self.excluded_fields, } app_module = "%s.models" % model._meta.app_label if inherited: # inherited use models module attrs["__module__"] = model.__module__ elif model.__module__ != self.module: # registered under different app attrs["__module__"] = self.module elif app_module != self.module: # Abuse an internal API because the app registry is loading. app = apps.app_configs[model._meta.app_label] models_module = app.name attrs["__module__"] = models_module fields = self.copy_fields(model) attrs.update(fields) attrs.update(self.get_extra_fields(model, fields)) # type in python2 wants str as a first argument attrs.update(Meta=type(str("Meta"), (), self.get_meta_options(model))) if self.table_name is not None: attrs["Meta"].db_table = self.table_name # Set as the default then check for overrides name = self.get_history_model_name(model) registered_models[model._meta.db_table] = model return python_2_unicode_compatible(type(str(name), self.bases, attrs))
['def', 'create_history_model', '(', 'self', ',', 'model', ',', 'inherited', ')', ':', 'attrs', '=', '{', '"__module__"', ':', 'self', '.', 'module', ',', '"_history_excluded_fields"', ':', 'self', '.', 'excluded_fields', ',', '}', 'app_module', '=', '"%s.models"', '%', 'model', '.', '_meta', '.', 'app_label', 'if', 'inherited', ':', '# inherited use models module', 'attrs', '[', '"__module__"', ']', '=', 'model', '.', '__module__', 'elif', 'model', '.', '__module__', '!=', 'self', '.', 'module', ':', '# registered under different app', 'attrs', '[', '"__module__"', ']', '=', 'self', '.', 'module', 'elif', 'app_module', '!=', 'self', '.', 'module', ':', '# Abuse an internal API because the app registry is loading.', 'app', '=', 'apps', '.', 'app_configs', '[', 'model', '.', '_meta', '.', 'app_label', ']', 'models_module', '=', 'app', '.', 'name', 'attrs', '[', '"__module__"', ']', '=', 'models_module', 'fields', '=', 'self', '.', 'copy_fields', '(', 'model', ')', 'attrs', '.', 'update', '(', 'fields', ')', 'attrs', '.', 'update', '(', 'self', '.', 'get_extra_fields', '(', 'model', ',', 'fields', ')', ')', '# type in python2 wants str as a first argument', 'attrs', '.', 'update', '(', 'Meta', '=', 'type', '(', 'str', '(', '"Meta"', ')', ',', '(', ')', ',', 'self', '.', 'get_meta_options', '(', 'model', ')', ')', ')', 'if', 'self', '.', 'table_name', 'is', 'not', 'None', ':', 'attrs', '[', '"Meta"', ']', '.', 'db_table', '=', 'self', '.', 'table_name', '# Set as the default then check for overrides', 'name', '=', 'self', '.', 'get_history_model_name', '(', 'model', ')', 'registered_models', '[', 'model', '.', '_meta', '.', 'db_table', ']', '=', 'model', 'return', 'python_2_unicode_compatible', '(', 'type', '(', 'str', '(', 'name', ')', ',', 'self', '.', 'bases', ',', 'attrs', ')', ')']
Creates a historical model to associate with the model provided.
['Creates', 'a', 'historical', 'model', 'to', 'associate', 'with', 'the', 'model', 'provided', '.']
train
https://github.com/treyhunner/django-simple-history/blob/85758ecfe608279508a3fb5b71654d3e202eb63d/simple_history/models.py#L193-L228
242
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py
promote_loops
def promote_loops( loops, index, shared ): """Turn loops into "objects" that can be processed normally""" for loop in loops: loop = list(loop) members = [index[addr] for addr in loop] external_parents = list(set([ addr for addr in sum([shared.get(addr,[]) for addr in loop],[]) if addr not in loop ])) if external_parents: if len(external_parents) == 1: # potentially a loop that's been looped... parent = index.get( external_parents[0] ) if parent['type'] == LOOP_TYPE: continue # we haven't already been looped... loop_addr = new_address( index ) shared[loop_addr] = external_parents loop_record = index[loop_addr] = { 'address': loop_addr, 'refs': loop, 'parents': external_parents, 'type': LOOP_TYPE, 'size': 0, } for member in members: # member's references must *not* point to loop... member['refs'] = [ ref for ref in member['refs'] if ref not in loop ] # member's parents are *just* the loop member['parents'][:] = [loop_addr] # each referent to loop holds a single reference to the loop rather than many to children for parent in external_parents: parent = index[parent] for member in members: rewrite_references( parent['refs'], member['address'], None ) parent['refs'].append( loop_addr )
python
def promote_loops( loops, index, shared ): """Turn loops into "objects" that can be processed normally""" for loop in loops: loop = list(loop) members = [index[addr] for addr in loop] external_parents = list(set([ addr for addr in sum([shared.get(addr,[]) for addr in loop],[]) if addr not in loop ])) if external_parents: if len(external_parents) == 1: # potentially a loop that's been looped... parent = index.get( external_parents[0] ) if parent['type'] == LOOP_TYPE: continue # we haven't already been looped... loop_addr = new_address( index ) shared[loop_addr] = external_parents loop_record = index[loop_addr] = { 'address': loop_addr, 'refs': loop, 'parents': external_parents, 'type': LOOP_TYPE, 'size': 0, } for member in members: # member's references must *not* point to loop... member['refs'] = [ ref for ref in member['refs'] if ref not in loop ] # member's parents are *just* the loop member['parents'][:] = [loop_addr] # each referent to loop holds a single reference to the loop rather than many to children for parent in external_parents: parent = index[parent] for member in members: rewrite_references( parent['refs'], member['address'], None ) parent['refs'].append( loop_addr )
['def', 'promote_loops', '(', 'loops', ',', 'index', ',', 'shared', ')', ':', 'for', 'loop', 'in', 'loops', ':', 'loop', '=', 'list', '(', 'loop', ')', 'members', '=', '[', 'index', '[', 'addr', ']', 'for', 'addr', 'in', 'loop', ']', 'external_parents', '=', 'list', '(', 'set', '(', '[', 'addr', 'for', 'addr', 'in', 'sum', '(', '[', 'shared', '.', 'get', '(', 'addr', ',', '[', ']', ')', 'for', 'addr', 'in', 'loop', ']', ',', '[', ']', ')', 'if', 'addr', 'not', 'in', 'loop', ']', ')', ')', 'if', 'external_parents', ':', 'if', 'len', '(', 'external_parents', ')', '==', '1', ':', "# potentially a loop that's been looped...", 'parent', '=', 'index', '.', 'get', '(', 'external_parents', '[', '0', ']', ')', 'if', 'parent', '[', "'type'", ']', '==', 'LOOP_TYPE', ':', 'continue', "# we haven't already been looped...", 'loop_addr', '=', 'new_address', '(', 'index', ')', 'shared', '[', 'loop_addr', ']', '=', 'external_parents', 'loop_record', '=', 'index', '[', 'loop_addr', ']', '=', '{', "'address'", ':', 'loop_addr', ',', "'refs'", ':', 'loop', ',', "'parents'", ':', 'external_parents', ',', "'type'", ':', 'LOOP_TYPE', ',', "'size'", ':', '0', ',', '}', 'for', 'member', 'in', 'members', ':', "# member's references must *not* point to loop...", 'member', '[', "'refs'", ']', '=', '[', 'ref', 'for', 'ref', 'in', 'member', '[', "'refs'", ']', 'if', 'ref', 'not', 'in', 'loop', ']', "# member's parents are *just* the loop", 'member', '[', "'parents'", ']', '[', ':', ']', '=', '[', 'loop_addr', ']', '# each referent to loop holds a single reference to the loop rather than many to children', 'for', 'parent', 'in', 'external_parents', ':', 'parent', '=', 'index', '[', 'parent', ']', 'for', 'member', 'in', 'members', ':', 'rewrite_references', '(', 'parent', '[', "'refs'", ']', ',', 'member', '[', "'address'", ']', ',', 'None', ')', 'parent', '[', "'refs'", ']', '.', 'append', '(', 'loop_addr', ')']
Turn loops into "objects" that can be processed normally
['Turn', 'loops', 'into', 'objects', 'that', 'can', 'be', 'processed', 'normally']
train
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/runsnakerun/meliaeloader.py#L82-L120
243
SuperCowPowers/workbench
workbench/server/plugin_manager.py
PluginManager.validate
def validate(self, handler): """Validate the plugin, each plugin must have the following: 1) The worker class must have an execute method: execute(self, input_data). 2) The worker class must have a dependencies list (even if it's empty). 3) The file must have a top level test() method. Args: handler: the loaded plugin. """ # Check for the test method first test_method = self.plugin_test_validation(handler) if not test_method: return None # Here we iterate through the classes found in the module and pick # the first one that satisfies the validation for name, plugin_class in inspect.getmembers(handler, inspect.isclass): if self.plugin_class_validation(plugin_class): return {'class':plugin_class, 'test':test_method} # If we're here the plugin didn't pass validation print 'Failure for plugin: %s' % (handler.__name__) print 'Validation Error: Worker class is required to have a dependencies list and an execute method' return None
python
def validate(self, handler): """Validate the plugin, each plugin must have the following: 1) The worker class must have an execute method: execute(self, input_data). 2) The worker class must have a dependencies list (even if it's empty). 3) The file must have a top level test() method. Args: handler: the loaded plugin. """ # Check for the test method first test_method = self.plugin_test_validation(handler) if not test_method: return None # Here we iterate through the classes found in the module and pick # the first one that satisfies the validation for name, plugin_class in inspect.getmembers(handler, inspect.isclass): if self.plugin_class_validation(plugin_class): return {'class':plugin_class, 'test':test_method} # If we're here the plugin didn't pass validation print 'Failure for plugin: %s' % (handler.__name__) print 'Validation Error: Worker class is required to have a dependencies list and an execute method' return None
['def', 'validate', '(', 'self', ',', 'handler', ')', ':', '# Check for the test method first', 'test_method', '=', 'self', '.', 'plugin_test_validation', '(', 'handler', ')', 'if', 'not', 'test_method', ':', 'return', 'None', '# Here we iterate through the classes found in the module and pick', '# the first one that satisfies the validation', 'for', 'name', ',', 'plugin_class', 'in', 'inspect', '.', 'getmembers', '(', 'handler', ',', 'inspect', '.', 'isclass', ')', ':', 'if', 'self', '.', 'plugin_class_validation', '(', 'plugin_class', ')', ':', 'return', '{', "'class'", ':', 'plugin_class', ',', "'test'", ':', 'test_method', '}', "# If we're here the plugin didn't pass validation", 'print', "'Failure for plugin: %s'", '%', '(', 'handler', '.', '__name__', ')', 'print', "'Validation Error: Worker class is required to have a dependencies list and an execute method'", 'return', 'None']
Validate the plugin, each plugin must have the following: 1) The worker class must have an execute method: execute(self, input_data). 2) The worker class must have a dependencies list (even if it's empty). 3) The file must have a top level test() method. Args: handler: the loaded plugin.
['Validate', 'the', 'plugin', 'each', 'plugin', 'must', 'have', 'the', 'following', ':', '1', ')', 'The', 'worker', 'class', 'must', 'have', 'an', 'execute', 'method', ':', 'execute', '(', 'self', 'input_data', ')', '.', '2', ')', 'The', 'worker', 'class', 'must', 'have', 'a', 'dependencies', 'list', '(', 'even', 'if', 'it', 's', 'empty', ')', '.', '3', ')', 'The', 'file', 'must', 'have', 'a', 'top', 'level', 'test', '()', 'method', '.']
train
https://github.com/SuperCowPowers/workbench/blob/710232756dd717f734253315e3d0b33c9628dafb/workbench/server/plugin_manager.py#L138-L162
244
ultradns/python_rest_api_client
ultra_rest_client/ultra_rest_client.py
RestApiClient.get_rrsets_by_type_owner
def get_rrsets_by_type_owner(self, zone_name, rtype, owner_name, q=None, **kwargs): """Returns the list of RRSets in the specified zone of the specified type. Arguments: zone_name -- The name of the zone. rtype -- The type of the RRSets. This can be numeric (1) or if a well-known name is defined for the type (A), you can use it instead. owner_name -- The owner name for the RRSet. If no trailing dot is supplied, the owner_name is assumed to be relative (foo). If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.) Keyword Arguments: q -- The search parameters, in a dict. Valid keys are: ttl - must match the TTL for the rrset value - substring match of the first BIND field value sort -- The sort column used to order the list. Valid values for the sort field are: TTL TYPE reverse -- Whether the list is ascending(False) or descending(True) offset -- The position in the list for the first returned element(0 based) limit -- The maximum number of rows to be returned. """ uri = "/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name params = build_params(q, kwargs) return self.rest_api_connection.get(uri, params)
python
def get_rrsets_by_type_owner(self, zone_name, rtype, owner_name, q=None, **kwargs): """Returns the list of RRSets in the specified zone of the specified type. Arguments: zone_name -- The name of the zone. rtype -- The type of the RRSets. This can be numeric (1) or if a well-known name is defined for the type (A), you can use it instead. owner_name -- The owner name for the RRSet. If no trailing dot is supplied, the owner_name is assumed to be relative (foo). If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.) Keyword Arguments: q -- The search parameters, in a dict. Valid keys are: ttl - must match the TTL for the rrset value - substring match of the first BIND field value sort -- The sort column used to order the list. Valid values for the sort field are: TTL TYPE reverse -- Whether the list is ascending(False) or descending(True) offset -- The position in the list for the first returned element(0 based) limit -- The maximum number of rows to be returned. """ uri = "/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name params = build_params(q, kwargs) return self.rest_api_connection.get(uri, params)
['def', 'get_rrsets_by_type_owner', '(', 'self', ',', 'zone_name', ',', 'rtype', ',', 'owner_name', ',', 'q', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'uri', '=', '"/v1/zones/"', '+', 'zone_name', '+', '"/rrsets/"', '+', 'rtype', '+', '"/"', '+', 'owner_name', 'params', '=', 'build_params', '(', 'q', ',', 'kwargs', ')', 'return', 'self', '.', 'rest_api_connection', '.', 'get', '(', 'uri', ',', 'params', ')']
Returns the list of RRSets in the specified zone of the specified type. Arguments: zone_name -- The name of the zone. rtype -- The type of the RRSets. This can be numeric (1) or if a well-known name is defined for the type (A), you can use it instead. owner_name -- The owner name for the RRSet. If no trailing dot is supplied, the owner_name is assumed to be relative (foo). If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.) Keyword Arguments: q -- The search parameters, in a dict. Valid keys are: ttl - must match the TTL for the rrset value - substring match of the first BIND field value sort -- The sort column used to order the list. Valid values for the sort field are: TTL TYPE reverse -- Whether the list is ascending(False) or descending(True) offset -- The position in the list for the first returned element(0 based) limit -- The maximum number of rows to be returned.
['Returns', 'the', 'list', 'of', 'RRSets', 'in', 'the', 'specified', 'zone', 'of', 'the', 'specified', 'type', '.']
train
https://github.com/ultradns/python_rest_api_client/blob/e4095f28f5cb5e258b768c06ef7cf8b1915aa5ec/ultra_rest_client/ultra_rest_client.py#L283-L308
245
PyGithub/PyGithub
github/Repository.py
Repository.create_pull
def create_pull(self, *args, **kwds): """ :calls: `POST /repos/:owner/:repo/pulls <http://developer.github.com/v3/pulls>`_ :param title: string :param body: string :param issue: :class:`github.Issue.Issue` :param base: string :param head: string :param maintainer_can_modify: bool :rtype: :class:`github.PullRequest.PullRequest` """ if len(args) + len(kwds) >= 4: return self.__create_pull_1(*args, **kwds) else: return self.__create_pull_2(*args, **kwds)
python
def create_pull(self, *args, **kwds): """ :calls: `POST /repos/:owner/:repo/pulls <http://developer.github.com/v3/pulls>`_ :param title: string :param body: string :param issue: :class:`github.Issue.Issue` :param base: string :param head: string :param maintainer_can_modify: bool :rtype: :class:`github.PullRequest.PullRequest` """ if len(args) + len(kwds) >= 4: return self.__create_pull_1(*args, **kwds) else: return self.__create_pull_2(*args, **kwds)
['def', 'create_pull', '(', 'self', ',', '*', 'args', ',', '*', '*', 'kwds', ')', ':', 'if', 'len', '(', 'args', ')', '+', 'len', '(', 'kwds', ')', '>=', '4', ':', 'return', 'self', '.', '__create_pull_1', '(', '*', 'args', ',', '*', '*', 'kwds', ')', 'else', ':', 'return', 'self', '.', '__create_pull_2', '(', '*', 'args', ',', '*', '*', 'kwds', ')']
:calls: `POST /repos/:owner/:repo/pulls <http://developer.github.com/v3/pulls>`_ :param title: string :param body: string :param issue: :class:`github.Issue.Issue` :param base: string :param head: string :param maintainer_can_modify: bool :rtype: :class:`github.PullRequest.PullRequest`
[':', 'calls', ':', 'POST', '/', 'repos', '/', ':', 'owner', '/', ':', 'repo', '/', 'pulls', '<http', ':', '//', 'developer', '.', 'github', '.', 'com', '/', 'v3', '/', 'pulls', '>', '_', ':', 'param', 'title', ':', 'string', ':', 'param', 'body', ':', 'string', ':', 'param', 'issue', ':', ':', 'class', ':', 'github', '.', 'Issue', '.', 'Issue', ':', 'param', 'base', ':', 'string', ':', 'param', 'head', ':', 'string', ':', 'param', 'maintainer_can_modify', ':', 'bool', ':', 'rtype', ':', ':', 'class', ':', 'github', '.', 'PullRequest', '.', 'PullRequest']
train
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Repository.py#L1167-L1181
246
radjkarl/imgProcessor
imgProcessor/physics/emissivity_vs_angle.py
EL_Si_module
def EL_Si_module(): ''' returns angular dependent EL emissivity of a PV module calculated of nanmedian(persp-corrected EL module/reference module) published in K. Bedrich: Quantitative Electroluminescence Measurement on PV devices PhD Thesis, 2017 ''' arr = np.array([ [2.5, 1.00281 ], [7.5, 1.00238 ], [12.5, 1.00174], [17.5, 1.00204 ], [22.5, 1.00054 ], [27.5, 0.998255], [32.5, 0.995351], [37.5, 0.991246], [42.5, 0.985304], [47.5, 0.975338], [52.5, 0.960455], [57.5, 0.937544], [62.5, 0.900607], [67.5, 0.844636], [72.5, 0.735028], [77.5, 0.57492 ], [82.5, 0.263214], [87.5, 0.123062] ]) angles = arr[:,0] vals = arr[:,1] vals[vals>1]=1 return angles, vals
python
def EL_Si_module(): ''' returns angular dependent EL emissivity of a PV module calculated of nanmedian(persp-corrected EL module/reference module) published in K. Bedrich: Quantitative Electroluminescence Measurement on PV devices PhD Thesis, 2017 ''' arr = np.array([ [2.5, 1.00281 ], [7.5, 1.00238 ], [12.5, 1.00174], [17.5, 1.00204 ], [22.5, 1.00054 ], [27.5, 0.998255], [32.5, 0.995351], [37.5, 0.991246], [42.5, 0.985304], [47.5, 0.975338], [52.5, 0.960455], [57.5, 0.937544], [62.5, 0.900607], [67.5, 0.844636], [72.5, 0.735028], [77.5, 0.57492 ], [82.5, 0.263214], [87.5, 0.123062] ]) angles = arr[:,0] vals = arr[:,1] vals[vals>1]=1 return angles, vals
['def', 'EL_Si_module', '(', ')', ':', 'arr', '=', 'np', '.', 'array', '(', '[', '[', '2.5', ',', '1.00281', ']', ',', '[', '7.5', ',', '1.00238', ']', ',', '[', '12.5', ',', '1.00174', ']', ',', '[', '17.5', ',', '1.00204', ']', ',', '[', '22.5', ',', '1.00054', ']', ',', '[', '27.5', ',', '0.998255', ']', ',', '[', '32.5', ',', '0.995351', ']', ',', '[', '37.5', ',', '0.991246', ']', ',', '[', '42.5', ',', '0.985304', ']', ',', '[', '47.5', ',', '0.975338', ']', ',', '[', '52.5', ',', '0.960455', ']', ',', '[', '57.5', ',', '0.937544', ']', ',', '[', '62.5', ',', '0.900607', ']', ',', '[', '67.5', ',', '0.844636', ']', ',', '[', '72.5', ',', '0.735028', ']', ',', '[', '77.5', ',', '0.57492', ']', ',', '[', '82.5', ',', '0.263214', ']', ',', '[', '87.5', ',', '0.123062', ']', ']', ')', 'angles', '=', 'arr', '[', ':', ',', '0', ']', 'vals', '=', 'arr', '[', ':', ',', '1', ']', 'vals', '[', 'vals', '>', '1', ']', '=', '1', 'return', 'angles', ',', 'vals']
returns angular dependent EL emissivity of a PV module calculated of nanmedian(persp-corrected EL module/reference module) published in K. Bedrich: Quantitative Electroluminescence Measurement on PV devices PhD Thesis, 2017
['returns', 'angular', 'dependent', 'EL', 'emissivity', 'of', 'a', 'PV', 'module', 'calculated', 'of', 'nanmedian', '(', 'persp', '-', 'corrected', 'EL', 'module', '/', 'reference', 'module', ')', 'published', 'in', 'K', '.', 'Bedrich', ':', 'Quantitative', 'Electroluminescence', 'Measurement', 'on', 'PV', 'devices', 'PhD', 'Thesis', '2017']
train
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/physics/emissivity_vs_angle.py#L8-L42
247
PyMySQL/Tornado-MySQL
tornado_mysql/converters.py
convert_date
def convert_date(obj): """Returns a DATE column as a date object: >>> date_or_None('2007-02-26') datetime.date(2007, 2, 26) Illegal values are returned as None: >>> date_or_None('2007-02-31') is None True >>> date_or_None('0000-00-00') is None True """ try: return datetime.date(*[ int(x) for x in obj.split('-', 2) ]) except ValueError: return None
python
def convert_date(obj): """Returns a DATE column as a date object: >>> date_or_None('2007-02-26') datetime.date(2007, 2, 26) Illegal values are returned as None: >>> date_or_None('2007-02-31') is None True >>> date_or_None('0000-00-00') is None True """ try: return datetime.date(*[ int(x) for x in obj.split('-', 2) ]) except ValueError: return None
['def', 'convert_date', '(', 'obj', ')', ':', 'try', ':', 'return', 'datetime', '.', 'date', '(', '*', '[', 'int', '(', 'x', ')', 'for', 'x', 'in', 'obj', '.', 'split', '(', "'-'", ',', '2', ')', ']', ')', 'except', 'ValueError', ':', 'return', 'None']
Returns a DATE column as a date object: >>> date_or_None('2007-02-26') datetime.date(2007, 2, 26) Illegal values are returned as None: >>> date_or_None('2007-02-31') is None True >>> date_or_None('0000-00-00') is None True
['Returns', 'a', 'DATE', 'column', 'as', 'a', 'date', 'object', ':']
train
https://github.com/PyMySQL/Tornado-MySQL/blob/75d3466e4332e43b2bf853799f1122dec5da60bc/tornado_mysql/converters.py#L202-L219
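The convert_date row above parses a MySQL DATE string into datetime.date and returns None for illegal values (its doctests still reference an older name, date_or_None). A minimal standalone check of the same behavior, with the function body copied from the row:

import datetime

def convert_date(obj):
    # Parse 'YYYY-MM-DD'; return None for values such as '0000-00-00' or '2007-02-31'.
    try:
        return datetime.date(*[int(x) for x in obj.split('-', 2)])
    except ValueError:
        return None

print(convert_date('2007-02-26'))           # 2007-02-26
print(convert_date('2007-02-31') is None)   # True
print(convert_date('0000-00-00') is None)   # True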
248
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py
brocade_interface_ext.get_vlan_brief_output_vlan_vlan_name
def get_vlan_brief_output_vlan_vlan_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vlan_brief = ET.Element("get_vlan_brief") config = get_vlan_brief output = ET.SubElement(get_vlan_brief, "output") vlan = ET.SubElement(output, "vlan") vlan_id_key = ET.SubElement(vlan, "vlan-id") vlan_id_key.text = kwargs.pop('vlan_id') vlan_name = ET.SubElement(vlan, "vlan-name") vlan_name.text = kwargs.pop('vlan_name') callback = kwargs.pop('callback', self._callback) return callback(config)
python
def get_vlan_brief_output_vlan_vlan_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vlan_brief = ET.Element("get_vlan_brief") config = get_vlan_brief output = ET.SubElement(get_vlan_brief, "output") vlan = ET.SubElement(output, "vlan") vlan_id_key = ET.SubElement(vlan, "vlan-id") vlan_id_key.text = kwargs.pop('vlan_id') vlan_name = ET.SubElement(vlan, "vlan-name") vlan_name.text = kwargs.pop('vlan_name') callback = kwargs.pop('callback', self._callback) return callback(config)
['def', 'get_vlan_brief_output_vlan_vlan_name', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'config', '=', 'ET', '.', 'Element', '(', '"config"', ')', 'get_vlan_brief', '=', 'ET', '.', 'Element', '(', '"get_vlan_brief"', ')', 'config', '=', 'get_vlan_brief', 'output', '=', 'ET', '.', 'SubElement', '(', 'get_vlan_brief', ',', '"output"', ')', 'vlan', '=', 'ET', '.', 'SubElement', '(', 'output', ',', '"vlan"', ')', 'vlan_id_key', '=', 'ET', '.', 'SubElement', '(', 'vlan', ',', '"vlan-id"', ')', 'vlan_id_key', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'vlan_id'", ')', 'vlan_name', '=', 'ET', '.', 'SubElement', '(', 'vlan', ',', '"vlan-name"', ')', 'vlan_name', '.', 'text', '=', 'kwargs', '.', 'pop', '(', "'vlan_name'", ')', 'callback', '=', 'kwargs', '.', 'pop', '(', "'callback'", ',', 'self', '.', '_callback', ')', 'return', 'callback', '(', 'config', ')']
Auto Generated Code
['Auto', 'Generated', 'Code']
train
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_interface_ext.py#L111-L125
249
mitsei/dlkit
dlkit/json_/assessment_authoring/sessions.py
AssessmentPartAdminSession.get_assessment_part_form_for_update
def get_assessment_part_form_for_update(self, assessment_part_id): """Gets the assessment part form for updating an existing assessment part. A new assessment part form should be requested for each update transaction. arg: assessment_part_id (osid.id.Id): the ``Id`` of the ``AssessmentPart`` return: (osid.assessment.authoring.AssessmentPartForm) - the assessment part form raise: NotFound - ``assessment_part_id`` is not found raise: NullArgument - ``assessment_part_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.* """ collection = JSONClientValidated('assessment_authoring', collection='AssessmentPart', runtime=self._runtime) if not isinstance(assessment_part_id, ABCId): raise errors.InvalidArgument('the argument is not a valid OSID Id') if (assessment_part_id.get_identifier_namespace() != 'assessment_authoring.AssessmentPart' or assessment_part_id.get_authority() != self._authority): raise errors.InvalidArgument() result = collection.find_one({'_id': ObjectId(assessment_part_id.get_identifier())}) mdata = {} if not result['assessmentPartId']: pass else: parent_part_id = Id(result['assessmentPartId']) mgr = self._get_provider_manager('ASSESSMENT_AUTHORING', local=True) lookup_session = mgr.get_assessment_part_lookup_session_for_bank(self._catalog_id, proxy=self._proxy) if lookup_session.get_assessment_parts_for_assessment_part(parent_part_id).available() > 1: mdata['sequestered']['is_read_only'] = True mdata['sequestered']['is_required'] = True obj_form = objects.AssessmentPartForm(osid_object_map=result, runtime=self._runtime, proxy=self._proxy, mdata=mdata) self._forms[obj_form.get_id().get_identifier()] = not UPDATED return obj_form
python
def get_assessment_part_form_for_update(self, assessment_part_id): """Gets the assessment part form for updating an existing assessment part. A new assessment part form should be requested for each update transaction. arg: assessment_part_id (osid.id.Id): the ``Id`` of the ``AssessmentPart`` return: (osid.assessment.authoring.AssessmentPartForm) - the assessment part form raise: NotFound - ``assessment_part_id`` is not found raise: NullArgument - ``assessment_part_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.* """ collection = JSONClientValidated('assessment_authoring', collection='AssessmentPart', runtime=self._runtime) if not isinstance(assessment_part_id, ABCId): raise errors.InvalidArgument('the argument is not a valid OSID Id') if (assessment_part_id.get_identifier_namespace() != 'assessment_authoring.AssessmentPart' or assessment_part_id.get_authority() != self._authority): raise errors.InvalidArgument() result = collection.find_one({'_id': ObjectId(assessment_part_id.get_identifier())}) mdata = {} if not result['assessmentPartId']: pass else: parent_part_id = Id(result['assessmentPartId']) mgr = self._get_provider_manager('ASSESSMENT_AUTHORING', local=True) lookup_session = mgr.get_assessment_part_lookup_session_for_bank(self._catalog_id, proxy=self._proxy) if lookup_session.get_assessment_parts_for_assessment_part(parent_part_id).available() > 1: mdata['sequestered']['is_read_only'] = True mdata['sequestered']['is_required'] = True obj_form = objects.AssessmentPartForm(osid_object_map=result, runtime=self._runtime, proxy=self._proxy, mdata=mdata) self._forms[obj_form.get_id().get_identifier()] = not UPDATED return obj_form
['def', 'get_assessment_part_form_for_update', '(', 'self', ',', 'assessment_part_id', ')', ':', 'collection', '=', 'JSONClientValidated', '(', "'assessment_authoring'", ',', 'collection', '=', "'AssessmentPart'", ',', 'runtime', '=', 'self', '.', '_runtime', ')', 'if', 'not', 'isinstance', '(', 'assessment_part_id', ',', 'ABCId', ')', ':', 'raise', 'errors', '.', 'InvalidArgument', '(', "'the argument is not a valid OSID Id'", ')', 'if', '(', 'assessment_part_id', '.', 'get_identifier_namespace', '(', ')', '!=', "'assessment_authoring.AssessmentPart'", 'or', 'assessment_part_id', '.', 'get_authority', '(', ')', '!=', 'self', '.', '_authority', ')', ':', 'raise', 'errors', '.', 'InvalidArgument', '(', ')', 'result', '=', 'collection', '.', 'find_one', '(', '{', "'_id'", ':', 'ObjectId', '(', 'assessment_part_id', '.', 'get_identifier', '(', ')', ')', '}', ')', 'mdata', '=', '{', '}', 'if', 'not', 'result', '[', "'assessmentPartId'", ']', ':', 'pass', 'else', ':', 'parent_part_id', '=', 'Id', '(', 'result', '[', "'assessmentPartId'", ']', ')', 'mgr', '=', 'self', '.', '_get_provider_manager', '(', "'ASSESSMENT_AUTHORING'", ',', 'local', '=', 'True', ')', 'lookup_session', '=', 'mgr', '.', 'get_assessment_part_lookup_session_for_bank', '(', 'self', '.', '_catalog_id', ',', 'proxy', '=', 'self', '.', '_proxy', ')', 'if', 'lookup_session', '.', 'get_assessment_parts_for_assessment_part', '(', 'parent_part_id', ')', '.', 'available', '(', ')', '>', '1', ':', 'mdata', '[', "'sequestered'", ']', '[', "'is_read_only'", ']', '=', 'True', 'mdata', '[', "'sequestered'", ']', '[', "'is_required'", ']', '=', 'True', 'obj_form', '=', 'objects', '.', 'AssessmentPartForm', '(', 'osid_object_map', '=', 'result', ',', 'runtime', '=', 'self', '.', '_runtime', ',', 'proxy', '=', 'self', '.', '_proxy', ',', 'mdata', '=', 'mdata', ')', 'self', '.', '_forms', '[', 'obj_form', '.', 'get_id', '(', ')', '.', 'get_identifier', '(', ')', ']', '=', 'not', 'UPDATED', 'return', 'obj_form']
Gets the assessment part form for updating an existing assessment part. A new assessment part form should be requested for each update transaction. arg: assessment_part_id (osid.id.Id): the ``Id`` of the ``AssessmentPart`` return: (osid.assessment.authoring.AssessmentPartForm) - the assessment part form raise: NotFound - ``assessment_part_id`` is not found raise: NullArgument - ``assessment_part_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.*
['Gets', 'the', 'assessment', 'part', 'form', 'for', 'updating', 'an', 'existing', 'assessment', 'part', '.']
train
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment_authoring/sessions.py#L968-L1011
250
wrboyce/telegrambot
telegrambot/api/__init__.py
TelegramAPIMixin.send_chat_action
def send_chat_action(self, action, to): """ Use this method when you need to tell the user that something is happening on the bot's side. The status is set for 5 seconds or less (when a message arrives from your bot, Telegram clients clear its typing status). """ payload = dict(chat_id=to, action=action) return self._get('sendChatAction', payload)
python
def send_chat_action(self, action, to): """ Use this method when you need to tell the user that something is happening on the bot's side. The status is set for 5 seconds or less (when a message arrives from your bot, Telegram clients clear its typing status). """ payload = dict(chat_id=to, action=action) return self._get('sendChatAction', payload)
['def', 'send_chat_action', '(', 'self', ',', 'action', ',', 'to', ')', ':', 'payload', '=', 'dict', '(', 'chat_id', '=', 'to', ',', 'action', '=', 'action', ')', 'return', 'self', '.', '_get', '(', "'sendChatAction'", ',', 'payload', ')']
Use this method when you need to tell the user that something is happening on the bot's side. The status is set for 5 seconds or less (when a message arrives from your bot, Telegram clients clear its typing status).
['Use', 'this', 'method', 'when', 'you', 'need', 'to', 'tell', 'the', 'user', 'that', 'something', 'is', 'happening', 'on', 'the', 'bot', 's', 'side', '.', 'The', 'status', 'is', 'set', 'for', '5', 'seconds', 'or', 'less', '(', 'when', 'a', 'message', 'arrives', 'from', 'your', 'bot', 'Telegram', 'clients', 'clear', 'its', 'typing', 'status', ')', '.']
train
https://github.com/wrboyce/telegrambot/blob/c35ce19886df4c306a2a19851cc1f63e3066d70d/telegrambot/api/__init__.py#L178-L185
251
useblocks/groundwork
groundwork/patterns/gw_base_pattern.py
SignalsPlugin.get_receiver
def get_receiver(self, receiver=None): """ Returns a single receiver or a dictionary of receivers for this plugin. """ return self.__app.signals.get_receiver(receiver, self._plugin)
python
def get_receiver(self, receiver=None): """ Returns a single receiver or a dictionary of receivers for this plugin. """ return self.__app.signals.get_receiver(receiver, self._plugin)
['def', 'get_receiver', '(', 'self', ',', 'receiver', '=', 'None', ')', ':', 'return', 'self', '.', '__app', '.', 'signals', '.', 'get_receiver', '(', 'receiver', ',', 'self', '.', '_plugin', ')']
Returns a single receiver or a dictionary of receivers for this plugin.
['Returns', 'a', 'single', 'receiver', 'or', 'a', 'dictionary', 'of', 'receivers', 'for', 'this', 'plugin', '.']
train
https://github.com/useblocks/groundwork/blob/d34fce43f54246ca4db0f7b89e450dcdc847c68c/groundwork/patterns/gw_base_pattern.py#L300-L304
252
bokeh/bokeh
bokeh/document/events.py
ColumnDataChangedEvent.dispatch
def dispatch(self, receiver): ''' Dispatch handling of this event to a receiver. This method will invoke ``receiver._column_data_changed`` if it exists. ''' super(ColumnDataChangedEvent, self).dispatch(receiver) if hasattr(receiver, '_column_data_changed'): receiver._column_data_changed(self)
python
def dispatch(self, receiver): ''' Dispatch handling of this event to a receiver. This method will invoke ``receiver._column_data_changed`` if it exists. ''' super(ColumnDataChangedEvent, self).dispatch(receiver) if hasattr(receiver, '_column_data_changed'): receiver._column_data_changed(self)
['def', 'dispatch', '(', 'self', ',', 'receiver', ')', ':', 'super', '(', 'ColumnDataChangedEvent', ',', 'self', ')', '.', 'dispatch', '(', 'receiver', ')', 'if', 'hasattr', '(', 'receiver', ',', "'_column_data_changed'", ')', ':', 'receiver', '.', '_column_data_changed', '(', 'self', ')']
Dispatch handling of this event to a receiver. This method will invoke ``receiver._column_data_changed`` if it exists.
['Dispatch', 'handling', 'of', 'this', 'event', 'to', 'a', 'receiver', '.']
train
https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/document/events.py#L337-L345
253
epfl-lts2/pygsp
pygsp/utils.py
import_classes
def import_classes(names, src, dst): """Import classes in package from their implementation modules.""" for name in names: module = importlib.import_module('pygsp.' + src + '.' + name.lower()) setattr(sys.modules['pygsp.' + dst], name, getattr(module, name))
python
def import_classes(names, src, dst): """Import classes in package from their implementation modules.""" for name in names: module = importlib.import_module('pygsp.' + src + '.' + name.lower()) setattr(sys.modules['pygsp.' + dst], name, getattr(module, name))
['def', 'import_classes', '(', 'names', ',', 'src', ',', 'dst', ')', ':', 'for', 'name', 'in', 'names', ':', 'module', '=', 'importlib', '.', 'import_module', '(', "'pygsp.'", '+', 'src', '+', "'.'", '+', 'name', '.', 'lower', '(', ')', ')', 'setattr', '(', 'sys', '.', 'modules', '[', "'pygsp.'", '+', 'dst', ']', ',', 'name', ',', 'getattr', '(', 'module', ',', 'name', ')', ')']
Import classes in package from their implementation modules.
['Import', 'classes', 'in', 'package', 'from', 'their', 'implementation', 'modules', '.']
train
https://github.com/epfl-lts2/pygsp/blob/8ce5bde39206129287375af24fdbcd7edddca8c5/pygsp/utils.py#L355-L359
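The import_classes row above re-exports classes from per-class implementation modules into the pygsp package namespace via importlib and setattr. The sketch below shows the same mechanism against the standard library so it runs anywhere; it collapses the per-class module lookup into a single source module, and the module and attribute names are chosen purely for illustration, not taken from pygsp.

import importlib
import sys

def reexport(names, src_module, dst_module):
    # Bind each named attribute of src_module onto dst_module, mirroring the setattr pattern above.
    src = importlib.import_module(src_module)
    dst = sys.modules[dst_module]
    for name in names:
        setattr(dst, name, getattr(src, name))

reexport(['date', 'timedelta'], 'datetime', __name__)
print(date(2020, 1, 1) + timedelta(days=1))  # the names exist here only because of the setattr above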
254
thiagopbueno/pyrddl
pyrddl/rddl.py
RDDL._build_fluent_table
def _build_fluent_table(self): '''Builds the fluent table for each RDDL pvariable.''' self.fluent_table = collections.OrderedDict() for name, size in zip(self.domain.non_fluent_ordering, self.non_fluent_size): non_fluent = self.domain.non_fluents[name] self.fluent_table[name] = (non_fluent, size) for name, size in zip(self.domain.state_fluent_ordering, self.state_size): fluent = self.domain.state_fluents[name] self.fluent_table[name] = (fluent, size) for name, size in zip(self.domain.action_fluent_ordering, self.action_size): fluent = self.domain.action_fluents[name] self.fluent_table[name] = (fluent, size) for name, size in zip(self.domain.interm_fluent_ordering, self.interm_size): fluent = self.domain.intermediate_fluents[name] self.fluent_table[name] = (fluent, size)
python
def _build_fluent_table(self): '''Builds the fluent table for each RDDL pvariable.''' self.fluent_table = collections.OrderedDict() for name, size in zip(self.domain.non_fluent_ordering, self.non_fluent_size): non_fluent = self.domain.non_fluents[name] self.fluent_table[name] = (non_fluent, size) for name, size in zip(self.domain.state_fluent_ordering, self.state_size): fluent = self.domain.state_fluents[name] self.fluent_table[name] = (fluent, size) for name, size in zip(self.domain.action_fluent_ordering, self.action_size): fluent = self.domain.action_fluents[name] self.fluent_table[name] = (fluent, size) for name, size in zip(self.domain.interm_fluent_ordering, self.interm_size): fluent = self.domain.intermediate_fluents[name] self.fluent_table[name] = (fluent, size)
['def', '_build_fluent_table', '(', 'self', ')', ':', 'self', '.', 'fluent_table', '=', 'collections', '.', 'OrderedDict', '(', ')', 'for', 'name', ',', 'size', 'in', 'zip', '(', 'self', '.', 'domain', '.', 'non_fluent_ordering', ',', 'self', '.', 'non_fluent_size', ')', ':', 'non_fluent', '=', 'self', '.', 'domain', '.', 'non_fluents', '[', 'name', ']', 'self', '.', 'fluent_table', '[', 'name', ']', '=', '(', 'non_fluent', ',', 'size', ')', 'for', 'name', ',', 'size', 'in', 'zip', '(', 'self', '.', 'domain', '.', 'state_fluent_ordering', ',', 'self', '.', 'state_size', ')', ':', 'fluent', '=', 'self', '.', 'domain', '.', 'state_fluents', '[', 'name', ']', 'self', '.', 'fluent_table', '[', 'name', ']', '=', '(', 'fluent', ',', 'size', ')', 'for', 'name', ',', 'size', 'in', 'zip', '(', 'self', '.', 'domain', '.', 'action_fluent_ordering', ',', 'self', '.', 'action_size', ')', ':', 'fluent', '=', 'self', '.', 'domain', '.', 'action_fluents', '[', 'name', ']', 'self', '.', 'fluent_table', '[', 'name', ']', '=', '(', 'fluent', ',', 'size', ')', 'for', 'name', ',', 'size', 'in', 'zip', '(', 'self', '.', 'domain', '.', 'interm_fluent_ordering', ',', 'self', '.', 'interm_size', ')', ':', 'fluent', '=', 'self', '.', 'domain', '.', 'intermediate_fluents', '[', 'name', ']', 'self', '.', 'fluent_table', '[', 'name', ']', '=', '(', 'fluent', ',', 'size', ')']
Builds the fluent table for each RDDL pvariable.
['Builds', 'the', 'fluent', 'table', 'for', 'each', 'RDDL', 'pvariable', '.']
train
https://github.com/thiagopbueno/pyrddl/blob/3bcfa850b1a7532c7744358f3c6b9e0f8ab978c9/pyrddl/rddl.py#L73-L91
255
gwastro/pycbc
pycbc/strain/gate.py
_gates_from_cli
def _gates_from_cli(opts, gate_opt): """Parses the given `gate_opt` into something understandable by `strain.gate_data`. """ gates = {} if getattr(opts, gate_opt) is None: return gates for gate in getattr(opts, gate_opt): try: ifo, central_time, half_dur, taper_dur = gate.split(':') central_time = float(central_time) half_dur = float(half_dur) taper_dur = float(taper_dur) except ValueError: raise ValueError("--gate {} not formatted correctly; ".format( gate) + "see help") try: gates[ifo].append((central_time, half_dur, taper_dur)) except KeyError: gates[ifo] = [(central_time, half_dur, taper_dur)] return gates
python
def _gates_from_cli(opts, gate_opt): """Parses the given `gate_opt` into something understandable by `strain.gate_data`. """ gates = {} if getattr(opts, gate_opt) is None: return gates for gate in getattr(opts, gate_opt): try: ifo, central_time, half_dur, taper_dur = gate.split(':') central_time = float(central_time) half_dur = float(half_dur) taper_dur = float(taper_dur) except ValueError: raise ValueError("--gate {} not formatted correctly; ".format( gate) + "see help") try: gates[ifo].append((central_time, half_dur, taper_dur)) except KeyError: gates[ifo] = [(central_time, half_dur, taper_dur)] return gates
['def', '_gates_from_cli', '(', 'opts', ',', 'gate_opt', ')', ':', 'gates', '=', '{', '}', 'if', 'getattr', '(', 'opts', ',', 'gate_opt', ')', 'is', 'None', ':', 'return', 'gates', 'for', 'gate', 'in', 'getattr', '(', 'opts', ',', 'gate_opt', ')', ':', 'try', ':', 'ifo', ',', 'central_time', ',', 'half_dur', ',', 'taper_dur', '=', 'gate', '.', 'split', '(', "':'", ')', 'central_time', '=', 'float', '(', 'central_time', ')', 'half_dur', '=', 'float', '(', 'half_dur', ')', 'taper_dur', '=', 'float', '(', 'taper_dur', ')', 'except', 'ValueError', ':', 'raise', 'ValueError', '(', '"--gate {} not formatted correctly; "', '.', 'format', '(', 'gate', ')', '+', '"see help"', ')', 'try', ':', 'gates', '[', 'ifo', ']', '.', 'append', '(', '(', 'central_time', ',', 'half_dur', ',', 'taper_dur', ')', ')', 'except', 'KeyError', ':', 'gates', '[', 'ifo', ']', '=', '[', '(', 'central_time', ',', 'half_dur', ',', 'taper_dur', ')', ']', 'return', 'gates']
Parses the given `gate_opt` into something understandable by `strain.gate_data`.
['Parses', 'the', 'given', 'gate_opt', 'into', 'something', 'understandable', 'by', 'strain', '.', 'gate_data', '.']
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/strain/gate.py#L21-L41
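The _gates_from_cli row above parses --gate options of the form ifo:central_time:half_duration:taper_duration into a per-detector dict. The standalone restatement below isolates that parse; the argparse wiring is assumed for the demo, since pycbc's real option registration is not shown in this row.

import argparse

def parse_gates(gate_strings):
    # Turn ['H1:1126259462.0:8:4', ...] into {'H1': [(1126259462.0, 8.0, 4.0)], ...}.
    gates = {}
    for gate in gate_strings or []:
        ifo, central, half, taper = gate.split(':')
        gates.setdefault(ifo, []).append((float(central), float(half), float(taper)))
    return gates

parser = argparse.ArgumentParser()
parser.add_argument('--gate', action='append')
opts = parser.parse_args(['--gate', 'H1:1126259462.0:8:4', '--gate', 'L1:1126259461.5:8:4'])
print(parse_gates(opts.gate))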
256
klmitch/workq
workq.py
WorkQueue.add
def add(self, item): """ Add an item to the work queue. :param item: The work item to add. An item may be of any type; however, if it is not hashable, then the work queue must either be initialized with ``unique`` set to ``False``, or a ``key`` callable must have been provided. """ # Are we to uniquify work items? if self._unique: key = self._key(item) if self._key else item # If it already has been added to the queue, do nothing if key in self._seen: return self._seen.add(key) # Add the item to the queue self._work.append(item) # We'll keep a count of the number of items that have been # through the queue self._count += 1
python
def add(self, item): """ Add an item to the work queue. :param item: The work item to add. An item may be of any type; however, if it is not hashable, then the work queue must either be initialized with ``unique`` set to ``False``, or a ``key`` callable must have been provided. """ # Are we to uniquify work items? if self._unique: key = self._key(item) if self._key else item # If it already has been added to the queue, do nothing if key in self._seen: return self._seen.add(key) # Add the item to the queue self._work.append(item) # We'll keep a count of the number of items that have been # through the queue self._count += 1
['def', 'add', '(', 'self', ',', 'item', ')', ':', '# Are we to uniquify work items?', 'if', 'self', '.', '_unique', ':', 'key', '=', 'self', '.', '_key', '(', 'item', ')', 'if', 'self', '.', '_key', 'else', 'item', '# If it already has been added to the queue, do nothing', 'if', 'key', 'in', 'self', '.', '_seen', ':', 'return', 'self', '.', '_seen', '.', 'add', '(', 'key', ')', '# Add the item to the queue', 'self', '.', '_work', '.', 'append', '(', 'item', ')', "# We'll keep a count of the number of items that have been", '# through the queue', 'self', '.', '_count', '+=', '1']
Add an item to the work queue. :param item: The work item to add. An item may be of any type; however, if it is not hashable, then the work queue must either be initialized with ``unique`` set to ``False``, or a ``key`` callable must have been provided.
['Add', 'an', 'item', 'to', 'the', 'work', 'queue', '.']
train
https://github.com/klmitch/workq/blob/6b26c7546947bd0b0c98d78cf4653411a1d09c55/workq.py#L96-L122
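The WorkQueue.add row above relies on attributes (_unique, _key, _seen, _work, _count) whose initialization lives outside this row. The minimal queue below reproduces the add() semantics with an assumed constructor, so the uniqueness and key behavior can be exercised; it is a sketch, not the workq implementation.

class MiniWorkQueue:
    def __init__(self, unique=True, key=None):
        self._unique = unique      # drop items whose key has already been seen?
        self._key = key            # optional callable deriving the dedup key
        self._seen = set()
        self._work = []
        self._count = 0

    def add(self, item):
        # Mirror of the add() logic in the row above.
        if self._unique:
            k = self._key(item) if self._key else item
            if k in self._seen:
                return
            self._seen.add(k)
        self._work.append(item)
        self._count += 1

q = MiniWorkQueue(key=lambda d: d['id'])
q.add({'id': 1, 'payload': 'a'})
q.add({'id': 1, 'payload': 'b'})   # ignored: same key
print(len(q._work), q._count)      # 1 1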
257
DataONEorg/d1_python
lib_client/src/d1_client/solr_client.py
SolrClient.get_field_min_max
def get_field_min_max(self, name, **query_dict): """Returns the minimum and maximum values of the specified field. This requires two search calls to the service, each requesting a single value of a single field. @param name(string) Name of the field @param q(string) Query identifying range of records for min and max values @param fq(string) Filter restricting range of query @return list of [min, max] """ param_dict = query_dict.copy() param_dict.update({'rows': 1, 'fl': name, 'sort': '%s asc' % name}) try: min_resp_dict = self._post_query(**param_dict) param_dict['sort'] = '%s desc' % name max_resp_dict = self._post_query(**param_dict) return ( min_resp_dict['response']['docs'][0][name], max_resp_dict['response']['docs'][0][name], ) except Exception: self._log.exception('Exception') raise
python
def get_field_min_max(self, name, **query_dict): """Returns the minimum and maximum values of the specified field. This requires two search calls to the service, each requesting a single value of a single field. @param name(string) Name of the field @param q(string) Query identifying range of records for min and max values @param fq(string) Filter restricting range of query @return list of [min, max] """ param_dict = query_dict.copy() param_dict.update({'rows': 1, 'fl': name, 'sort': '%s asc' % name}) try: min_resp_dict = self._post_query(**param_dict) param_dict['sort'] = '%s desc' % name max_resp_dict = self._post_query(**param_dict) return ( min_resp_dict['response']['docs'][0][name], max_resp_dict['response']['docs'][0][name], ) except Exception: self._log.exception('Exception') raise
['def', 'get_field_min_max', '(', 'self', ',', 'name', ',', '*', '*', 'query_dict', ')', ':', 'param_dict', '=', 'query_dict', '.', 'copy', '(', ')', 'param_dict', '.', 'update', '(', '{', "'rows'", ':', '1', ',', "'fl'", ':', 'name', ',', "'sort'", ':', "'%s asc'", '%', 'name', '}', ')', 'try', ':', 'min_resp_dict', '=', 'self', '.', '_post_query', '(', '*', '*', 'param_dict', ')', 'param_dict', '[', "'sort'", ']', '=', "'%s desc'", '%', 'name', 'max_resp_dict', '=', 'self', '.', '_post_query', '(', '*', '*', 'param_dict', ')', 'return', '(', 'min_resp_dict', '[', "'response'", ']', '[', "'docs'", ']', '[', '0', ']', '[', 'name', ']', ',', 'max_resp_dict', '[', "'response'", ']', '[', "'docs'", ']', '[', '0', ']', '[', 'name', ']', ',', ')', 'except', 'Exception', ':', 'self', '.', '_log', '.', 'exception', '(', "'Exception'", ')', 'raise']
Returns the minimum and maximum values of the specified field. This requires two search calls to the service, each requesting a single value of a single field. @param name(string) Name of the field @param q(string) Query identifying range of records for min and max values @param fq(string) Filter restricting range of query @return list of [min, max]
['Returns', 'the', 'minimum', 'and', 'maximum', 'values', 'of', 'the', 'specified', 'field', '.', 'This', 'requires', 'two', 'search', 'calls', 'to', 'the', 'service', 'each', 'requesting', 'a', 'single', 'value', 'of', 'a', 'single', 'field', '.']
train
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_client/src/d1_client/solr_client.py#L232-L256
258
Alignak-monitoring/alignak
alignak/objects/timeperiod.py
Timeperiod.clean_cache
def clean_cache(self): """ Clean cache with entries older than now because not used in future ;) :return: None """ now = int(time.time()) t_to_del = [] for timestamp in self.cache: if timestamp < now: t_to_del.append(timestamp) for timestamp in t_to_del: del self.cache[timestamp] # same for the invalid cache t_to_del = [] for timestamp in self.invalid_cache: if timestamp < now: t_to_del.append(timestamp) for timestamp in t_to_del: del self.invalid_cache[timestamp]
python
def clean_cache(self): """ Clean cache with entries older than now because not used in future ;) :return: None """ now = int(time.time()) t_to_del = [] for timestamp in self.cache: if timestamp < now: t_to_del.append(timestamp) for timestamp in t_to_del: del self.cache[timestamp] # same for the invalid cache t_to_del = [] for timestamp in self.invalid_cache: if timestamp < now: t_to_del.append(timestamp) for timestamp in t_to_del: del self.invalid_cache[timestamp]
['def', 'clean_cache', '(', 'self', ')', ':', 'now', '=', 'int', '(', 'time', '.', 'time', '(', ')', ')', 't_to_del', '=', '[', ']', 'for', 'timestamp', 'in', 'self', '.', 'cache', ':', 'if', 'timestamp', '<', 'now', ':', 't_to_del', '.', 'append', '(', 'timestamp', ')', 'for', 'timestamp', 'in', 't_to_del', ':', 'del', 'self', '.', 'cache', '[', 'timestamp', ']', '# same for the invalid cache', 't_to_del', '=', '[', ']', 'for', 'timestamp', 'in', 'self', '.', 'invalid_cache', ':', 'if', 'timestamp', '<', 'now', ':', 't_to_del', '.', 'append', '(', 'timestamp', ')', 'for', 'timestamp', 'in', 't_to_del', ':', 'del', 'self', '.', 'invalid_cache', '[', 'timestamp', ']']
Clean cache with entries older than now because not used in future ;) :return: None
['Clean', 'cache', 'with', 'entries', 'older', 'than', 'now', 'because', 'not', 'used', 'in', 'future', ';', ')']
train
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/objects/timeperiod.py#L381-L401
259
codenerix/django-codenerix
codenerix/views.py
GenList.set_context_json
def set_context_json(self, jsonquery): ''' Get a json parameter and rebuild the context back to a dictionary (probably kwargs) ''' # Make sure we are getting dicts if type(jsonquery) != dict: raise IOError("set_json_context() method can be called only with dictionaries, you gave me a '{}'".format(type(jsonquery))) # Set we will answer json to this request self.json = True # Transfer keys newget = {} for key in ['search', 'search_filter_button', 'page', 'pages_to_bring', 'rowsperpage', 'filters', 'year', 'month', 'day', 'hour', 'minute', 'second']: if key in jsonquery: newget[key] = jsonquery[key] # Add transformed ordering json_ordering = jsonquery.get('ordering', None) if json_ordering: # Convert to list ordering = [] for key in json_ordering: ordering.append({key: jsonquery['ordering'][key]}) # Order the result from ordering # ordering = sorted(ordering, key=lambda x: abs(x.values()[0])) ordering = sorted(ordering, key=lambda x: abs(list(x.values())[0])) # Save ordering newget['ordering'] = [] for orderer in ordering: key = list(orderer.keys())[0] value = orderer[key] if value > 0: value = 'asc' elif value < 0: value = 'desc' else: value = None if value: newget['ordering'].append({key: value}) # Get listid newget['listid'] = jsonquery.get("listid", None) # Get elementid newget['elementid'] = jsonquery.get("elementid", None) # Return new get return newget
python
def set_context_json(self, jsonquery): ''' Get a json parameter and rebuild the context back to a dictionary (probably kwargs) ''' # Make sure we are getting dicts if type(jsonquery) != dict: raise IOError("set_json_context() method can be called only with dictionaries, you gave me a '{}'".format(type(jsonquery))) # Set we will answer json to this request self.json = True # Transfer keys newget = {} for key in ['search', 'search_filter_button', 'page', 'pages_to_bring', 'rowsperpage', 'filters', 'year', 'month', 'day', 'hour', 'minute', 'second']: if key in jsonquery: newget[key] = jsonquery[key] # Add transformed ordering json_ordering = jsonquery.get('ordering', None) if json_ordering: # Convert to list ordering = [] for key in json_ordering: ordering.append({key: jsonquery['ordering'][key]}) # Order the result from ordering # ordering = sorted(ordering, key=lambda x: abs(x.values()[0])) ordering = sorted(ordering, key=lambda x: abs(list(x.values())[0])) # Save ordering newget['ordering'] = [] for orderer in ordering: key = list(orderer.keys())[0] value = orderer[key] if value > 0: value = 'asc' elif value < 0: value = 'desc' else: value = None if value: newget['ordering'].append({key: value}) # Get listid newget['listid'] = jsonquery.get("listid", None) # Get elementid newget['elementid'] = jsonquery.get("elementid", None) # Return new get return newget
['def', 'set_context_json', '(', 'self', ',', 'jsonquery', ')', ':', '# Make sure we are getting dicts', 'if', 'type', '(', 'jsonquery', ')', '!=', 'dict', ':', 'raise', 'IOError', '(', '"set_json_context() method can be called only with dictionaries, you gave me a \'{}\'"', '.', 'format', '(', 'type', '(', 'jsonquery', ')', ')', ')', '# Set we will answer json to this request', 'self', '.', 'json', '=', 'True', '# Transfer keys', 'newget', '=', '{', '}', 'for', 'key', 'in', '[', "'search'", ',', "'search_filter_button'", ',', "'page'", ',', "'pages_to_bring'", ',', "'rowsperpage'", ',', "'filters'", ',', "'year'", ',', "'month'", ',', "'day'", ',', "'hour'", ',', "'minute'", ',', "'second'", ']', ':', 'if', 'key', 'in', 'jsonquery', ':', 'newget', '[', 'key', ']', '=', 'jsonquery', '[', 'key', ']', '# Add transformed ordering', 'json_ordering', '=', 'jsonquery', '.', 'get', '(', "'ordering'", ',', 'None', ')', 'if', 'json_ordering', ':', '# Convert to list', 'ordering', '=', '[', ']', 'for', 'key', 'in', 'json_ordering', ':', 'ordering', '.', 'append', '(', '{', 'key', ':', 'jsonquery', '[', "'ordering'", ']', '[', 'key', ']', '}', ')', '# Order the result from ordering', '# ordering = sorted(ordering, key=lambda x: abs(x.values()[0]))', 'ordering', '=', 'sorted', '(', 'ordering', ',', 'key', '=', 'lambda', 'x', ':', 'abs', '(', 'list', '(', 'x', '.', 'values', '(', ')', ')', '[', '0', ']', ')', ')', '# Save ordering', 'newget', '[', "'ordering'", ']', '=', '[', ']', 'for', 'orderer', 'in', 'ordering', ':', 'key', '=', 'list', '(', 'orderer', '.', 'keys', '(', ')', ')', '[', '0', ']', 'value', '=', 'orderer', '[', 'key', ']', 'if', 'value', '>', '0', ':', 'value', '=', "'asc'", 'elif', 'value', '<', '0', ':', 'value', '=', "'desc'", 'else', ':', 'value', '=', 'None', 'if', 'value', ':', 'newget', '[', "'ordering'", ']', '.', 'append', '(', '{', 'key', ':', 'value', '}', ')', '# Get listid', 'newget', '[', "'listid'", ']', '=', 'jsonquery', '.', 'get', '(', '"listid"', ',', 'None', ')', '# Get elementid', 'newget', '[', "'elementid'", ']', '=', 'jsonquery', '.', 'get', '(', '"elementid"', ',', 'None', ')', '# Return new get', 'return', 'newget']
Get a json parameter and rebuild the context back to a dictionary (probably kwargs)
['Get', 'a', 'json', 'parameter', 'and', 'rebuild', 'the', 'context', 'back', 'to', 'a', 'dictionary', '(', 'probably', 'kwargs', ')']
train
https://github.com/codenerix/django-codenerix/blob/1f5527b352141caaee902b37b2648791a06bd57d/codenerix/views.py#L2632-L2682
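The set_context_json row above converts a JSON 'ordering' mapping of {field: signed priority} into an ordered list of {field: 'asc'|'desc'}, sorted by absolute priority with zero entries dropped. That transform in isolation, outside the Django view, using only the logic visible in the row:

def ordering_to_list(json_ordering):
    # {'name': 1, 'price': -2, 'stock': 0} -> [{'name': 'asc'}, {'price': 'desc'}]
    pairs = sorted(json_ordering.items(), key=lambda kv: abs(kv[1]))
    result = []
    for field, priority in pairs:
        if priority > 0:
            result.append({field: 'asc'})
        elif priority < 0:
            result.append({field: 'desc'})
        # priority == 0 means the field is not ordered on: skip it
    return result

print(ordering_to_list({'name': 1, 'price': -2, 'stock': 0}))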
260
valohai/ulid2
ulid2.py
ulid_to_binary
def ulid_to_binary(ulid): """ Convert an ULID to its binary representation. :param ulid: An ULID (either as UUID, base32 ULID or binary) :return: Bytestring of length 16 :rtype: bytes """ if isinstance(ulid, uuid.UUID): return ulid.bytes if isinstance(ulid, (text_type, bytes)) and len(ulid) == 26: return decode_ulid_base32(ulid) if isinstance(ulid, (bytes, bytearray)) and len(ulid) == 16: return ulid raise InvalidULID('can not convert ulid %r to binary' % ulid)
python
def ulid_to_binary(ulid): """ Convert an ULID to its binary representation. :param ulid: An ULID (either as UUID, base32 ULID or binary) :return: Bytestring of length 16 :rtype: bytes """ if isinstance(ulid, uuid.UUID): return ulid.bytes if isinstance(ulid, (text_type, bytes)) and len(ulid) == 26: return decode_ulid_base32(ulid) if isinstance(ulid, (bytes, bytearray)) and len(ulid) == 16: return ulid raise InvalidULID('can not convert ulid %r to binary' % ulid)
['def', 'ulid_to_binary', '(', 'ulid', ')', ':', 'if', 'isinstance', '(', 'ulid', ',', 'uuid', '.', 'UUID', ')', ':', 'return', 'ulid', '.', 'bytes', 'if', 'isinstance', '(', 'ulid', ',', '(', 'text_type', ',', 'bytes', ')', ')', 'and', 'len', '(', 'ulid', ')', '==', '26', ':', 'return', 'decode_ulid_base32', '(', 'ulid', ')', 'if', 'isinstance', '(', 'ulid', ',', '(', 'bytes', ',', 'bytearray', ')', ')', 'and', 'len', '(', 'ulid', ')', '==', '16', ':', 'return', 'ulid', 'raise', 'InvalidULID', '(', "'can not convert ulid %r to binary'", '%', 'ulid', ')']
Convert an ULID to its binary representation. :param ulid: An ULID (either as UUID, base32 ULID or binary) :return: Bytestring of length 16 :rtype: bytes
['Convert', 'an', 'ULID', 'to', 'its', 'binary', 'representation', '.']
train
https://github.com/valohai/ulid2/blob/cebc523ac70c5d5ca055c0c3de6318de617b07d7/ulid2.py#L262-L276
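The ulid_to_binary row above accepts a UUID, a 26-character base32 ULID, or 16 raw bytes and normalizes all three to a 16-byte string. A usage sketch assuming the ulid2 package is installed and exposes ulid_to_binary at module top level (the file path in the row suggests it does); only the UUID and raw-bytes paths are exercised, since they need no helpers beyond what the row shows.

import uuid
from ulid2 import ulid_to_binary  # assumed import location

u = uuid.uuid4()
print(ulid_to_binary(u) == u.bytes)   # True: UUID input returns its .bytes
raw = bytes(range(16))
print(ulid_to_binary(raw) == raw)     # True: 16 raw bytes pass straight through
print(len(ulid_to_binary(u)))         # 16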
261
saltstack/salt
salt/cloud/clouds/packet.py
_wait_for_status
def _wait_for_status(status_type, object_id, status=None, timeout=500, quiet=True): ''' Wait for a certain status from Packet. status_type device or volume object_id The ID of the Packet device or volume to wait on. Required. status The status to wait for. timeout The amount of time to wait for a status to update. quiet Log status updates to debug logs when False. Otherwise, logs to info. ''' if status is None: status = "ok" interval = 5 iterations = int(timeout / interval) vm_ = get_configured_provider() manager = packet.Manager(auth_token=vm_['token']) for i in range(0, iterations): get_object = getattr(manager, "get_{status_type}".format(status_type=status_type)) obj = get_object(object_id) if obj.state == status: return obj time.sleep(interval) log.log( logging.INFO if not quiet else logging.DEBUG, 'Status for Packet %s is \'%s\', waiting for \'%s\'.', object_id, obj.state, status ) return obj
python
def _wait_for_status(status_type, object_id, status=None, timeout=500, quiet=True): ''' Wait for a certain status from Packet. status_type device or volume object_id The ID of the Packet device or volume to wait on. Required. status The status to wait for. timeout The amount of time to wait for a status to update. quiet Log status updates to debug logs when False. Otherwise, logs to info. ''' if status is None: status = "ok" interval = 5 iterations = int(timeout / interval) vm_ = get_configured_provider() manager = packet.Manager(auth_token=vm_['token']) for i in range(0, iterations): get_object = getattr(manager, "get_{status_type}".format(status_type=status_type)) obj = get_object(object_id) if obj.state == status: return obj time.sleep(interval) log.log( logging.INFO if not quiet else logging.DEBUG, 'Status for Packet %s is \'%s\', waiting for \'%s\'.', object_id, obj.state, status ) return obj
['def', '_wait_for_status', '(', 'status_type', ',', 'object_id', ',', 'status', '=', 'None', ',', 'timeout', '=', '500', ',', 'quiet', '=', 'True', ')', ':', 'if', 'status', 'is', 'None', ':', 'status', '=', '"ok"', 'interval', '=', '5', 'iterations', '=', 'int', '(', 'timeout', '/', 'interval', ')', 'vm_', '=', 'get_configured_provider', '(', ')', 'manager', '=', 'packet', '.', 'Manager', '(', 'auth_token', '=', 'vm_', '[', "'token'", ']', ')', 'for', 'i', 'in', 'range', '(', '0', ',', 'iterations', ')', ':', 'get_object', '=', 'getattr', '(', 'manager', ',', '"get_{status_type}"', '.', 'format', '(', 'status_type', '=', 'status_type', ')', ')', 'obj', '=', 'get_object', '(', 'object_id', ')', 'if', 'obj', '.', 'state', '==', 'status', ':', 'return', 'obj', 'time', '.', 'sleep', '(', 'interval', ')', 'log', '.', 'log', '(', 'logging', '.', 'INFO', 'if', 'not', 'quiet', 'else', 'logging', '.', 'DEBUG', ',', "'Status for Packet %s is \\'%s\\', waiting for \\'%s\\'.'", ',', 'object_id', ',', 'obj', '.', 'state', ',', 'status', ')', 'return', 'obj']
Wait for a certain status from Packet. status_type device or volume object_id The ID of the Packet device or volume to wait on. Required. status The status to wait for. timeout The amount of time to wait for a status to update. quiet Log status updates to debug logs when False. Otherwise, logs to info.
['Wait', 'for', 'a', 'certain', 'status', 'from', 'Packet', '.', 'status_type', 'device', 'or', 'volume', 'object_id', 'The', 'ID', 'of', 'the', 'Packet', 'device', 'or', 'volume', 'to', 'wait', 'on', '.', 'Required', '.', 'status', 'The', 'status', 'to', 'wait', 'for', '.', 'timeout', 'The', 'amount', 'of', 'time', 'to', 'wait', 'for', 'a', 'status', 'to', 'update', '.', 'quiet', 'Log', 'status', 'updates', 'to', 'debug', 'logs', 'when', 'False', '.', 'Otherwise', 'logs', 'to', 'info', '.']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/packet.py#L227-L264
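The wait loop above follows a common poll-with-timeout pattern: a fixed interval, the timeout divided into iterations, and an early return once the wanted state appears. The snippet below restates that pattern without the Packet SDK; poll_until and fetch_state are illustrative names, not part of the Salt cloud driver.

import time

def poll_until(fetch_state, wanted='ok', timeout=500, interval=5):
    # Generic restatement of the wait loop above: poll until fetch_state()
    # returns `wanted` or the timeout window is exhausted, then return the
    # last observed state either way.
    state = None
    for _ in range(int(timeout / interval)):
        state = fetch_state()
        if state == wanted:
            return state
        time.sleep(interval)
    return state

# Fake backend that becomes ready on the third poll; small timeout/interval
# so the example finishes quickly.
states = iter(['queued', 'provisioning', 'ok'])
print(poll_until(lambda: next(states), timeout=3, interval=1))  # ok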
262
jorgeecardona/dynect
dynect/__init__.py
Dynect.remove_address
def remove_address(self, fqdn, address): " Remove an address of a domain." # Get a list of addresses. for record in self.list_address(fqdn): if record.address == address: record.delete() break
python
def remove_address(self, fqdn, address): " Remove an address of a domain." # Get a list of addresses. for record in self.list_address(fqdn): if record.address == address: record.delete() break
['def', 'remove_address', '(', 'self', ',', 'fqdn', ',', 'address', ')', ':', '# Get a list of addresses.', 'for', 'record', 'in', 'self', '.', 'list_address', '(', 'fqdn', ')', ':', 'if', 'record', '.', 'address', '==', 'address', ':', 'record', '.', 'delete', '(', ')', 'break']
Remove an address of a domain.
['Remove', 'an', 'address', 'of', 'a', 'domain', '.']
train
https://github.com/jorgeecardona/dynect/blob/d2cd85bc510f00108a3a5bfe515f45daae15a482/dynect/__init__.py#L277-L284
263
CitrineInformatics/pypif
pypif/util/case.py
to_camel_case
def to_camel_case(snake_case_string): """ Convert a string from snake case to camel case. For example, "some_var" would become "someVar". :param snake_case_string: Snake-cased string to convert to camel case. :returns: Camel-cased version of snake_case_string. """ parts = snake_case_string.lstrip('_').split('_') return parts[0] + ''.join([i.title() for i in parts[1:]])
python
def to_camel_case(snake_case_string): """ Convert a string from snake case to camel case. For example, "some_var" would become "someVar". :param snake_case_string: Snake-cased string to convert to camel case. :returns: Camel-cased version of snake_case_string. """ parts = snake_case_string.lstrip('_').split('_') return parts[0] + ''.join([i.title() for i in parts[1:]])
['def', 'to_camel_case', '(', 'snake_case_string', ')', ':', 'parts', '=', 'snake_case_string', '.', 'lstrip', '(', "'_'", ')', '.', 'split', '(', "'_'", ')', 'return', 'parts', '[', '0', ']', '+', "''", '.', 'join', '(', '[', 'i', '.', 'title', '(', ')', 'for', 'i', 'in', 'parts', '[', '1', ':', ']', ']', ')']
Convert a string from snake case to camel case. For example, "some_var" would become "someVar". :param snake_case_string: Snake-cased string to convert to camel case. :returns: Camel-cased version of snake_case_string.
['Convert', 'a', 'string', 'from', 'snake', 'case', 'to', 'camel', 'case', '.', 'For', 'example', 'some_var', 'would', 'become', 'someVar', '.']
train
https://github.com/CitrineInformatics/pypif/blob/938348a8ff7b10b330770cccaaeb2109922f681b/pypif/util/case.py#L8-L16
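A short usage illustration of to_camel_case as documented above. The inverse helper is a hypothetical addition shown only for contrast; it is not part of pypif.

import re

def to_camel_case(snake_case_string):
    # Same logic as the pypif helper above.
    parts = snake_case_string.lstrip('_').split('_')
    return parts[0] + ''.join(i.title() for i in parts[1:])

def to_snake_case(camel_case_string):
    # Hypothetical inverse, for contrast only; not part of pypif.
    return re.sub(r'(?<!^)(?=[A-Z])', '_', camel_case_string).lower()

print(to_camel_case('some_var'))        # someVar
print(to_camel_case('_private_field'))  # privateField
print(to_snake_case('someVar'))         # some_var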
264
meejah/txtorcon
txtorcon/endpoints.py
get_global_tor_instance
def get_global_tor_instance(reactor, control_port=None, progress_updates=None, _tor_launcher=None): """ Normal users shouldn't need to call this; use TCPHiddenServiceEndpoint::system_tor instead. :return Tor: a 'global to this Python process' instance of Tor. There isn't one of these until the first time this method is called. All calls to this method return the same instance. """ global _global_tor global _global_tor_lock yield _global_tor_lock.acquire() if _tor_launcher is None: # XXX :( mutual dependencies...really get_global_tor_instance # should be in controller.py if it's going to return a Tor # instance. from .controller import launch _tor_launcher = launch try: if _global_tor is None: _global_tor = yield _tor_launcher(reactor, progress_updates=progress_updates) else: config = yield _global_tor.get_config() already_port = config.ControlPort if control_port is not None and control_port != already_port: raise RuntimeError( "ControlPort is already '{}', but you wanted '{}'", already_port, control_port, ) defer.returnValue(_global_tor) finally: _global_tor_lock.release()
python
def get_global_tor_instance(reactor, control_port=None, progress_updates=None, _tor_launcher=None): """ Normal users shouldn't need to call this; use TCPHiddenServiceEndpoint::system_tor instead. :return Tor: a 'global to this Python process' instance of Tor. There isn't one of these until the first time this method is called. All calls to this method return the same instance. """ global _global_tor global _global_tor_lock yield _global_tor_lock.acquire() if _tor_launcher is None: # XXX :( mutual dependencies...really get_global_tor_instance # should be in controller.py if it's going to return a Tor # instance. from .controller import launch _tor_launcher = launch try: if _global_tor is None: _global_tor = yield _tor_launcher(reactor, progress_updates=progress_updates) else: config = yield _global_tor.get_config() already_port = config.ControlPort if control_port is not None and control_port != already_port: raise RuntimeError( "ControlPort is already '{}', but you wanted '{}'", already_port, control_port, ) defer.returnValue(_global_tor) finally: _global_tor_lock.release()
['def', 'get_global_tor_instance', '(', 'reactor', ',', 'control_port', '=', 'None', ',', 'progress_updates', '=', 'None', ',', '_tor_launcher', '=', 'None', ')', ':', 'global', '_global_tor', 'global', '_global_tor_lock', 'yield', '_global_tor_lock', '.', 'acquire', '(', ')', 'if', '_tor_launcher', 'is', 'None', ':', '# XXX :( mutual dependencies...really get_global_tor_instance', "# should be in controller.py if it's going to return a Tor", '# instance.', 'from', '.', 'controller', 'import', 'launch', '_tor_launcher', '=', 'launch', 'try', ':', 'if', '_global_tor', 'is', 'None', ':', '_global_tor', '=', 'yield', '_tor_launcher', '(', 'reactor', ',', 'progress_updates', '=', 'progress_updates', ')', 'else', ':', 'config', '=', 'yield', '_global_tor', '.', 'get_config', '(', ')', 'already_port', '=', 'config', '.', 'ControlPort', 'if', 'control_port', 'is', 'not', 'None', 'and', 'control_port', '!=', 'already_port', ':', 'raise', 'RuntimeError', '(', '"ControlPort is already \'{}\', but you wanted \'{}\'"', ',', 'already_port', ',', 'control_port', ',', ')', 'defer', '.', 'returnValue', '(', '_global_tor', ')', 'finally', ':', '_global_tor_lock', '.', 'release', '(', ')']
Normal users shouldn't need to call this; use TCPHiddenServiceEndpoint::system_tor instead. :return Tor: a 'global to this Python process' instance of Tor. There isn't one of these until the first time this method is called. All calls to this method return the same instance.
['Normal', 'users', 'shouldn', 't', 'need', 'to', 'call', 'this', ';', 'use', 'TCPHiddenServiceEndpoint', '::', 'system_tor', 'instead', '.']
train
https://github.com/meejah/txtorcon/blob/14053b95adf0b4bd9dd9c317bece912a26578a93/txtorcon/endpoints.py#L66-L105
265
StackStorm/pybind
pybind/slxos/v17r_2_00/telemetry/profile/__init__.py
profile._set_enhanced_voq_max_queue_depth
def _set_enhanced_voq_max_queue_depth(self, v, load=False): """ Setter method for enhanced_voq_max_queue_depth, mapped from YANG variable /telemetry/profile/enhanced_voq_max_queue_depth (list) If this variable is read-only (config: false) in the source YANG file, then _set_enhanced_voq_max_queue_depth is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_enhanced_voq_max_queue_depth() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("name",enhanced_voq_max_queue_depth.enhanced_voq_max_queue_depth, yang_name="enhanced-voq-max-queue-depth", rest_name="enhanced-voq-max-queue-depth", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'EnhancedVoqMaxQueueDepthProfile', u'info': u'Enhanced VOQ max queue depth'}}), is_container='list', yang_name="enhanced-voq-max-queue-depth", rest_name="enhanced-voq-max-queue-depth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'EnhancedVoqMaxQueueDepthProfile', u'info': u'Enhanced VOQ max queue depth'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """enhanced_voq_max_queue_depth must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("name",enhanced_voq_max_queue_depth.enhanced_voq_max_queue_depth, yang_name="enhanced-voq-max-queue-depth", rest_name="enhanced-voq-max-queue-depth", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'EnhancedVoqMaxQueueDepthProfile', u'info': u'Enhanced VOQ max queue depth'}}), is_container='list', yang_name="enhanced-voq-max-queue-depth", rest_name="enhanced-voq-max-queue-depth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'EnhancedVoqMaxQueueDepthProfile', u'info': u'Enhanced VOQ max queue depth'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)""", }) self.__enhanced_voq_max_queue_depth = t if hasattr(self, '_set'): self._set()
python
def _set_enhanced_voq_max_queue_depth(self, v, load=False): """ Setter method for enhanced_voq_max_queue_depth, mapped from YANG variable /telemetry/profile/enhanced_voq_max_queue_depth (list) If this variable is read-only (config: false) in the source YANG file, then _set_enhanced_voq_max_queue_depth is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_enhanced_voq_max_queue_depth() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("name",enhanced_voq_max_queue_depth.enhanced_voq_max_queue_depth, yang_name="enhanced-voq-max-queue-depth", rest_name="enhanced-voq-max-queue-depth", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'EnhancedVoqMaxQueueDepthProfile', u'info': u'Enhanced VOQ max queue depth'}}), is_container='list', yang_name="enhanced-voq-max-queue-depth", rest_name="enhanced-voq-max-queue-depth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'EnhancedVoqMaxQueueDepthProfile', u'info': u'Enhanced VOQ max queue depth'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """enhanced_voq_max_queue_depth must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("name",enhanced_voq_max_queue_depth.enhanced_voq_max_queue_depth, yang_name="enhanced-voq-max-queue-depth", rest_name="enhanced-voq-max-queue-depth", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'EnhancedVoqMaxQueueDepthProfile', u'info': u'Enhanced VOQ max queue depth'}}), is_container='list', yang_name="enhanced-voq-max-queue-depth", rest_name="enhanced-voq-max-queue-depth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'EnhancedVoqMaxQueueDepthProfile', u'info': u'Enhanced VOQ max queue depth'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)""", }) self.__enhanced_voq_max_queue_depth = t if hasattr(self, '_set'): self._set()
['def', '_set_enhanced_voq_max_queue_depth', '(', 'self', ',', 'v', ',', 'load', '=', 'False', ')', ':', 'if', 'hasattr', '(', 'v', ',', '"_utype"', ')', ':', 'v', '=', 'v', '.', '_utype', '(', 'v', ')', 'try', ':', 't', '=', 'YANGDynClass', '(', 'v', ',', 'base', '=', 'YANGListType', '(', '"name"', ',', 'enhanced_voq_max_queue_depth', '.', 'enhanced_voq_max_queue_depth', ',', 'yang_name', '=', '"enhanced-voq-max-queue-depth"', ',', 'rest_name', '=', '"enhanced-voq-max-queue-depth"', ',', 'parent', '=', 'self', ',', 'is_container', '=', "'list'", ',', 'user_ordered', '=', 'False', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'yang_keys', '=', "'name'", ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'cli-full-command'", ':', 'None', ',', "u'cli-suppress-list-no'", ':', 'None', ',', "u'callpoint'", ':', "u'EnhancedVoqMaxQueueDepthProfile'", ',', "u'info'", ':', "u'Enhanced VOQ max queue depth'", '}', '}', ')', ',', 'is_container', '=', "'list'", ',', 'yang_name', '=', '"enhanced-voq-max-queue-depth"', ',', 'rest_name', '=', '"enhanced-voq-max-queue-depth"', ',', 'parent', '=', 'self', ',', 'path_helper', '=', 'self', '.', '_path_helper', ',', 'extmethods', '=', 'self', '.', '_extmethods', ',', 'register_paths', '=', 'True', ',', 'extensions', '=', '{', "u'tailf-common'", ':', '{', "u'cli-full-command'", ':', 'None', ',', "u'cli-suppress-list-no'", ':', 'None', ',', "u'callpoint'", ':', "u'EnhancedVoqMaxQueueDepthProfile'", ',', "u'info'", ':', "u'Enhanced VOQ max queue depth'", '}', '}', ',', 'namespace', '=', "'urn:brocade.com:mgmt:brocade-telemetry'", ',', 'defining_module', '=', "'brocade-telemetry'", ',', 'yang_type', '=', "'list'", ',', 'is_config', '=', 'True', ')', 'except', '(', 'TypeError', ',', 'ValueError', ')', ':', 'raise', 'ValueError', '(', '{', "'error-string'", ':', '"""enhanced_voq_max_queue_depth must be of a type compatible with list"""', ',', "'defined-type'", ':', '"list"', ',', "'generated-type'", ':', '"""YANGDynClass(base=YANGListType("name",enhanced_voq_max_queue_depth.enhanced_voq_max_queue_depth, yang_name="enhanced-voq-max-queue-depth", rest_name="enhanced-voq-max-queue-depth", parent=self, is_container=\'list\', user_ordered=False, path_helper=self._path_helper, yang_keys=\'name\', extensions={u\'tailf-common\': {u\'cli-full-command\': None, u\'cli-suppress-list-no\': None, u\'callpoint\': u\'EnhancedVoqMaxQueueDepthProfile\', u\'info\': u\'Enhanced VOQ max queue depth\'}}), is_container=\'list\', yang_name="enhanced-voq-max-queue-depth", rest_name="enhanced-voq-max-queue-depth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'cli-full-command\': None, u\'cli-suppress-list-no\': None, u\'callpoint\': u\'EnhancedVoqMaxQueueDepthProfile\', u\'info\': u\'Enhanced VOQ max queue depth\'}}, namespace=\'urn:brocade.com:mgmt:brocade-telemetry\', defining_module=\'brocade-telemetry\', yang_type=\'list\', is_config=True)"""', ',', '}', ')', 'self', '.', '__enhanced_voq_max_queue_depth', '=', 't', 'if', 'hasattr', '(', 'self', ',', "'_set'", ')', ':', 'self', '.', '_set', '(', ')']
Setter method for enhanced_voq_max_queue_depth, mapped from YANG variable /telemetry/profile/enhanced_voq_max_queue_depth (list) If this variable is read-only (config: false) in the source YANG file, then _set_enhanced_voq_max_queue_depth is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_enhanced_voq_max_queue_depth() directly.
['Setter', 'method', 'for', 'enhanced_voq_max_queue_depth', 'mapped', 'from', 'YANG', 'variable', '/', 'telemetry', '/', 'profile', '/', 'enhanced_voq_max_queue_depth', '(', 'list', ')', 'If', 'this', 'variable', 'is', 'read', '-', 'only', '(', 'config', ':', 'false', ')', 'in', 'the', 'source', 'YANG', 'file', 'then', '_set_enhanced_voq_max_queue_depth', 'is', 'considered', 'as', 'a', 'private', 'method', '.', 'Backends', 'looking', 'to', 'populate', 'this', 'variable', 'should', 'do', 'so', 'via', 'calling', 'thisObj', '.', '_set_enhanced_voq_max_queue_depth', '()', 'directly', '.']
train
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17r_2_00/telemetry/profile/__init__.py#L205-L226
266
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/srtm.py
SRTMTile._avg
def _avg(value1, value2, weight): """Returns the weighted average of two values and handles the case where one value is None. If both values are None, None is returned. """ if value1 is None: return value2 if value2 is None: return value1 return value2 * weight + value1 * (1 - weight)
python
def _avg(value1, value2, weight): """Returns the weighted average of two values and handles the case where one value is None. If both values are None, None is returned. """ if value1 is None: return value2 if value2 is None: return value1 return value2 * weight + value1 * (1 - weight)
['def', '_avg', '(', 'value1', ',', 'value2', ',', 'weight', ')', ':', 'if', 'value1', 'is', 'None', ':', 'return', 'value2', 'if', 'value2', 'is', 'None', ':', 'return', 'value1', 'return', 'value2', '*', 'weight', '+', 'value1', '*', '(', '1', '-', 'weight', ')']
Returns the weighted average of two values and handles the case where one value is None. If both values are None, None is returned.
['Returns', 'the', 'weighted', 'average', 'of', 'two', 'values', 'and', 'handles', 'the', 'case', 'where', 'one', 'value', 'is', 'None', '.', 'If', 'both', 'values', 'are', 'None', 'None', 'is', 'returned', '.']
train
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/srtm.py#L305-L313
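The None-tolerant weighted average above is easy to check with a worked example; the restatement below drops the staticmethod wrapper so it runs on its own.

def avg(value1, value2, weight):
    # Weighted blend of two elevation samples; if either sample is missing
    # (None), fall back to the other one, as in SRTMTile._avg above.
    if value1 is None:
        return value2
    if value2 is None:
        return value1
    return value2 * weight + value1 * (1 - weight)

print(avg(100.0, 200.0, 0.25))  # 125.0 -> a quarter of the way from value1 to value2
print(avg(None, 200.0, 0.25))   # 200.0 -> first sample missing
print(avg(100.0, None, 0.25))   # 100.0 -> second sample missing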
267
sorrowless/battery_systray
batticon/batticon.py
Indicator.right_click_event_statusicon
def right_click_event_statusicon(self, icon, button, time): """ It's just way how popup menu works in GTK. Don't ask me how it works. """ def pos(menu, aicon): """Just return menu""" return Gtk.StatusIcon.position_menu(menu, aicon) self.menu.popup(None, None, pos, icon, button, time)
python
def right_click_event_statusicon(self, icon, button, time): """ It's just way how popup menu works in GTK. Don't ask me how it works. """ def pos(menu, aicon): """Just return menu""" return Gtk.StatusIcon.position_menu(menu, aicon) self.menu.popup(None, None, pos, icon, button, time)
['def', 'right_click_event_statusicon', '(', 'self', ',', 'icon', ',', 'button', ',', 'time', ')', ':', 'def', 'pos', '(', 'menu', ',', 'aicon', ')', ':', '"""Just return menu"""', 'return', 'Gtk', '.', 'StatusIcon', '.', 'position_menu', '(', 'menu', ',', 'aicon', ')', 'self', '.', 'menu', '.', 'popup', '(', 'None', ',', 'None', ',', 'pos', ',', 'icon', ',', 'button', ',', 'time', ')']
It's just way how popup menu works in GTK. Don't ask me how it works.
['It', 's', 'just', 'way', 'how', 'popup', 'menu', 'works', 'in', 'GTK', '.', 'Don', 't', 'ask', 'me', 'how', 'it', 'works', '.']
train
https://github.com/sorrowless/battery_systray/blob/4594fca6f357660e081c2800af4a8b21c607bef1/batticon/batticon.py#L70-L79
268
Unidata/siphon
siphon/ncss_dataset.py
_Types.handle_typed_values
def handle_typed_values(val, type_name, value_type): """Translate typed values into the appropriate python object. Takes an element name, value, and type and returns a list with the string value(s) properly converted to a python type. TypedValues are handled in ucar.ma2.DataType in netcdfJava in the DataType enum. Possibilities are: "boolean" "byte" "char" "short" "int" "long" "float" "double" "Sequence" "String" "Structure" "enum1" "enum2" "enum4" "opaque" "object" All of these are values written as strings in the xml, so simply applying int, float to the values will work in most cases (i.e. the TDS encodes them as string values properly). Examle XML element: <attribute name="scale_factor" type="double" value="0.0010000000474974513"/> Parameters ---------- val : string The string representation of the value attribute of the xml element type_name : string The string representation of the name attribute of the xml element value_type : string The string representation of the type attribute of the xml element Returns ------- val : list A list containing the properly typed python values. """ if value_type in ['byte', 'short', 'int', 'long']: try: val = [int(v) for v in re.split('[ ,]', val) if v] except ValueError: log.warning('Cannot convert "%s" to int. Keeping type as str.', val) elif value_type in ['float', 'double']: try: val = [float(v) for v in re.split('[ ,]', val) if v] except ValueError: log.warning('Cannot convert "%s" to float. Keeping type as str.', val) elif value_type == 'boolean': try: # special case for boolean type val = val.split() # values must be either true or false for potential_bool in val: if potential_bool not in ['true', 'false']: raise ValueError val = [True if item == 'true' else False for item in val] except ValueError: msg = 'Cannot convert values %s to boolean.' msg += ' Keeping type as str.' log.warning(msg, val) elif value_type == 'String': # nothing special for String type pass else: # possibilities - Sequence, Structure, enum, opaque, object, # and char. # Not sure how to handle these as I do not have an example # of how they would show up in dataset.xml log.warning('%s type %s not understood. Keeping as String.', type_name, value_type) if not isinstance(val, list): val = [val] return val
python
def handle_typed_values(val, type_name, value_type): """Translate typed values into the appropriate python object. Takes an element name, value, and type and returns a list with the string value(s) properly converted to a python type. TypedValues are handled in ucar.ma2.DataType in netcdfJava in the DataType enum. Possibilities are: "boolean" "byte" "char" "short" "int" "long" "float" "double" "Sequence" "String" "Structure" "enum1" "enum2" "enum4" "opaque" "object" All of these are values written as strings in the xml, so simply applying int, float to the values will work in most cases (i.e. the TDS encodes them as string values properly). Examle XML element: <attribute name="scale_factor" type="double" value="0.0010000000474974513"/> Parameters ---------- val : string The string representation of the value attribute of the xml element type_name : string The string representation of the name attribute of the xml element value_type : string The string representation of the type attribute of the xml element Returns ------- val : list A list containing the properly typed python values. """ if value_type in ['byte', 'short', 'int', 'long']: try: val = [int(v) for v in re.split('[ ,]', val) if v] except ValueError: log.warning('Cannot convert "%s" to int. Keeping type as str.', val) elif value_type in ['float', 'double']: try: val = [float(v) for v in re.split('[ ,]', val) if v] except ValueError: log.warning('Cannot convert "%s" to float. Keeping type as str.', val) elif value_type == 'boolean': try: # special case for boolean type val = val.split() # values must be either true or false for potential_bool in val: if potential_bool not in ['true', 'false']: raise ValueError val = [True if item == 'true' else False for item in val] except ValueError: msg = 'Cannot convert values %s to boolean.' msg += ' Keeping type as str.' log.warning(msg, val) elif value_type == 'String': # nothing special for String type pass else: # possibilities - Sequence, Structure, enum, opaque, object, # and char. # Not sure how to handle these as I do not have an example # of how they would show up in dataset.xml log.warning('%s type %s not understood. Keeping as String.', type_name, value_type) if not isinstance(val, list): val = [val] return val
['def', 'handle_typed_values', '(', 'val', ',', 'type_name', ',', 'value_type', ')', ':', 'if', 'value_type', 'in', '[', "'byte'", ',', "'short'", ',', "'int'", ',', "'long'", ']', ':', 'try', ':', 'val', '=', '[', 'int', '(', 'v', ')', 'for', 'v', 'in', 're', '.', 'split', '(', "'[ ,]'", ',', 'val', ')', 'if', 'v', ']', 'except', 'ValueError', ':', 'log', '.', 'warning', '(', '\'Cannot convert "%s" to int. Keeping type as str.\'', ',', 'val', ')', 'elif', 'value_type', 'in', '[', "'float'", ',', "'double'", ']', ':', 'try', ':', 'val', '=', '[', 'float', '(', 'v', ')', 'for', 'v', 'in', 're', '.', 'split', '(', "'[ ,]'", ',', 'val', ')', 'if', 'v', ']', 'except', 'ValueError', ':', 'log', '.', 'warning', '(', '\'Cannot convert "%s" to float. Keeping type as str.\'', ',', 'val', ')', 'elif', 'value_type', '==', "'boolean'", ':', 'try', ':', '# special case for boolean type', 'val', '=', 'val', '.', 'split', '(', ')', '# values must be either true or false', 'for', 'potential_bool', 'in', 'val', ':', 'if', 'potential_bool', 'not', 'in', '[', "'true'", ',', "'false'", ']', ':', 'raise', 'ValueError', 'val', '=', '[', 'True', 'if', 'item', '==', "'true'", 'else', 'False', 'for', 'item', 'in', 'val', ']', 'except', 'ValueError', ':', 'msg', '=', "'Cannot convert values %s to boolean.'", 'msg', '+=', "' Keeping type as str.'", 'log', '.', 'warning', '(', 'msg', ',', 'val', ')', 'elif', 'value_type', '==', "'String'", ':', '# nothing special for String type', 'pass', 'else', ':', '# possibilities - Sequence, Structure, enum, opaque, object,', '# and char.', '# Not sure how to handle these as I do not have an example', '# of how they would show up in dataset.xml', 'log', '.', 'warning', '(', "'%s type %s not understood. Keeping as String.'", ',', 'type_name', ',', 'value_type', ')', 'if', 'not', 'isinstance', '(', 'val', ',', 'list', ')', ':', 'val', '=', '[', 'val', ']', 'return', 'val']
Translate typed values into the appropriate python object. Takes an element name, value, and type and returns a list with the string value(s) properly converted to a python type. TypedValues are handled in ucar.ma2.DataType in netcdfJava in the DataType enum. Possibilities are: "boolean" "byte" "char" "short" "int" "long" "float" "double" "Sequence" "String" "Structure" "enum1" "enum2" "enum4" "opaque" "object" All of these are values written as strings in the xml, so simply applying int, float to the values will work in most cases (i.e. the TDS encodes them as string values properly). Examle XML element: <attribute name="scale_factor" type="double" value="0.0010000000474974513"/> Parameters ---------- val : string The string representation of the value attribute of the xml element type_name : string The string representation of the name attribute of the xml element value_type : string The string representation of the type attribute of the xml element Returns ------- val : list A list containing the properly typed python values.
['Translate', 'typed', 'values', 'into', 'the', 'appropriate', 'python', 'object', '.']
train
https://github.com/Unidata/siphon/blob/53fb0d84fbce1c18c8e81c9e68bc81620ee0a6ac/siphon/ncss_dataset.py#L26-L114
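A hedged usage sketch of the coercion above, calling the static method directly. It assumes siphon is installed and that _Types can be imported from siphon.ncss_dataset as the record's path suggests; _Types is a private class, so this is illustration only.

# Hedged usage sketch; _Types is private API, imported here purely to show the
# documented coercions in action.
from siphon.ncss_dataset import _Types

print(_Types.handle_typed_values('1 2 3', 'levels', 'int'))          # [1, 2, 3]
print(_Types.handle_typed_values('0.001', 'scale_factor', 'double')) # [0.001]
print(_Types.handle_typed_values('true false', 'flags', 'boolean'))  # [True, False]
print(_Types.handle_typed_values('metres', 'units', 'String'))       # ['metres']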
269
Robpol86/colorclass
colorclass/core.py
ColorStr.center
def center(self, width, fillchar=None): """Return centered in a string of length width. Padding is done using the specified fill character or space. :param int width: Length of output string. :param str fillchar: Use this character instead of spaces. """ if fillchar is not None: result = self.value_no_colors.center(width, fillchar) else: result = self.value_no_colors.center(width) return self.__class__(result.replace(self.value_no_colors, self.value_colors), keep_tags=True)
python
def center(self, width, fillchar=None): """Return centered in a string of length width. Padding is done using the specified fill character or space. :param int width: Length of output string. :param str fillchar: Use this character instead of spaces. """ if fillchar is not None: result = self.value_no_colors.center(width, fillchar) else: result = self.value_no_colors.center(width) return self.__class__(result.replace(self.value_no_colors, self.value_colors), keep_tags=True)
['def', 'center', '(', 'self', ',', 'width', ',', 'fillchar', '=', 'None', ')', ':', 'if', 'fillchar', 'is', 'not', 'None', ':', 'result', '=', 'self', '.', 'value_no_colors', '.', 'center', '(', 'width', ',', 'fillchar', ')', 'else', ':', 'result', '=', 'self', '.', 'value_no_colors', '.', 'center', '(', 'width', ')', 'return', 'self', '.', '__class__', '(', 'result', '.', 'replace', '(', 'self', '.', 'value_no_colors', ',', 'self', '.', 'value_colors', ')', ',', 'keep_tags', '=', 'True', ')']
Return centered in a string of length width. Padding is done using the specified fill character or space. :param int width: Length of output string. :param str fillchar: Use this character instead of spaces.
['Return', 'centered', 'in', 'a', 'string', 'of', 'length', 'width', '.', 'Padding', 'is', 'done', 'using', 'the', 'specified', 'fill', 'character', 'or', 'space', '.']
train
https://github.com/Robpol86/colorclass/blob/692e2d6f5ad470b6221c8cb9641970dc5563a572/colorclass/core.py#L111-L121
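The center() override measures the width on the colour-stripped value and then re-applies the colour tags, so padding is not thrown off by escape sequences. A minimal sketch, assuming the colorclass package and its public Color entry point:

from colorclass import Color

tagged = Color('{red}hi{/red}')
padded = tagged.center(10, '*')   # width is measured on the two visible characters
print(padded)                     # renders as ****hi**** with 'hi' still red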
270
twisted/txaws
txaws/server/registry.py
Registry.get
def get(self, action, version=None): """Get the method class handing the given action and version.""" by_version = self._by_action[action] if version in by_version: return by_version[version] else: return by_version[None]
python
def get(self, action, version=None): """Get the method class handing the given action and version.""" by_version = self._by_action[action] if version in by_version: return by_version[version] else: return by_version[None]
['def', 'get', '(', 'self', ',', 'action', ',', 'version', '=', 'None', ')', ':', 'by_version', '=', 'self', '.', '_by_action', '[', 'action', ']', 'if', 'version', 'in', 'by_version', ':', 'return', 'by_version', '[', 'version', ']', 'else', ':', 'return', 'by_version', '[', 'None', ']']
Get the method class handing the given action and version.
['Get', 'the', 'method', 'class', 'handing', 'the', 'given', 'action', 'and', 'version', '.']
train
https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/server/registry.py#L38-L44
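The lookup above falls back to the None key when no handler is registered for the exact version. A minimal dictionary-based illustration, independent of txAWS and using hypothetical action and handler names:

# Plain dict standing in for the registry's _by_action mapping (hypothetical data).
_by_action = {
    'DescribeInstances': {None: 'DefaultHandler', '2012-06-01': 'Handler2012'},
}

def get(action, version=None):
    by_version = _by_action[action]
    return by_version[version] if version in by_version else by_version[None]

print(get('DescribeInstances', '2012-06-01'))  # Handler2012 (exact version match)
print(get('DescribeInstances', '2999-01-01'))  # DefaultHandler (fallback to None key)
print(get('DescribeInstances'))                # DefaultHandler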
271
kervi/kervi-core
kervi/hal/i2c.py
II2CDeviceDriver.reverse_byte_order
def reverse_byte_order(self, data): """Reverses the byte order of an int (16-bit) or long (32-bit) value.""" # Courtesy Vishal Sapre byte_count = len(hex(data)[2:].replace('L', '')[::2]) val = 0 for i in range(byte_count): val = (val << 8) | (data & 0xff) data >>= 8 return val
python
def reverse_byte_order(self, data): """Reverses the byte order of an int (16-bit) or long (32-bit) value.""" # Courtesy Vishal Sapre byte_count = len(hex(data)[2:].replace('L', '')[::2]) val = 0 for i in range(byte_count): val = (val << 8) | (data & 0xff) data >>= 8 return val
['def', 'reverse_byte_order', '(', 'self', ',', 'data', ')', ':', '# Courtesy Vishal Sapre', 'byte_count', '=', 'len', '(', 'hex', '(', 'data', ')', '[', '2', ':', ']', '.', 'replace', '(', "'L'", ',', "''", ')', '[', ':', ':', '2', ']', ')', 'val', '=', '0', 'for', 'i', 'in', 'range', '(', 'byte_count', ')', ':', 'val', '=', '(', 'val', '<<', '8', ')', '|', '(', 'data', '&', '0xff', ')', 'data', '>>=', '8', 'return', 'val']
Reverses the byte order of an int (16-bit) or long (32-bit) value.
['Reverses', 'the', 'byte', 'order', 'of', 'an', 'int', '(', '16', '-', 'bit', ')', 'or', 'long', '(', '32', '-', 'bit', ')', 'value', '.']
train
https://github.com/kervi/kervi-core/blob/3c1e3c8a17a7b4d085d8a28b99180ff2a96b0e23/kervi/hal/i2c.py#L23-L31
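The byte-swap helper above can be verified with a couple of small values; the restatement below drops the self parameter so it runs standalone.

def reverse_byte_order(data):
    # Same logic as above: infer the byte count from the hex width, then
    # rebuild the value with its bytes emitted in reverse order.
    byte_count = len(hex(data)[2:].replace('L', '')[::2])
    val = 0
    for _ in range(byte_count):
        val = (val << 8) | (data & 0xff)
        data >>= 8
    return val

print(hex(reverse_byte_order(0x1234)))      # 0x3412
print(hex(reverse_byte_order(0x12345678)))  # 0x78563412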
272
markfinger/python-nodejs
nodejs/interrogate.py
run_command
def run_command(cmd_to_run): """ Wrapper around subprocess that pipes the stderr and stdout from `cmd_to_run` to temporary files. Using the temporary files gets around subprocess.PIPE's issues with handling large buffers. Note: this command will block the python process until `cmd_to_run` has completed. Returns a tuple, containing the stderr and stdout as strings. """ with tempfile.TemporaryFile() as stdout_file, tempfile.TemporaryFile() as stderr_file: # Run the command popen = subprocess.Popen(cmd_to_run, stdout=stdout_file, stderr=stderr_file) popen.wait() stderr_file.seek(0) stdout_file.seek(0) stderr = stderr_file.read() stdout = stdout_file.read() if six.PY3: stderr = stderr.decode() stdout = stdout.decode() return stderr, stdout
python
def run_command(cmd_to_run): """ Wrapper around subprocess that pipes the stderr and stdout from `cmd_to_run` to temporary files. Using the temporary files gets around subprocess.PIPE's issues with handling large buffers. Note: this command will block the python process until `cmd_to_run` has completed. Returns a tuple, containing the stderr and stdout as strings. """ with tempfile.TemporaryFile() as stdout_file, tempfile.TemporaryFile() as stderr_file: # Run the command popen = subprocess.Popen(cmd_to_run, stdout=stdout_file, stderr=stderr_file) popen.wait() stderr_file.seek(0) stdout_file.seek(0) stderr = stderr_file.read() stdout = stdout_file.read() if six.PY3: stderr = stderr.decode() stdout = stdout.decode() return stderr, stdout
['def', 'run_command', '(', 'cmd_to_run', ')', ':', 'with', 'tempfile', '.', 'TemporaryFile', '(', ')', 'as', 'stdout_file', ',', 'tempfile', '.', 'TemporaryFile', '(', ')', 'as', 'stderr_file', ':', '# Run the command', 'popen', '=', 'subprocess', '.', 'Popen', '(', 'cmd_to_run', ',', 'stdout', '=', 'stdout_file', ',', 'stderr', '=', 'stderr_file', ')', 'popen', '.', 'wait', '(', ')', 'stderr_file', '.', 'seek', '(', '0', ')', 'stdout_file', '.', 'seek', '(', '0', ')', 'stderr', '=', 'stderr_file', '.', 'read', '(', ')', 'stdout', '=', 'stdout_file', '.', 'read', '(', ')', 'if', 'six', '.', 'PY3', ':', 'stderr', '=', 'stderr', '.', 'decode', '(', ')', 'stdout', '=', 'stdout', '.', 'decode', '(', ')', 'return', 'stderr', ',', 'stdout']
Wrapper around subprocess that pipes the stderr and stdout from `cmd_to_run` to temporary files. Using the temporary files gets around subprocess.PIPE's issues with handling large buffers. Note: this command will block the python process until `cmd_to_run` has completed. Returns a tuple, containing the stderr and stdout as strings.
['Wrapper', 'around', 'subprocess', 'that', 'pipes', 'the', 'stderr', 'and', 'stdout', 'from', 'cmd_to_run', 'to', 'temporary', 'files', '.', 'Using', 'the', 'temporary', 'files', 'gets', 'around', 'subprocess', '.', 'PIPE', 's', 'issues', 'with', 'handling', 'large', 'buffers', '.']
train
https://github.com/markfinger/python-nodejs/blob/3c0c84e953b9af68cbc3f124f1802361baf006bb/nodejs/interrogate.py#L8-L34
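A usage sketch for run_command; note that it returns (stderr, stdout) in that order. The import path nodejs.interrogate follows the record's file path and is an assumption, as is the presence of a POSIX echo binary on PATH (six must also be installed, since the module imports it).

# Hedged usage sketch; the import path and the availability of `echo` are assumptions.
from nodejs.interrogate import run_command

stderr, stdout = run_command(['echo', 'hello world'])  # (stderr, stdout) order
print(stdout.strip())   # hello world
print(repr(stderr))     # '' on success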
273
SpriteLink/NIPAP
nipap-cli/nipap_cli/nipap_cli.py
_complete_string
def _complete_string(key, haystack): """ Returns valid string completions Takes the string 'key' and compares it to each of the strings in 'haystack'. The ones which beginns with 'key' are returned as result. """ if len(key) == 0: return haystack match = [] for straw in haystack: if string.find(straw, key) == 0: match.append(straw) return match
python
def _complete_string(key, haystack): """ Returns valid string completions Takes the string 'key' and compares it to each of the strings in 'haystack'. The ones which beginns with 'key' are returned as result. """ if len(key) == 0: return haystack match = [] for straw in haystack: if string.find(straw, key) == 0: match.append(straw) return match
['def', '_complete_string', '(', 'key', ',', 'haystack', ')', ':', 'if', 'len', '(', 'key', ')', '==', '0', ':', 'return', 'haystack', 'match', '=', '[', ']', 'for', 'straw', 'in', 'haystack', ':', 'if', 'string', '.', 'find', '(', 'straw', ',', 'key', ')', '==', '0', ':', 'match', '.', 'append', '(', 'straw', ')', 'return', 'match']
Returns valid string completions Takes the string 'key' and compares it to each of the strings in 'haystack'. The ones which beginns with 'key' are returned as result.
['Returns', 'valid', 'string', 'completions']
train
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/nipap-cli/nipap_cli/nipap_cli.py#L1927-L1941
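The matcher above is a prefix filter written with the Python 2 string.find module function; an equivalent written for Python 3 uses str.startswith. The sketch below uses a hypothetical haystack of CLI commands.

def complete_string(key, haystack):
    # string.find(straw, key) == 0 is just a prefix test, so str.startswith
    # gives the same behaviour on Python 3.
    if len(key) == 0:
        return haystack
    return [straw for straw in haystack if straw.startswith(key)]

commands = ['address add', 'address list', 'address remove', 'pool list']  # hypothetical
print(complete_string('address', commands))  # the three 'address ...' entries
print(complete_string('', commands))         # everything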
274
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
MoveDifferential.on_to_coordinates
def on_to_coordinates(self, speed, x_target_mm, y_target_mm, brake=True, block=True): """ Drive to (`x_target_mm`, `y_target_mm`) coordinates at `speed` """ assert self.odometry_thread_id, "odometry_start() must be called to track robot coordinates" # stop moving self.off(brake='hold') # rotate in place so we are pointed straight at our target x_delta = x_target_mm - self.x_pos_mm y_delta = y_target_mm - self.y_pos_mm angle_target_radians = math.atan2(y_delta, x_delta) angle_target_degrees = math.degrees(angle_target_radians) self.turn_to_angle(speed, angle_target_degrees, brake=True, block=True) # drive in a straight line to the target coordinates distance_mm = math.sqrt(pow(self.x_pos_mm - x_target_mm, 2) + pow(self.y_pos_mm - y_target_mm, 2)) self.on_for_distance(speed, distance_mm, brake, block)
python
def on_to_coordinates(self, speed, x_target_mm, y_target_mm, brake=True, block=True): """ Drive to (`x_target_mm`, `y_target_mm`) coordinates at `speed` """ assert self.odometry_thread_id, "odometry_start() must be called to track robot coordinates" # stop moving self.off(brake='hold') # rotate in place so we are pointed straight at our target x_delta = x_target_mm - self.x_pos_mm y_delta = y_target_mm - self.y_pos_mm angle_target_radians = math.atan2(y_delta, x_delta) angle_target_degrees = math.degrees(angle_target_radians) self.turn_to_angle(speed, angle_target_degrees, brake=True, block=True) # drive in a straight line to the target coordinates distance_mm = math.sqrt(pow(self.x_pos_mm - x_target_mm, 2) + pow(self.y_pos_mm - y_target_mm, 2)) self.on_for_distance(speed, distance_mm, brake, block)
['def', 'on_to_coordinates', '(', 'self', ',', 'speed', ',', 'x_target_mm', ',', 'y_target_mm', ',', 'brake', '=', 'True', ',', 'block', '=', 'True', ')', ':', 'assert', 'self', '.', 'odometry_thread_id', ',', '"odometry_start() must be called to track robot coordinates"', '# stop moving', 'self', '.', 'off', '(', 'brake', '=', "'hold'", ')', '# rotate in place so we are pointed straight at our target', 'x_delta', '=', 'x_target_mm', '-', 'self', '.', 'x_pos_mm', 'y_delta', '=', 'y_target_mm', '-', 'self', '.', 'y_pos_mm', 'angle_target_radians', '=', 'math', '.', 'atan2', '(', 'y_delta', ',', 'x_delta', ')', 'angle_target_degrees', '=', 'math', '.', 'degrees', '(', 'angle_target_radians', ')', 'self', '.', 'turn_to_angle', '(', 'speed', ',', 'angle_target_degrees', ',', 'brake', '=', 'True', ',', 'block', '=', 'True', ')', '# drive in a straight line to the target coordinates', 'distance_mm', '=', 'math', '.', 'sqrt', '(', 'pow', '(', 'self', '.', 'x_pos_mm', '-', 'x_target_mm', ',', '2', ')', '+', 'pow', '(', 'self', '.', 'y_pos_mm', '-', 'y_target_mm', ',', '2', ')', ')', 'self', '.', 'on_for_distance', '(', 'speed', ',', 'distance_mm', ',', 'brake', ',', 'block', ')']
Drive to (`x_target_mm`, `y_target_mm`) coordinates at `speed`
['Drive', 'to', '(', 'x_target_mm', 'y_target_mm', ')', 'coordinates', 'at', 'speed']
train
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L2349-L2367
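The heading and straight-line distance computed before the two motion calls follow from atan2 and the Euclidean distance; the snippet below reproduces just that arithmetic with hypothetical coordinates, so no EV3 hardware is needed.

import math

# Current pose and target in millimetres (hypothetical values).
x_pos_mm, y_pos_mm = 0.0, 0.0
x_target_mm, y_target_mm = 300.0, 400.0

angle_target_degrees = math.degrees(
    math.atan2(y_target_mm - y_pos_mm, x_target_mm - x_pos_mm))
distance_mm = math.hypot(x_target_mm - x_pos_mm, y_target_mm - y_pos_mm)

print(round(angle_target_degrees, 1))  # 53.1 -> what turn_to_angle() is given
print(round(distance_mm, 1))           # 500.0 -> what on_for_distance() is given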
275
gem/oq-engine
openquake/hmtk/parsers/catalogue/gcmt_ndk_parser.py
ParseNDKtoGCMT._read_hypocentre_from_ndk_string
def _read_hypocentre_from_ndk_string(self, linestring): """ Reads the hypocentre data from the ndk string to return an instance of the GCMTHypocentre class """ hypo = GCMTHypocentre() hypo.source = linestring[0:4] hypo.date = _read_date_from_string(linestring[5:15]) hypo.time = _read_time_from_string(linestring[16:26]) hypo.latitude = float(linestring[27:33]) hypo.longitude = float(linestring[34:41]) hypo.depth = float(linestring[42:47]) magnitudes = [float(x) for x in linestring[48:55].split(' ')] if magnitudes[0] > 0.: hypo.m_b = magnitudes[0] if magnitudes[1] > 0.: hypo.m_s = magnitudes[1] hypo.location = linestring[56:] return hypo
python
def _read_hypocentre_from_ndk_string(self, linestring): """ Reads the hypocentre data from the ndk string to return an instance of the GCMTHypocentre class """ hypo = GCMTHypocentre() hypo.source = linestring[0:4] hypo.date = _read_date_from_string(linestring[5:15]) hypo.time = _read_time_from_string(linestring[16:26]) hypo.latitude = float(linestring[27:33]) hypo.longitude = float(linestring[34:41]) hypo.depth = float(linestring[42:47]) magnitudes = [float(x) for x in linestring[48:55].split(' ')] if magnitudes[0] > 0.: hypo.m_b = magnitudes[0] if magnitudes[1] > 0.: hypo.m_s = magnitudes[1] hypo.location = linestring[56:] return hypo
['def', '_read_hypocentre_from_ndk_string', '(', 'self', ',', 'linestring', ')', ':', 'hypo', '=', 'GCMTHypocentre', '(', ')', 'hypo', '.', 'source', '=', 'linestring', '[', '0', ':', '4', ']', 'hypo', '.', 'date', '=', '_read_date_from_string', '(', 'linestring', '[', '5', ':', '15', ']', ')', 'hypo', '.', 'time', '=', '_read_time_from_string', '(', 'linestring', '[', '16', ':', '26', ']', ')', 'hypo', '.', 'latitude', '=', 'float', '(', 'linestring', '[', '27', ':', '33', ']', ')', 'hypo', '.', 'longitude', '=', 'float', '(', 'linestring', '[', '34', ':', '41', ']', ')', 'hypo', '.', 'depth', '=', 'float', '(', 'linestring', '[', '42', ':', '47', ']', ')', 'magnitudes', '=', '[', 'float', '(', 'x', ')', 'for', 'x', 'in', 'linestring', '[', '48', ':', '55', ']', '.', 'split', '(', "' '", ')', ']', 'if', 'magnitudes', '[', '0', ']', '>', '0.', ':', 'hypo', '.', 'm_b', '=', 'magnitudes', '[', '0', ']', 'if', 'magnitudes', '[', '1', ']', '>', '0.', ':', 'hypo', '.', 'm_s', '=', 'magnitudes', '[', '1', ']', 'hypo', '.', 'location', '=', 'linestring', '[', '56', ':', ']', 'return', 'hypo']
Reads the hypocentre data from the ndk string to return an instance of the GCMTHypocentre class
['Reads', 'the', 'hypocentre', 'data', 'from', 'the', 'ndk', 'string', 'to', 'return', 'an', 'instance', 'of', 'the', 'GCMTHypocentre', 'class']
train
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/parsers/catalogue/gcmt_ndk_parser.py#L315-L333
276
dbcli/athenacli
athenacli/main.py
AthenaCli.handle_editor_command
def handle_editor_command(self, cli, document): """ Editor command is any query that is prefixed or suffixed by a '\e'. The reason for a while loop is because a user might edit a query multiple times. For eg: "select * from \e"<enter> to edit it in vim, then come back to the prompt with the edited query "select * from blah where q = 'abc'\e" to edit it again. :param cli: CommandLineInterface :param document: Document :return: Document """ # FIXME: using application.pre_run_callables like this here is not the best solution. # It's internal api of prompt_toolkit that may change. This was added to fix # https://github.com/dbcli/pgcli/issues/668. We may find a better way to do it in the future. saved_callables = cli.application.pre_run_callables while special.editor_command(document.text): filename = special.get_filename(document.text) query = (special.get_editor_query(document.text) or self.get_last_query()) sql, message = special.open_external_editor(filename, sql=query) if message: # Something went wrong. Raise an exception and bail. raise RuntimeError(message) cli.current_buffer.document = Document(sql, cursor_position=len(sql)) cli.application.pre_run_callables = [] document = cli.run() continue cli.application.pre_run_callables = saved_callables return document
python
def handle_editor_command(self, cli, document): """ Editor command is any query that is prefixed or suffixed by a '\e'. The reason for a while loop is because a user might edit a query multiple times. For eg: "select * from \e"<enter> to edit it in vim, then come back to the prompt with the edited query "select * from blah where q = 'abc'\e" to edit it again. :param cli: CommandLineInterface :param document: Document :return: Document """ # FIXME: using application.pre_run_callables like this here is not the best solution. # It's internal api of prompt_toolkit that may change. This was added to fix # https://github.com/dbcli/pgcli/issues/668. We may find a better way to do it in the future. saved_callables = cli.application.pre_run_callables while special.editor_command(document.text): filename = special.get_filename(document.text) query = (special.get_editor_query(document.text) or self.get_last_query()) sql, message = special.open_external_editor(filename, sql=query) if message: # Something went wrong. Raise an exception and bail. raise RuntimeError(message) cli.current_buffer.document = Document(sql, cursor_position=len(sql)) cli.application.pre_run_callables = [] document = cli.run() continue cli.application.pre_run_callables = saved_callables return document
['def', 'handle_editor_command', '(', 'self', ',', 'cli', ',', 'document', ')', ':', '# FIXME: using application.pre_run_callables like this here is not the best solution.', "# It's internal api of prompt_toolkit that may change. This was added to fix", '# https://github.com/dbcli/pgcli/issues/668. We may find a better way to do it in the future.', 'saved_callables', '=', 'cli', '.', 'application', '.', 'pre_run_callables', 'while', 'special', '.', 'editor_command', '(', 'document', '.', 'text', ')', ':', 'filename', '=', 'special', '.', 'get_filename', '(', 'document', '.', 'text', ')', 'query', '=', '(', 'special', '.', 'get_editor_query', '(', 'document', '.', 'text', ')', 'or', 'self', '.', 'get_last_query', '(', ')', ')', 'sql', ',', 'message', '=', 'special', '.', 'open_external_editor', '(', 'filename', ',', 'sql', '=', 'query', ')', 'if', 'message', ':', '# Something went wrong. Raise an exception and bail.', 'raise', 'RuntimeError', '(', 'message', ')', 'cli', '.', 'current_buffer', '.', 'document', '=', 'Document', '(', 'sql', ',', 'cursor_position', '=', 'len', '(', 'sql', ')', ')', 'cli', '.', 'application', '.', 'pre_run_callables', '=', '[', ']', 'document', '=', 'cli', '.', 'run', '(', ')', 'continue', 'cli', '.', 'application', '.', 'pre_run_callables', '=', 'saved_callables', 'return', 'document']
Editor command is any query that is prefixed or suffixed by a '\e'. The reason for a while loop is because a user might edit a query multiple times. For eg: "select * from \e"<enter> to edit it in vim, then come back to the prompt with the edited query "select * from blah where q = 'abc'\e" to edit it again. :param cli: CommandLineInterface :param document: Document :return: Document
['Editor', 'command', 'is', 'any', 'query', 'that', 'is', 'prefixed', 'or', 'suffixed', 'by', 'a', '\\', 'e', '.', 'The', 'reason', 'for', 'a', 'while', 'loop', 'is', 'because', 'a', 'user', 'might', 'edit', 'a', 'query', 'multiple', 'times', '.', 'For', 'eg', ':', 'select', '*', 'from', '\\', 'e', '<enter', '>', 'to', 'edit', 'it', 'in', 'vim', 'then', 'come', 'back', 'to', 'the', 'prompt', 'with', 'the', 'edited', 'query', 'select', '*', 'from', 'blah', 'where', 'q', '=', 'abc', '\\', 'e', 'to', 'edit', 'it', 'again', '.', ':', 'param', 'cli', ':', 'CommandLineInterface', ':', 'param', 'document', ':', 'Document', ':', 'return', ':', 'Document']
train
https://github.com/dbcli/athenacli/blob/bcab59e4953145866430083e902ed4d042d4ebba/athenacli/main.py#L204-L234
277
andycasey/sick
sick/models/model.py
Model.estimate
def estimate(self, data, full_output=False, **kwargs): """ Estimate the model parameters, given the data. """ # Number of model comparisons can be specified in the configuration. num_model_comparisons = self._configuration.get("estimate", {}).get( "num_model_comparisons", self.grid_points.size) # If it's a fraction, we need to convert that to an integer. if 1 > num_model_comparisons > 0: num_model_comparisons *= self.grid_points.size # If the num_model_comparison is provided as a keyword argument, use it. num_model_comparisons = kwargs.pop("num_model_comparisons", int(num_model_comparisons)) logger.debug("Number of model comparisons to make for initial estimate:" " {0}".format(num_model_comparisons)) # Match the data to the model channels. matched_channels, missing_channels, ignore_parameters \ = self._match_channels_to_data(data) logger.debug("Matched channels: {0}, missing channels: {1}, ignore " "parameters: {2}".format(matched_channels, missing_channels, ignore_parameters)) # Load the intensities t = time() s = self.grid_points.size/num_model_comparisons # step size grid_points = self.grid_points[::s] intensities = np.memmap( self._configuration["model_grid"]["intensities"], dtype="float32", mode="r", shape=(self.grid_points.size, self.wavelengths.size))[::s] logger.debug("Took {:.0f} seconds to load and slice intensities".format( time() - t)) # Which matched, data channel has the highest S/N? # (This channel will be used to estimate astrophysical parameters) data, pixels_affected = self._apply_data_mask(data) median_snr = dict(zip(matched_channels, [np.nanmedian(spec.flux/(spec.variance**0.5)) for spec in data])) median_snr.pop(None, None) # Remove unmatched data spectra ccf_channel = self._configuration.get("settings", {}).get("ccf_channel", max(median_snr, key=median_snr.get)) if ccf_channel not in matched_channels: logger.warn("Ignoring CCF channel {0} because it was not a matched" " channel".format(ccf_channel)) ccf_channel = max(median_snr, key=median_snr.get) logger.debug("Channel with peak SNR is {0}".format(ccf_channel)) # Are there *any* continuum parameters in any matched channel? any_continuum_parameters = any(map(lambda s: s.startswith("continuum_"), set(self.parameters).difference(ignore_parameters))) # [TODO]: CCF MASK # [TODO]: Don't require CCF if we have only continuum parameters. z_limits = self._configuration["settings"].get("ccf_z_limits", None) theta = {} # Dictionary for the estimated model parameters. best_grid_index = None c = speed_of_light.to("km/s").value for matched_channel, spectrum in zip(matched_channels, data): if matched_channel is None: continue # Do we need todo cross-correlation for this channel? # We do if there are redshift parameters for this channel, # or if there is a global redshift or global continuum parameters # and this channel is the highest S/N. if "z_{}".format(matched_channel) in self.parameters \ or ((any_continuum_parameters or "z" in self.parameters) \ and matched_channel == ccf_channel): # Get the continuum degree for this channel. continuum_degree = self._configuration["model"].get("continuum", { matched_channel: -1 })[matched_channel] logger.debug("Perfoming CCF on {0} channel with a continuum " "degree of {1}".format(matched_channel, continuum_degree)) # Get model wavelength indices that match the data. # get the points that are in the mask, and within the spectrum # limits # TODO: Make this CCF not model mask. 
idx = np.where(self._model_mask() \ * (self.wavelengths >= spectrum.disp[0]) \ * (spectrum.disp[-1] >= self.wavelengths))[0] v, v_err, R = spectrum.cross_correlate( (self.wavelengths[idx], intensities[:, idx]), #(self.wavelengths, intensities), continuum_degree=continuum_degree, z_limits=z_limits) # Identify the best point by the CCF peak. best = np.nanargmax(R) # Now, why did we do CCF in this channel? Which model parameters # should be updated? if "z_{}".format(matched_channel) in self.parameters: theta["z_{}".format(matched_channel)] = v[best] / c elif "z" in self.parameters: # If there is a global redshift, update it. theta["z"] = v[best] / c # Continuum parameters will be updated later, so that each # channel is checked to see if it has the highest S/N, # otherwise we might be trying to calculate continuum # parameters when we haven't done CCF on the highest S/N # spectra yet. if matched_channel == ccf_channel: # Update astrophysical parameters. theta.update(dict(zip(grid_points.dtype.names, grid_points[best]))) best_grid_index = best # If there are continuum parameters, calculate them from the best point. if any_continuum_parameters: for matched_channel, spectrum in zip(matched_channels, data): if matched_channel is None: continue # The template spectra at the best point needs to be # redshifted to the data, and then continuum coefficients # calculated from that. # Get the continuum degree for this channel. continuum_degree = self._configuration["model"].get("continuum", { matched_channel: -1 })[matched_channel] # Get model wavelength indices that match the data. idx = np.clip(self.wavelengths.searchsorted( [spectrum.disp[0], spectrum.disp[-1]]) + [0, 1], 0, self.wavelengths.size) # Redshift and bin the spectrum. z = theta.get("z_{}".format(matched_channel), theta.get("z", 0)) best_intensities \ = np.copy(intensities[best_grid_index, idx[0]:idx[1]]).flatten() # Apply model mask. model_mask = self._model_mask(self.wavelengths[idx[0]:idx[1]]) best_intensities[~model_mask] = np.nan best_intensities = best_intensities * specutils.sample.resample( self.wavelengths[idx[0]:idx[1]] * (1 + z), spectrum.disp) # Calculate the continuum coefficients for this channel. continuum = spectrum.flux/best_intensities finite = np.isfinite(continuum) try: coefficients = np.polyfit( spectrum.disp[finite], continuum[finite], continuum_degree, )#w=spectrum.ivariance[finite]) except np.linalg.linalg.LinAlgError: logger.exception("Exception in initial polynomial fit") coefficients = np.polyfit(spectrum.disp[finite], continuum[finite], continuum_degree) # They go into theta backwards. such that coefficients[-1] is # continuum_{name}_0 theta.update(dict(zip( ["continuum_{0}_{1}".format(matched_channel, i) \ for i in range(continuum_degree + 1)], coefficients[::-1] ))) # Remaining parameters could be: resolving power, outlier pixels, # underestimated variance. remaining_parameters = set(self.parameters)\ .difference(ignore_parameters)\ .difference(theta) if remaining_parameters: logger.debug("Remaining parameters to estimate: {0}. For these we " "will just assume reasonable initial values.".format( remaining_parameters)) for parameter in remaining_parameters: if parameter == "resolution" \ or parameter.startswith("resolution_"): if parameter.startswith("resolution_"): spectra = [data[matched_channels.index( parameter.split("_")[1])]] else: spectra = [s for s in data if s is not None] R = [s.disp.mean()/np.diff(s.disp).mean() for s in spectra] # Assume oversampling rate of ~5. 
theta.update({ parameter: np.median(R)/5.}) elif parameter == "ln_f" or parameter.startswith("ln_f_"): theta.update({ parameter: 0.5 }) # Not overestimated. elif parameter in ("Po", "Vo"): theta.update({ "Po": 0.01, # 1% outlier pixels. "Vo": np.mean([np.nanmedian(s.variance) for s in data]), }) logger.info("Initial estimate: {}".format(theta)) # Having full_output = True means return the best spectra estimate. if full_output: # Create model fluxes and calculate some metric. __intensities = np.copy(intensities[best_grid_index]) # Apply model masks. __intensities[~self._model_mask()] = np.nan chi_sq, dof, model_fluxes = self._chi_sq(theta, data, __intensities=__intensities, __no_precomputed_binning=True) del intensities return (theta, chi_sq, dof, model_fluxes) # Delete the reference to intensities del intensities return theta
python
def estimate(self, data, full_output=False, **kwargs): """ Estimate the model parameters, given the data. """ # Number of model comparisons can be specified in the configuration. num_model_comparisons = self._configuration.get("estimate", {}).get( "num_model_comparisons", self.grid_points.size) # If it's a fraction, we need to convert that to an integer. if 1 > num_model_comparisons > 0: num_model_comparisons *= self.grid_points.size # If the num_model_comparison is provided as a keyword argument, use it. num_model_comparisons = kwargs.pop("num_model_comparisons", int(num_model_comparisons)) logger.debug("Number of model comparisons to make for initial estimate:" " {0}".format(num_model_comparisons)) # Match the data to the model channels. matched_channels, missing_channels, ignore_parameters \ = self._match_channels_to_data(data) logger.debug("Matched channels: {0}, missing channels: {1}, ignore " "parameters: {2}".format(matched_channels, missing_channels, ignore_parameters)) # Load the intensities t = time() s = self.grid_points.size/num_model_comparisons # step size grid_points = self.grid_points[::s] intensities = np.memmap( self._configuration["model_grid"]["intensities"], dtype="float32", mode="r", shape=(self.grid_points.size, self.wavelengths.size))[::s] logger.debug("Took {:.0f} seconds to load and slice intensities".format( time() - t)) # Which matched, data channel has the highest S/N? # (This channel will be used to estimate astrophysical parameters) data, pixels_affected = self._apply_data_mask(data) median_snr = dict(zip(matched_channels, [np.nanmedian(spec.flux/(spec.variance**0.5)) for spec in data])) median_snr.pop(None, None) # Remove unmatched data spectra ccf_channel = self._configuration.get("settings", {}).get("ccf_channel", max(median_snr, key=median_snr.get)) if ccf_channel not in matched_channels: logger.warn("Ignoring CCF channel {0} because it was not a matched" " channel".format(ccf_channel)) ccf_channel = max(median_snr, key=median_snr.get) logger.debug("Channel with peak SNR is {0}".format(ccf_channel)) # Are there *any* continuum parameters in any matched channel? any_continuum_parameters = any(map(lambda s: s.startswith("continuum_"), set(self.parameters).difference(ignore_parameters))) # [TODO]: CCF MASK # [TODO]: Don't require CCF if we have only continuum parameters. z_limits = self._configuration["settings"].get("ccf_z_limits", None) theta = {} # Dictionary for the estimated model parameters. best_grid_index = None c = speed_of_light.to("km/s").value for matched_channel, spectrum in zip(matched_channels, data): if matched_channel is None: continue # Do we need todo cross-correlation for this channel? # We do if there are redshift parameters for this channel, # or if there is a global redshift or global continuum parameters # and this channel is the highest S/N. if "z_{}".format(matched_channel) in self.parameters \ or ((any_continuum_parameters or "z" in self.parameters) \ and matched_channel == ccf_channel): # Get the continuum degree for this channel. continuum_degree = self._configuration["model"].get("continuum", { matched_channel: -1 })[matched_channel] logger.debug("Perfoming CCF on {0} channel with a continuum " "degree of {1}".format(matched_channel, continuum_degree)) # Get model wavelength indices that match the data. # get the points that are in the mask, and within the spectrum # limits # TODO: Make this CCF not model mask. 
idx = np.where(self._model_mask() \ * (self.wavelengths >= spectrum.disp[0]) \ * (spectrum.disp[-1] >= self.wavelengths))[0] v, v_err, R = spectrum.cross_correlate( (self.wavelengths[idx], intensities[:, idx]), #(self.wavelengths, intensities), continuum_degree=continuum_degree, z_limits=z_limits) # Identify the best point by the CCF peak. best = np.nanargmax(R) # Now, why did we do CCF in this channel? Which model parameters # should be updated? if "z_{}".format(matched_channel) in self.parameters: theta["z_{}".format(matched_channel)] = v[best] / c elif "z" in self.parameters: # If there is a global redshift, update it. theta["z"] = v[best] / c # Continuum parameters will be updated later, so that each # channel is checked to see if it has the highest S/N, # otherwise we might be trying to calculate continuum # parameters when we haven't done CCF on the highest S/N # spectra yet. if matched_channel == ccf_channel: # Update astrophysical parameters. theta.update(dict(zip(grid_points.dtype.names, grid_points[best]))) best_grid_index = best # If there are continuum parameters, calculate them from the best point. if any_continuum_parameters: for matched_channel, spectrum in zip(matched_channels, data): if matched_channel is None: continue # The template spectra at the best point needs to be # redshifted to the data, and then continuum coefficients # calculated from that. # Get the continuum degree for this channel. continuum_degree = self._configuration["model"].get("continuum", { matched_channel: -1 })[matched_channel] # Get model wavelength indices that match the data. idx = np.clip(self.wavelengths.searchsorted( [spectrum.disp[0], spectrum.disp[-1]]) + [0, 1], 0, self.wavelengths.size) # Redshift and bin the spectrum. z = theta.get("z_{}".format(matched_channel), theta.get("z", 0)) best_intensities \ = np.copy(intensities[best_grid_index, idx[0]:idx[1]]).flatten() # Apply model mask. model_mask = self._model_mask(self.wavelengths[idx[0]:idx[1]]) best_intensities[~model_mask] = np.nan best_intensities = best_intensities * specutils.sample.resample( self.wavelengths[idx[0]:idx[1]] * (1 + z), spectrum.disp) # Calculate the continuum coefficients for this channel. continuum = spectrum.flux/best_intensities finite = np.isfinite(continuum) try: coefficients = np.polyfit( spectrum.disp[finite], continuum[finite], continuum_degree, )#w=spectrum.ivariance[finite]) except np.linalg.linalg.LinAlgError: logger.exception("Exception in initial polynomial fit") coefficients = np.polyfit(spectrum.disp[finite], continuum[finite], continuum_degree) # They go into theta backwards. such that coefficients[-1] is # continuum_{name}_0 theta.update(dict(zip( ["continuum_{0}_{1}".format(matched_channel, i) \ for i in range(continuum_degree + 1)], coefficients[::-1] ))) # Remaining parameters could be: resolving power, outlier pixels, # underestimated variance. remaining_parameters = set(self.parameters)\ .difference(ignore_parameters)\ .difference(theta) if remaining_parameters: logger.debug("Remaining parameters to estimate: {0}. For these we " "will just assume reasonable initial values.".format( remaining_parameters)) for parameter in remaining_parameters: if parameter == "resolution" \ or parameter.startswith("resolution_"): if parameter.startswith("resolution_"): spectra = [data[matched_channels.index( parameter.split("_")[1])]] else: spectra = [s for s in data if s is not None] R = [s.disp.mean()/np.diff(s.disp).mean() for s in spectra] # Assume oversampling rate of ~5. 
theta.update({ parameter: np.median(R)/5.}) elif parameter == "ln_f" or parameter.startswith("ln_f_"): theta.update({ parameter: 0.5 }) # Not overestimated. elif parameter in ("Po", "Vo"): theta.update({ "Po": 0.01, # 1% outlier pixels. "Vo": np.mean([np.nanmedian(s.variance) for s in data]), }) logger.info("Initial estimate: {}".format(theta)) # Having full_output = True means return the best spectra estimate. if full_output: # Create model fluxes and calculate some metric. __intensities = np.copy(intensities[best_grid_index]) # Apply model masks. __intensities[~self._model_mask()] = np.nan chi_sq, dof, model_fluxes = self._chi_sq(theta, data, __intensities=__intensities, __no_precomputed_binning=True) del intensities return (theta, chi_sq, dof, model_fluxes) # Delete the reference to intensities del intensities return theta
['def', 'estimate', '(', 'self', ',', 'data', ',', 'full_output', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', '# Number of model comparisons can be specified in the configuration.', 'num_model_comparisons', '=', 'self', '.', '_configuration', '.', 'get', '(', '"estimate"', ',', '{', '}', ')', '.', 'get', '(', '"num_model_comparisons"', ',', 'self', '.', 'grid_points', '.', 'size', ')', "# If it's a fraction, we need to convert that to an integer.", 'if', '1', '>', 'num_model_comparisons', '>', '0', ':', 'num_model_comparisons', '*=', 'self', '.', 'grid_points', '.', 'size', '# If the num_model_comparison is provided as a keyword argument, use it.', 'num_model_comparisons', '=', 'kwargs', '.', 'pop', '(', '"num_model_comparisons"', ',', 'int', '(', 'num_model_comparisons', ')', ')', 'logger', '.', 'debug', '(', '"Number of model comparisons to make for initial estimate:"', '" {0}"', '.', 'format', '(', 'num_model_comparisons', ')', ')', '# Match the data to the model channels.', 'matched_channels', ',', 'missing_channels', ',', 'ignore_parameters', '=', 'self', '.', '_match_channels_to_data', '(', 'data', ')', 'logger', '.', 'debug', '(', '"Matched channels: {0}, missing channels: {1}, ignore "', '"parameters: {2}"', '.', 'format', '(', 'matched_channels', ',', 'missing_channels', ',', 'ignore_parameters', ')', ')', '# Load the intensities', 't', '=', 'time', '(', ')', 's', '=', 'self', '.', 'grid_points', '.', 'size', '/', 'num_model_comparisons', '# step size', 'grid_points', '=', 'self', '.', 'grid_points', '[', ':', ':', 's', ']', 'intensities', '=', 'np', '.', 'memmap', '(', 'self', '.', '_configuration', '[', '"model_grid"', ']', '[', '"intensities"', ']', ',', 'dtype', '=', '"float32"', ',', 'mode', '=', '"r"', ',', 'shape', '=', '(', 'self', '.', 'grid_points', '.', 'size', ',', 'self', '.', 'wavelengths', '.', 'size', ')', ')', '[', ':', ':', 's', ']', 'logger', '.', 'debug', '(', '"Took {:.0f} seconds to load and slice intensities"', '.', 'format', '(', 'time', '(', ')', '-', 't', ')', ')', '# Which matched, data channel has the highest S/N?', '# (This channel will be used to estimate astrophysical parameters)', 'data', ',', 'pixels_affected', '=', 'self', '.', '_apply_data_mask', '(', 'data', ')', 'median_snr', '=', 'dict', '(', 'zip', '(', 'matched_channels', ',', '[', 'np', '.', 'nanmedian', '(', 'spec', '.', 'flux', '/', '(', 'spec', '.', 'variance', '**', '0.5', ')', ')', 'for', 'spec', 'in', 'data', ']', ')', ')', 'median_snr', '.', 'pop', '(', 'None', ',', 'None', ')', '# Remove unmatched data spectra', 'ccf_channel', '=', 'self', '.', '_configuration', '.', 'get', '(', '"settings"', ',', '{', '}', ')', '.', 'get', '(', '"ccf_channel"', ',', 'max', '(', 'median_snr', ',', 'key', '=', 'median_snr', '.', 'get', ')', ')', 'if', 'ccf_channel', 'not', 'in', 'matched_channels', ':', 'logger', '.', 'warn', '(', '"Ignoring CCF channel {0} because it was not a matched"', '" channel"', '.', 'format', '(', 'ccf_channel', ')', ')', 'ccf_channel', '=', 'max', '(', 'median_snr', ',', 'key', '=', 'median_snr', '.', 'get', ')', 'logger', '.', 'debug', '(', '"Channel with peak SNR is {0}"', '.', 'format', '(', 'ccf_channel', ')', ')', '# Are there *any* continuum parameters in any matched channel?', 'any_continuum_parameters', '=', 'any', '(', 'map', '(', 'lambda', 's', ':', 's', '.', 'startswith', '(', '"continuum_"', ')', ',', 'set', '(', 'self', '.', 'parameters', ')', '.', 'difference', '(', 'ignore_parameters', ')', ')', ')', '# [TODO]: CCF MASK', "# [TODO]: Don't require CCF if 
we have only continuum parameters.", 'z_limits', '=', 'self', '.', '_configuration', '[', '"settings"', ']', '.', 'get', '(', '"ccf_z_limits"', ',', 'None', ')', 'theta', '=', '{', '}', '# Dictionary for the estimated model parameters.', 'best_grid_index', '=', 'None', 'c', '=', 'speed_of_light', '.', 'to', '(', '"km/s"', ')', '.', 'value', 'for', 'matched_channel', ',', 'spectrum', 'in', 'zip', '(', 'matched_channels', ',', 'data', ')', ':', 'if', 'matched_channel', 'is', 'None', ':', 'continue', '# Do we need todo cross-correlation for this channel?', '# We do if there are redshift parameters for this channel,', '# or if there is a global redshift or global continuum parameters', '# and this channel is the highest S/N.', 'if', '"z_{}"', '.', 'format', '(', 'matched_channel', ')', 'in', 'self', '.', 'parameters', 'or', '(', '(', 'any_continuum_parameters', 'or', '"z"', 'in', 'self', '.', 'parameters', ')', 'and', 'matched_channel', '==', 'ccf_channel', ')', ':', '# Get the continuum degree for this channel.', 'continuum_degree', '=', 'self', '.', '_configuration', '[', '"model"', ']', '.', 'get', '(', '"continuum"', ',', '{', 'matched_channel', ':', '-', '1', '}', ')', '[', 'matched_channel', ']', 'logger', '.', 'debug', '(', '"Perfoming CCF on {0} channel with a continuum "', '"degree of {1}"', '.', 'format', '(', 'matched_channel', ',', 'continuum_degree', ')', ')', '# Get model wavelength indices that match the data.', '# get the points that are in the mask, and within the spectrum', '# limits', '# TODO: Make this CCF not model mask.', 'idx', '=', 'np', '.', 'where', '(', 'self', '.', '_model_mask', '(', ')', '*', '(', 'self', '.', 'wavelengths', '>=', 'spectrum', '.', 'disp', '[', '0', ']', ')', '*', '(', 'spectrum', '.', 'disp', '[', '-', '1', ']', '>=', 'self', '.', 'wavelengths', ')', ')', '[', '0', ']', 'v', ',', 'v_err', ',', 'R', '=', 'spectrum', '.', 'cross_correlate', '(', '(', 'self', '.', 'wavelengths', '[', 'idx', ']', ',', 'intensities', '[', ':', ',', 'idx', ']', ')', ',', '#(self.wavelengths, intensities),', 'continuum_degree', '=', 'continuum_degree', ',', 'z_limits', '=', 'z_limits', ')', '# Identify the best point by the CCF peak.', 'best', '=', 'np', '.', 'nanargmax', '(', 'R', ')', '# Now, why did we do CCF in this channel? 
Which model parameters', '# should be updated?', 'if', '"z_{}"', '.', 'format', '(', 'matched_channel', ')', 'in', 'self', '.', 'parameters', ':', 'theta', '[', '"z_{}"', '.', 'format', '(', 'matched_channel', ')', ']', '=', 'v', '[', 'best', ']', '/', 'c', 'elif', '"z"', 'in', 'self', '.', 'parameters', ':', '# If there is a global redshift, update it.', 'theta', '[', '"z"', ']', '=', 'v', '[', 'best', ']', '/', 'c', '# Continuum parameters will be updated later, so that each', '# channel is checked to see if it has the highest S/N,', '# otherwise we might be trying to calculate continuum', "# parameters when we haven't done CCF on the highest S/N", '# spectra yet.', 'if', 'matched_channel', '==', 'ccf_channel', ':', '# Update astrophysical parameters.', 'theta', '.', 'update', '(', 'dict', '(', 'zip', '(', 'grid_points', '.', 'dtype', '.', 'names', ',', 'grid_points', '[', 'best', ']', ')', ')', ')', 'best_grid_index', '=', 'best', '# If there are continuum parameters, calculate them from the best point.', 'if', 'any_continuum_parameters', ':', 'for', 'matched_channel', ',', 'spectrum', 'in', 'zip', '(', 'matched_channels', ',', 'data', ')', ':', 'if', 'matched_channel', 'is', 'None', ':', 'continue', '# The template spectra at the best point needs to be', '# redshifted to the data, and then continuum coefficients', '# calculated from that.', '# Get the continuum degree for this channel.', 'continuum_degree', '=', 'self', '.', '_configuration', '[', '"model"', ']', '.', 'get', '(', '"continuum"', ',', '{', 'matched_channel', ':', '-', '1', '}', ')', '[', 'matched_channel', ']', '# Get model wavelength indices that match the data.', 'idx', '=', 'np', '.', 'clip', '(', 'self', '.', 'wavelengths', '.', 'searchsorted', '(', '[', 'spectrum', '.', 'disp', '[', '0', ']', ',', 'spectrum', '.', 'disp', '[', '-', '1', ']', ']', ')', '+', '[', '0', ',', '1', ']', ',', '0', ',', 'self', '.', 'wavelengths', '.', 'size', ')', '# Redshift and bin the spectrum.', 'z', '=', 'theta', '.', 'get', '(', '"z_{}"', '.', 'format', '(', 'matched_channel', ')', ',', 'theta', '.', 'get', '(', '"z"', ',', '0', ')', ')', 'best_intensities', '=', 'np', '.', 'copy', '(', 'intensities', '[', 'best_grid_index', ',', 'idx', '[', '0', ']', ':', 'idx', '[', '1', ']', ']', ')', '.', 'flatten', '(', ')', '# Apply model mask.', 'model_mask', '=', 'self', '.', '_model_mask', '(', 'self', '.', 'wavelengths', '[', 'idx', '[', '0', ']', ':', 'idx', '[', '1', ']', ']', ')', 'best_intensities', '[', '~', 'model_mask', ']', '=', 'np', '.', 'nan', 'best_intensities', '=', 'best_intensities', '*', 'specutils', '.', 'sample', '.', 'resample', '(', 'self', '.', 'wavelengths', '[', 'idx', '[', '0', ']', ':', 'idx', '[', '1', ']', ']', '*', '(', '1', '+', 'z', ')', ',', 'spectrum', '.', 'disp', ')', '# Calculate the continuum coefficients for this channel.', 'continuum', '=', 'spectrum', '.', 'flux', '/', 'best_intensities', 'finite', '=', 'np', '.', 'isfinite', '(', 'continuum', ')', 'try', ':', 'coefficients', '=', 'np', '.', 'polyfit', '(', 'spectrum', '.', 'disp', '[', 'finite', ']', ',', 'continuum', '[', 'finite', ']', ',', 'continuum_degree', ',', ')', '#w=spectrum.ivariance[finite])', 'except', 'np', '.', 'linalg', '.', 'linalg', '.', 'LinAlgError', ':', 'logger', '.', 'exception', '(', '"Exception in initial polynomial fit"', ')', 'coefficients', '=', 'np', '.', 'polyfit', '(', 'spectrum', '.', 'disp', '[', 'finite', ']', ',', 'continuum', '[', 'finite', ']', ',', 'continuum_degree', ')', '# They go into theta backwards. 
such that coefficients[-1] is', '# continuum_{name}_0', 'theta', '.', 'update', '(', 'dict', '(', 'zip', '(', '[', '"continuum_{0}_{1}"', '.', 'format', '(', 'matched_channel', ',', 'i', ')', 'for', 'i', 'in', 'range', '(', 'continuum_degree', '+', '1', ')', ']', ',', 'coefficients', '[', ':', ':', '-', '1', ']', ')', ')', ')', '# Remaining parameters could be: resolving power, outlier pixels,', '# underestimated variance.', 'remaining_parameters', '=', 'set', '(', 'self', '.', 'parameters', ')', '.', 'difference', '(', 'ignore_parameters', ')', '.', 'difference', '(', 'theta', ')', 'if', 'remaining_parameters', ':', 'logger', '.', 'debug', '(', '"Remaining parameters to estimate: {0}. For these we "', '"will just assume reasonable initial values."', '.', 'format', '(', 'remaining_parameters', ')', ')', 'for', 'parameter', 'in', 'remaining_parameters', ':', 'if', 'parameter', '==', '"resolution"', 'or', 'parameter', '.', 'startswith', '(', '"resolution_"', ')', ':', 'if', 'parameter', '.', 'startswith', '(', '"resolution_"', ')', ':', 'spectra', '=', '[', 'data', '[', 'matched_channels', '.', 'index', '(', 'parameter', '.', 'split', '(', '"_"', ')', '[', '1', ']', ')', ']', ']', 'else', ':', 'spectra', '=', '[', 's', 'for', 's', 'in', 'data', 'if', 's', 'is', 'not', 'None', ']', 'R', '=', '[', 's', '.', 'disp', '.', 'mean', '(', ')', '/', 'np', '.', 'diff', '(', 's', '.', 'disp', ')', '.', 'mean', '(', ')', 'for', 's', 'in', 'spectra', ']', '# Assume oversampling rate of ~5.', 'theta', '.', 'update', '(', '{', 'parameter', ':', 'np', '.', 'median', '(', 'R', ')', '/', '5.', '}', ')', 'elif', 'parameter', '==', '"ln_f"', 'or', 'parameter', '.', 'startswith', '(', '"ln_f_"', ')', ':', 'theta', '.', 'update', '(', '{', 'parameter', ':', '0.5', '}', ')', '# Not overestimated.', 'elif', 'parameter', 'in', '(', '"Po"', ',', '"Vo"', ')', ':', 'theta', '.', 'update', '(', '{', '"Po"', ':', '0.01', ',', '# 1% outlier pixels.', '"Vo"', ':', 'np', '.', 'mean', '(', '[', 'np', '.', 'nanmedian', '(', 's', '.', 'variance', ')', 'for', 's', 'in', 'data', ']', ')', ',', '}', ')', 'logger', '.', 'info', '(', '"Initial estimate: {}"', '.', 'format', '(', 'theta', ')', ')', '# Having full_output = True means return the best spectra estimate.', 'if', 'full_output', ':', '# Create model fluxes and calculate some metric.', '__intensities', '=', 'np', '.', 'copy', '(', 'intensities', '[', 'best_grid_index', ']', ')', '# Apply model masks.', '__intensities', '[', '~', 'self', '.', '_model_mask', '(', ')', ']', '=', 'np', '.', 'nan', 'chi_sq', ',', 'dof', ',', 'model_fluxes', '=', 'self', '.', '_chi_sq', '(', 'theta', ',', 'data', ',', '__intensities', '=', '__intensities', ',', '__no_precomputed_binning', '=', 'True', ')', 'del', 'intensities', 'return', '(', 'theta', ',', 'chi_sq', ',', 'dof', ',', 'model_fluxes', ')', '# Delete the reference to intensities', 'del', 'intensities', 'return', 'theta']
Estimate the model parameters, given the data.
['Estimate', 'the', 'model', 'parameters', 'given', 'the', 'data', '.']
train
https://github.com/andycasey/sick/blob/6c37686182794c4cafea45abf7062b30b789b1a2/sick/models/model.py#L32-L257
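The estimate() record above thins the model grid when num_model_comparisons is given as a fraction of the grid size. A minimal sketch of that arithmetic, using made-up sizes (the real values come from the model configuration, not from this snippet):

grid_size = 10000                 # hypothetical number of grid points
num_model_comparisons = 0.1       # a fraction in the config means "compare against 10% of the grid"
if 1 > num_model_comparisons > 0:
    num_model_comparisons = int(num_model_comparisons * grid_size)   # -> 1000
step = grid_size // num_model_comparisons                            # slice step used on the intensities memmap -> 10
print(num_model_comparisons, step)                                   # 1000 10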
278
PMEAL/OpenPNM
openpnm/core/Base.py
Base.filter_by_label
def filter_by_label(self, pores=[], throats=[], labels=None, mode='or'): r""" Returns which of the supplied pores (or throats) has the specified label Parameters ---------- pores, or throats : array_like List of pores or throats to be filtered labels : list of strings The labels to apply as a filter mode : string Controls how the filter is applied. Options include: **'or', 'union', 'any'**: (default) Returns a list of the given locations where *any* of the given labels exist. **'and', 'intersection', 'all'**: Only locations where *all* the given labels are found. **'xor', 'exclusive_or'**: Only locations where exactly *one* of the given labels are found. **'nor', 'none', 'not'**: Only locations where *none* of the given labels are found. **'nand'** : Only locations with *some but not all* of the given labels are returned. **'xnor'** : Only locations with *more than one* of the given labels are returned. Returns ------- A list of pores (or throats) that have been filtered according the given criteria. The returned list is a subset of the received list of pores (or throats). See Also -------- pores throats Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[5, 5, 5]) >>> pn.filter_by_label(pores=[0, 1, 5, 6], labels='left') array([0, 1]) >>> Ps = pn.pores(['top', 'bottom', 'front'], mode='or') >>> pn.filter_by_label(pores=Ps, labels=['top', 'front'], ... mode='and') array([ 4, 9, 14, 19, 24]) """ # Convert inputs to locations and element if (sp.size(throats) > 0) and (sp.size(pores) > 0): raise Exception('Can only filter either pores OR labels') if sp.size(pores) > 0: element = 'pore' locations = self._parse_indices(pores) elif sp.size(throats) > 0: element = 'throat' locations = self._parse_indices(throats) else: return(sp.array([], dtype=int)) labels = self._parse_labels(labels=labels, element=element) labels = [element+'.'+item.split('.')[-1] for item in labels] all_locs = self._get_indices(element=element, labels=labels, mode=mode) mask = self._tomask(indices=all_locs, element=element) ind = mask[locations] return locations[ind]
python
def filter_by_label(self, pores=[], throats=[], labels=None, mode='or'): r""" Returns which of the supplied pores (or throats) has the specified label Parameters ---------- pores, or throats : array_like List of pores or throats to be filtered labels : list of strings The labels to apply as a filter mode : string Controls how the filter is applied. Options include: **'or', 'union', 'any'**: (default) Returns a list of the given locations where *any* of the given labels exist. **'and', 'intersection', 'all'**: Only locations where *all* the given labels are found. **'xor', 'exclusive_or'**: Only locations where exactly *one* of the given labels are found. **'nor', 'none', 'not'**: Only locations where *none* of the given labels are found. **'nand'** : Only locations with *some but not all* of the given labels are returned. **'xnor'** : Only locations with *more than one* of the given labels are returned. Returns ------- A list of pores (or throats) that have been filtered according the given criteria. The returned list is a subset of the received list of pores (or throats). See Also -------- pores throats Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[5, 5, 5]) >>> pn.filter_by_label(pores=[0, 1, 5, 6], labels='left') array([0, 1]) >>> Ps = pn.pores(['top', 'bottom', 'front'], mode='or') >>> pn.filter_by_label(pores=Ps, labels=['top', 'front'], ... mode='and') array([ 4, 9, 14, 19, 24]) """ # Convert inputs to locations and element if (sp.size(throats) > 0) and (sp.size(pores) > 0): raise Exception('Can only filter either pores OR labels') if sp.size(pores) > 0: element = 'pore' locations = self._parse_indices(pores) elif sp.size(throats) > 0: element = 'throat' locations = self._parse_indices(throats) else: return(sp.array([], dtype=int)) labels = self._parse_labels(labels=labels, element=element) labels = [element+'.'+item.split('.')[-1] for item in labels] all_locs = self._get_indices(element=element, labels=labels, mode=mode) mask = self._tomask(indices=all_locs, element=element) ind = mask[locations] return locations[ind]
['def', 'filter_by_label', '(', 'self', ',', 'pores', '=', '[', ']', ',', 'throats', '=', '[', ']', ',', 'labels', '=', 'None', ',', 'mode', '=', "'or'", ')', ':', '# Convert inputs to locations and element', 'if', '(', 'sp', '.', 'size', '(', 'throats', ')', '>', '0', ')', 'and', '(', 'sp', '.', 'size', '(', 'pores', ')', '>', '0', ')', ':', 'raise', 'Exception', '(', "'Can only filter either pores OR labels'", ')', 'if', 'sp', '.', 'size', '(', 'pores', ')', '>', '0', ':', 'element', '=', "'pore'", 'locations', '=', 'self', '.', '_parse_indices', '(', 'pores', ')', 'elif', 'sp', '.', 'size', '(', 'throats', ')', '>', '0', ':', 'element', '=', "'throat'", 'locations', '=', 'self', '.', '_parse_indices', '(', 'throats', ')', 'else', ':', 'return', '(', 'sp', '.', 'array', '(', '[', ']', ',', 'dtype', '=', 'int', ')', ')', 'labels', '=', 'self', '.', '_parse_labels', '(', 'labels', '=', 'labels', ',', 'element', '=', 'element', ')', 'labels', '=', '[', 'element', '+', "'.'", '+', 'item', '.', 'split', '(', "'.'", ')', '[', '-', '1', ']', 'for', 'item', 'in', 'labels', ']', 'all_locs', '=', 'self', '.', '_get_indices', '(', 'element', '=', 'element', ',', 'labels', '=', 'labels', ',', 'mode', '=', 'mode', ')', 'mask', '=', 'self', '.', '_tomask', '(', 'indices', '=', 'all_locs', ',', 'element', '=', 'element', ')', 'ind', '=', 'mask', '[', 'locations', ']', 'return', 'locations', '[', 'ind', ']']
r""" Returns which of the supplied pores (or throats) has the specified label Parameters ---------- pores, or throats : array_like List of pores or throats to be filtered labels : list of strings The labels to apply as a filter mode : string Controls how the filter is applied. Options include: **'or', 'union', 'any'**: (default) Returns a list of the given locations where *any* of the given labels exist. **'and', 'intersection', 'all'**: Only locations where *all* the given labels are found. **'xor', 'exclusive_or'**: Only locations where exactly *one* of the given labels are found. **'nor', 'none', 'not'**: Only locations where *none* of the given labels are found. **'nand'** : Only locations with *some but not all* of the given labels are returned. **'xnor'** : Only locations with *more than one* of the given labels are returned. Returns ------- A list of pores (or throats) that have been filtered according the given criteria. The returned list is a subset of the received list of pores (or throats). See Also -------- pores throats Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[5, 5, 5]) >>> pn.filter_by_label(pores=[0, 1, 5, 6], labels='left') array([0, 1]) >>> Ps = pn.pores(['top', 'bottom', 'front'], mode='or') >>> pn.filter_by_label(pores=Ps, labels=['top', 'front'], ... mode='and') array([ 4, 9, 14, 19, 24])
['r', 'Returns', 'which', 'of', 'the', 'supplied', 'pores', '(', 'or', 'throats', ')', 'has', 'the', 'specified', 'label']
train
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/core/Base.py#L1178-L1251
279
SpamScope/mail-parser
mailparser/utils.py
decode_header_part
def decode_header_part(header): """ Given an raw header returns an decoded header Args: header (string): header to decode Returns: str (Python 3) or unicode (Python 2) """ if not header: return six.text_type() output = six.text_type() try: for d, c in decode_header(header): c = c if c else 'utf-8' output += ported_string(d, c, 'ignore') # Header parsing failed, when header has charset Shift_JIS except (HeaderParseError, UnicodeError): log.error("Failed decoding header part: {}".format(header)) output += header return output
python
def decode_header_part(header): """ Given an raw header returns an decoded header Args: header (string): header to decode Returns: str (Python 3) or unicode (Python 2) """ if not header: return six.text_type() output = six.text_type() try: for d, c in decode_header(header): c = c if c else 'utf-8' output += ported_string(d, c, 'ignore') # Header parsing failed, when header has charset Shift_JIS except (HeaderParseError, UnicodeError): log.error("Failed decoding header part: {}".format(header)) output += header return output
['def', 'decode_header_part', '(', 'header', ')', ':', 'if', 'not', 'header', ':', 'return', 'six', '.', 'text_type', '(', ')', 'output', '=', 'six', '.', 'text_type', '(', ')', 'try', ':', 'for', 'd', ',', 'c', 'in', 'decode_header', '(', 'header', ')', ':', 'c', '=', 'c', 'if', 'c', 'else', "'utf-8'", 'output', '+=', 'ported_string', '(', 'd', ',', 'c', ',', "'ignore'", ')', '# Header parsing failed, when header has charset Shift_JIS', 'except', '(', 'HeaderParseError', ',', 'UnicodeError', ')', ':', 'log', '.', 'error', '(', '"Failed decoding header part: {}"', '.', 'format', '(', 'header', ')', ')', 'output', '+=', 'header', 'return', 'output']
Given a raw header, returns a decoded header Args: header (string): header to decode Returns: str (Python 3) or unicode (Python 2)
['Given', 'an', 'raw', 'header', 'returns', 'an', 'decoded', 'header']
train
https://github.com/SpamScope/mail-parser/blob/814b56d0b803feab9dea04f054b802ce138097e2/mailparser/utils.py#L117-L142
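A short usage sketch for decode_header_part above; it assumes the mail-parser package is installed (otherwise the same behaviour can be reproduced with email.header.decode_header plus manual charset decoding), and the expected output is indicative rather than guaranteed:

from mailparser.utils import decode_header_part

# RFC 2047 encoded words: base64 UTF-8 followed by quoted-printable latin-1.
raw = "=?utf-8?B?Q2lhbw==?= =?iso-8859-1?Q?Jos=E9?="
print(decode_header_part(raw))   # expected output along the lines of "CiaoJosé"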
280
quantumlib/Cirq
cirq/linalg/tolerance.py
all_near_zero_mod
def all_near_zero_mod(a: Union[float, complex, Iterable[float], np.ndarray], period: float, *, atol: float = 1e-8) -> bool: """Checks if the tensor's elements are all near multiples of the period. Args: a: Tensor of elements that could all be near multiples of the period. period: The period, e.g. 2 pi when working in radians. atol: Absolute tolerance. """ b = (np.asarray(a) + period / 2) % period - period / 2 return np.all(np.less_equal(np.abs(b), atol))
python
def all_near_zero_mod(a: Union[float, complex, Iterable[float], np.ndarray], period: float, *, atol: float = 1e-8) -> bool: """Checks if the tensor's elements are all near multiples of the period. Args: a: Tensor of elements that could all be near multiples of the period. period: The period, e.g. 2 pi when working in radians. atol: Absolute tolerance. """ b = (np.asarray(a) + period / 2) % period - period / 2 return np.all(np.less_equal(np.abs(b), atol))
['def', 'all_near_zero_mod', '(', 'a', ':', 'Union', '[', 'float', ',', 'complex', ',', 'Iterable', '[', 'float', ']', ',', 'np', '.', 'ndarray', ']', ',', 'period', ':', 'float', ',', '*', ',', 'atol', ':', 'float', '=', '1e-8', ')', '->', 'bool', ':', 'b', '=', '(', 'np', '.', 'asarray', '(', 'a', ')', '+', 'period', '/', '2', ')', '%', 'period', '-', 'period', '/', '2', 'return', 'np', '.', 'all', '(', 'np', '.', 'less_equal', '(', 'np', '.', 'abs', '(', 'b', ')', ',', 'atol', ')', ')']
Checks if the tensor's elements are all near multiples of the period. Args: a: Tensor of elements that could all be near multiples of the period. period: The period, e.g. 2 pi when working in radians. atol: Absolute tolerance.
['Checks', 'if', 'the', 'tensor', 's', 'elements', 'are', 'all', 'near', 'multiples', 'of', 'the', 'period', '.']
train
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/linalg/tolerance.py#L34-L46
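To illustrate all_near_zero_mod above, here is a standalone restatement of the same arithmetic (shift each element into (-period/2, period/2] and compare against atol), written so it runs without Cirq installed:

import numpy as np

def all_near_zero_mod(a, period, atol=1e-8):
    # same computation as the record above
    b = (np.asarray(a) + period / 2) % period - period / 2
    return bool(np.all(np.abs(b) <= atol))

print(all_near_zero_mod([0.0, 2 * np.pi, -4 * np.pi + 1e-10], 2 * np.pi))   # True
print(all_near_zero_mod([np.pi / 2], 2 * np.pi))                            # False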
281
happyleavesaoc/python-firetv
firetv/__init__.py
FireTV.current_app
def current_app(self): """Return the current app.""" current_focus = self.adb_shell(CURRENT_APP_CMD) if current_focus is None: return None current_focus = current_focus.replace("\r", "") matches = WINDOW_REGEX.search(current_focus) # case 1: current app was successfully found if matches: (pkg, activity) = matches.group("package", "activity") return {"package": pkg, "activity": activity} # case 2: current app could not be found logging.warning("Couldn't get current app, reply was %s", current_focus) return None
python
def current_app(self): """Return the current app.""" current_focus = self.adb_shell(CURRENT_APP_CMD) if current_focus is None: return None current_focus = current_focus.replace("\r", "") matches = WINDOW_REGEX.search(current_focus) # case 1: current app was successfully found if matches: (pkg, activity) = matches.group("package", "activity") return {"package": pkg, "activity": activity} # case 2: current app could not be found logging.warning("Couldn't get current app, reply was %s", current_focus) return None
['def', 'current_app', '(', 'self', ')', ':', 'current_focus', '=', 'self', '.', 'adb_shell', '(', 'CURRENT_APP_CMD', ')', 'if', 'current_focus', 'is', 'None', ':', 'return', 'None', 'current_focus', '=', 'current_focus', '.', 'replace', '(', '"\\r"', ',', '""', ')', 'matches', '=', 'WINDOW_REGEX', '.', 'search', '(', 'current_focus', ')', '# case 1: current app was successfully found', 'if', 'matches', ':', '(', 'pkg', ',', 'activity', ')', '=', 'matches', '.', 'group', '(', '"package"', ',', '"activity"', ')', 'return', '{', '"package"', ':', 'pkg', ',', '"activity"', ':', 'activity', '}', '# case 2: current app could not be found', 'logging', '.', 'warning', '(', '"Couldn\'t get current app, reply was %s"', ',', 'current_focus', ')', 'return', 'None']
Return the current app.
['Return', 'the', 'current', 'app', '.']
train
https://github.com/happyleavesaoc/python-firetv/blob/3dd953376c0d5af502e775ae14ed0afe03224781/firetv/__init__.py#L518-L534
282
joke2k/faker
faker/providers/ssn/et_EE/__init__.py
checksum
def checksum(digits): """Calculate checksum of Estonian personal identity code. Checksum is calculated with "Modulo 11" method using level I or II scale: Level I scale: 1 2 3 4 5 6 7 8 9 1 Level II scale: 3 4 5 6 7 8 9 1 2 3 The digits of the personal code are multiplied by level I scale and summed; if remainder of modulo 11 of the sum is less than 10, checksum is the remainder. If remainder is 10, then level II scale is used; checksum is remainder if remainder < 10 or 0 if remainder is 10. See also https://et.wikipedia.org/wiki/Isikukood """ sum_mod11 = sum(map(operator.mul, digits, Provider.scale1)) % 11 if sum_mod11 < 10: return sum_mod11 sum_mod11 = sum(map(operator.mul, digits, Provider.scale2)) % 11 return 0 if sum_mod11 == 10 else sum_mod11
python
def checksum(digits): """Calculate checksum of Estonian personal identity code. Checksum is calculated with "Modulo 11" method using level I or II scale: Level I scale: 1 2 3 4 5 6 7 8 9 1 Level II scale: 3 4 5 6 7 8 9 1 2 3 The digits of the personal code are multiplied by level I scale and summed; if remainder of modulo 11 of the sum is less than 10, checksum is the remainder. If remainder is 10, then level II scale is used; checksum is remainder if remainder < 10 or 0 if remainder is 10. See also https://et.wikipedia.org/wiki/Isikukood """ sum_mod11 = sum(map(operator.mul, digits, Provider.scale1)) % 11 if sum_mod11 < 10: return sum_mod11 sum_mod11 = sum(map(operator.mul, digits, Provider.scale2)) % 11 return 0 if sum_mod11 == 10 else sum_mod11
['def', 'checksum', '(', 'digits', ')', ':', 'sum_mod11', '=', 'sum', '(', 'map', '(', 'operator', '.', 'mul', ',', 'digits', ',', 'Provider', '.', 'scale1', ')', ')', '%', '11', 'if', 'sum_mod11', '<', '10', ':', 'return', 'sum_mod11', 'sum_mod11', '=', 'sum', '(', 'map', '(', 'operator', '.', 'mul', ',', 'digits', ',', 'Provider', '.', 'scale2', ')', ')', '%', '11', 'return', '0', 'if', 'sum_mod11', '==', '10', 'else', 'sum_mod11']
Calculate checksum of Estonian personal identity code. Checksum is calculated with "Modulo 11" method using level I or II scale: Level I scale: 1 2 3 4 5 6 7 8 9 1 Level II scale: 3 4 5 6 7 8 9 1 2 3 The digits of the personal code are multiplied by level I scale and summed; if remainder of modulo 11 of the sum is less than 10, checksum is the remainder. If remainder is 10, then level II scale is used; checksum is remainder if remainder < 10 or 0 if remainder is 10. See also https://et.wikipedia.org/wiki/Isikukood
['Calculate', 'checksum', 'of', 'Estonian', 'personal', 'identity', 'code', '.']
train
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/ssn/et_EE/__init__.py#L9-L28
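A worked example of the Modulo-11 rule described in the checksum docstring above, restated without the Provider class so it runs standalone (the scales are the level I/II weights quoted there):

import operator

scale1 = (1, 2, 3, 4, 5, 6, 7, 8, 9, 1)
scale2 = (3, 4, 5, 6, 7, 8, 9, 1, 2, 3)

def et_checksum(digits):
    s = sum(map(operator.mul, digits, scale1)) % 11
    if s < 10:
        return s
    s = sum(map(operator.mul, digits, scale2)) % 11
    return 0 if s == 10 else s

# First ten digits of a code in the documented format:
# 1*3 + 2*7 + 3*6 + 4*0 + 5*5 + 6*0 + 7*3 + 8*0 + 9*2 + 1*9 = 108, and 108 % 11 = 9
print(et_checksum([3, 7, 6, 0, 5, 0, 3, 0, 2, 9]))   # 9, so the full code would end in 9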
283
DataONEorg/d1_python
lib_common/src/d1_common/resource_map.py
ResourceMap.getResourceMapPid
def getResourceMapPid(self): """Returns: str : PID of the Resource Map itself. """ ore = [ o for o in self.subjects(predicate=rdflib.RDF.type, object=ORE.ResourceMap) ][0] pid = [str(o) for o in self.objects(predicate=DCTERMS.identifier, subject=ore)][ 0 ] return pid
python
def getResourceMapPid(self): """Returns: str : PID of the Resource Map itself. """ ore = [ o for o in self.subjects(predicate=rdflib.RDF.type, object=ORE.ResourceMap) ][0] pid = [str(o) for o in self.objects(predicate=DCTERMS.identifier, subject=ore)][ 0 ] return pid
['def', 'getResourceMapPid', '(', 'self', ')', ':', 'ore', '=', '[', 'o', 'for', 'o', 'in', 'self', '.', 'subjects', '(', 'predicate', '=', 'rdflib', '.', 'RDF', '.', 'type', ',', 'object', '=', 'ORE', '.', 'ResourceMap', ')', ']', '[', '0', ']', 'pid', '=', '[', 'str', '(', 'o', ')', 'for', 'o', 'in', 'self', '.', 'objects', '(', 'predicate', '=', 'DCTERMS', '.', 'identifier', ',', 'subject', '=', 'ore', ')', ']', '[', '0', ']', 'return', 'pid']
Returns: str : PID of the Resource Map itself.
['Returns', ':']
train
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/resource_map.py#L427-L439
284
globocom/GloboNetworkAPI-client-python
networkapiclient/ApiPool.py
ApiPool.create
def create(self, pools): """ Method to create pool's :param pools: List containing pool's desired to be created on database :return: None """ data = {'server_pools': pools} return super(ApiPool, self).post('api/v3/pool/', data)
python
def create(self, pools): """ Method to create pool's :param pools: List containing pool's desired to be created on database :return: None """ data = {'server_pools': pools} return super(ApiPool, self).post('api/v3/pool/', data)
['def', 'create', '(', 'self', ',', 'pools', ')', ':', 'data', '=', '{', "'server_pools'", ':', 'pools', '}', 'return', 'super', '(', 'ApiPool', ',', 'self', ')', '.', 'post', '(', "'api/v3/pool/'", ',', 'data', ')']
Method to create pools :param pools: List containing the pools to be created in the database :return: None
['Method', 'to', 'create', 'pool', 's']
train
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ApiPool.py#L125-L134
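A hedged usage sketch for ApiPool.create above; the client construction and the pool field names are illustrative assumptions, not the documented schema:

pools = [
    {
        "identifier": "pool_example",   # hypothetical fields -- consult the GloboNetworkAPI docs
        "default_port": 8080,
        "environment": 1,
    }
]
# api_pool = ApiPool(networkapi_url, user, password)   # assumed constructor arguments
# api_pool.create(pools)                                # POSTs {'server_pools': pools} to api/v3/pool/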
285
ajk8/hatchery
hatchery/project.py
_get_uploaded_versions_warehouse
def _get_uploaded_versions_warehouse(project_name, index_url, requests_verify=True): """ Query the pypi index at index_url using warehouse api to find all of the "releases" """ url = '/'.join((index_url, project_name, 'json')) response = requests.get(url, verify=requests_verify) if response.status_code == 200: return response.json()['releases'].keys() return None
python
def _get_uploaded_versions_warehouse(project_name, index_url, requests_verify=True): """ Query the pypi index at index_url using warehouse api to find all of the "releases" """ url = '/'.join((index_url, project_name, 'json')) response = requests.get(url, verify=requests_verify) if response.status_code == 200: return response.json()['releases'].keys() return None
['def', '_get_uploaded_versions_warehouse', '(', 'project_name', ',', 'index_url', ',', 'requests_verify', '=', 'True', ')', ':', 'url', '=', "'/'", '.', 'join', '(', '(', 'index_url', ',', 'project_name', ',', "'json'", ')', ')', 'response', '=', 'requests', '.', 'get', '(', 'url', ',', 'verify', '=', 'requests_verify', ')', 'if', 'response', '.', 'status_code', '==', '200', ':', 'return', 'response', '.', 'json', '(', ')', '[', "'releases'", ']', '.', 'keys', '(', ')', 'return', 'None']
Query the pypi index at index_url using warehouse api to find all of the "releases"
['Query', 'the', 'pypi', 'index', 'at', 'index_url', 'using', 'warehouse', 'api', 'to', 'find', 'all', 'of', 'the', 'releases']
train
https://github.com/ajk8/hatchery/blob/e068c9f5366d2c98225babb03d4cde36c710194f/hatchery/project.py#L131-L137
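A sketch of the same warehouse-style lookup performed by _get_uploaded_versions_warehouse above, assuming the public PyPI JSON endpoint as the index URL (hatchery passes the index URL in as an argument):

import requests

index_url = "https://pypi.org/pypi"      # assumed index location
project_name = "requests"
response = requests.get("/".join((index_url, project_name, "json")), timeout=10)
versions = sorted(response.json()["releases"]) if response.status_code == 200 else None
print(versions[:3] if versions else "project not found")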
286
apple/turicreate
src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_tree_ensemble.py
_recurse
def _recurse(coreml_tree, scikit_tree, tree_id, node_id, scaling = 1.0, mode = 'regressor', n_classes = 2, tree_index = 0): """Traverse through the tree and append to the tree spec. """ if not(HAS_SKLEARN): raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.') ## Recursion should not be called on the leaf node. if node_id == _tree.TREE_LEAF: raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF) # Add a branch node to the tree if scikit_tree.children_left[node_id] != _tree.TREE_LEAF: branch_mode = 'BranchOnValueLessThanEqual' feature_index = scikit_tree.feature[node_id] feature_value = scikit_tree.threshold[node_id] left_child_id = scikit_tree.children_left[node_id] right_child_id = scikit_tree.children_right[node_id] # Add a branch node coreml_tree.add_branch_node(tree_id, node_id, feature_index, feature_value, branch_mode, left_child_id, right_child_id) # Now recurse _recurse(coreml_tree, scikit_tree, tree_id, left_child_id, scaling, mode, n_classes, tree_index) _recurse(coreml_tree, scikit_tree, tree_id, right_child_id, scaling, mode, n_classes, tree_index) # Add a leaf node to the tree else: # Get the scikit-learn value if scikit_tree.n_outputs != 1: raise ValueError('Expected only 1 output in the scikit-learn tree.') value = _get_value(scikit_tree.value[node_id], mode, scaling, n_classes, tree_index) coreml_tree.add_leaf_node(tree_id, node_id, value)
python
def _recurse(coreml_tree, scikit_tree, tree_id, node_id, scaling = 1.0, mode = 'regressor', n_classes = 2, tree_index = 0): """Traverse through the tree and append to the tree spec. """ if not(HAS_SKLEARN): raise RuntimeError('scikit-learn not found. scikit-learn conversion API is disabled.') ## Recursion should not be called on the leaf node. if node_id == _tree.TREE_LEAF: raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF) # Add a branch node to the tree if scikit_tree.children_left[node_id] != _tree.TREE_LEAF: branch_mode = 'BranchOnValueLessThanEqual' feature_index = scikit_tree.feature[node_id] feature_value = scikit_tree.threshold[node_id] left_child_id = scikit_tree.children_left[node_id] right_child_id = scikit_tree.children_right[node_id] # Add a branch node coreml_tree.add_branch_node(tree_id, node_id, feature_index, feature_value, branch_mode, left_child_id, right_child_id) # Now recurse _recurse(coreml_tree, scikit_tree, tree_id, left_child_id, scaling, mode, n_classes, tree_index) _recurse(coreml_tree, scikit_tree, tree_id, right_child_id, scaling, mode, n_classes, tree_index) # Add a leaf node to the tree else: # Get the scikit-learn value if scikit_tree.n_outputs != 1: raise ValueError('Expected only 1 output in the scikit-learn tree.') value = _get_value(scikit_tree.value[node_id], mode, scaling, n_classes, tree_index) coreml_tree.add_leaf_node(tree_id, node_id, value)
['def', '_recurse', '(', 'coreml_tree', ',', 'scikit_tree', ',', 'tree_id', ',', 'node_id', ',', 'scaling', '=', '1.0', ',', 'mode', '=', "'regressor'", ',', 'n_classes', '=', '2', ',', 'tree_index', '=', '0', ')', ':', 'if', 'not', '(', 'HAS_SKLEARN', ')', ':', 'raise', 'RuntimeError', '(', "'scikit-learn not found. scikit-learn conversion API is disabled.'", ')', '## Recursion should not be called on the leaf node.', 'if', 'node_id', '==', '_tree', '.', 'TREE_LEAF', ':', 'raise', 'ValueError', '(', '"Invalid node_id %s"', '%', '_tree', '.', 'TREE_LEAF', ')', '# Add a branch node to the tree', 'if', 'scikit_tree', '.', 'children_left', '[', 'node_id', ']', '!=', '_tree', '.', 'TREE_LEAF', ':', 'branch_mode', '=', "'BranchOnValueLessThanEqual'", 'feature_index', '=', 'scikit_tree', '.', 'feature', '[', 'node_id', ']', 'feature_value', '=', 'scikit_tree', '.', 'threshold', '[', 'node_id', ']', 'left_child_id', '=', 'scikit_tree', '.', 'children_left', '[', 'node_id', ']', 'right_child_id', '=', 'scikit_tree', '.', 'children_right', '[', 'node_id', ']', '# Add a branch node', 'coreml_tree', '.', 'add_branch_node', '(', 'tree_id', ',', 'node_id', ',', 'feature_index', ',', 'feature_value', ',', 'branch_mode', ',', 'left_child_id', ',', 'right_child_id', ')', '# Now recurse', '_recurse', '(', 'coreml_tree', ',', 'scikit_tree', ',', 'tree_id', ',', 'left_child_id', ',', 'scaling', ',', 'mode', ',', 'n_classes', ',', 'tree_index', ')', '_recurse', '(', 'coreml_tree', ',', 'scikit_tree', ',', 'tree_id', ',', 'right_child_id', ',', 'scaling', ',', 'mode', ',', 'n_classes', ',', 'tree_index', ')', '# Add a leaf node to the tree', 'else', ':', '# Get the scikit-learn value', 'if', 'scikit_tree', '.', 'n_outputs', '!=', '1', ':', 'raise', 'ValueError', '(', "'Expected only 1 output in the scikit-learn tree.'", ')', 'value', '=', '_get_value', '(', 'scikit_tree', '.', 'value', '[', 'node_id', ']', ',', 'mode', ',', 'scaling', ',', 'n_classes', ',', 'tree_index', ')', 'coreml_tree', '.', 'add_leaf_node', '(', 'tree_id', ',', 'node_id', ',', 'value', ')']
Traverse through the tree and append to the tree spec.
['Traverse', 'through', 'the', 'tree', 'and', 'append', 'to', 'the', 'tree', 'spec', '.']
train
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/coremltools/converters/sklearn/_tree_ensemble.py#L44-L77
287
google/python-gflags
gflags2man.py
ProgramInfo.ParsePythonFlags
def ParsePythonFlags(self, start_line=0): """Parse python/swig style flags.""" modname = None # name of current module modlist = [] flag = None for line_num in range(start_line, len(self.output)): # collect flags line = self.output[line_num].rstrip() if not line: # blank continue mobj = self.module_py_re.match(line) if mobj: # start of a new module modname = mobj.group(1) logging.debug('Module: %s' % line) if flag: modlist.append(flag) self.module_list.append(modname) self.modules.setdefault(modname, []) modlist = self.modules[modname] flag = None continue mobj = self.flag_py_re.match(line) if mobj: # start of a new flag if flag: modlist.append(flag) logging.debug('Flag: %s' % line) flag = Flag(mobj.group(1), mobj.group(2)) continue if not flag: # continuation of a flag logging.error('Flag info, but no current flag "%s"' % line) mobj = self.flag_default_py_re.match(line) if mobj: # (default: '...') flag.default = mobj.group(1) logging.debug('Fdef: %s' % line) continue mobj = self.flag_tips_py_re.match(line) if mobj: # (tips) flag.tips = mobj.group(1) logging.debug('Ftip: %s' % line) continue if flag and flag.help: flag.help += line # multiflags tack on an extra line else: logging.info('Extra: %s' % line) if flag: modlist.append(flag)
python
def ParsePythonFlags(self, start_line=0): """Parse python/swig style flags.""" modname = None # name of current module modlist = [] flag = None for line_num in range(start_line, len(self.output)): # collect flags line = self.output[line_num].rstrip() if not line: # blank continue mobj = self.module_py_re.match(line) if mobj: # start of a new module modname = mobj.group(1) logging.debug('Module: %s' % line) if flag: modlist.append(flag) self.module_list.append(modname) self.modules.setdefault(modname, []) modlist = self.modules[modname] flag = None continue mobj = self.flag_py_re.match(line) if mobj: # start of a new flag if flag: modlist.append(flag) logging.debug('Flag: %s' % line) flag = Flag(mobj.group(1), mobj.group(2)) continue if not flag: # continuation of a flag logging.error('Flag info, but no current flag "%s"' % line) mobj = self.flag_default_py_re.match(line) if mobj: # (default: '...') flag.default = mobj.group(1) logging.debug('Fdef: %s' % line) continue mobj = self.flag_tips_py_re.match(line) if mobj: # (tips) flag.tips = mobj.group(1) logging.debug('Ftip: %s' % line) continue if flag and flag.help: flag.help += line # multiflags tack on an extra line else: logging.info('Extra: %s' % line) if flag: modlist.append(flag)
['def', 'ParsePythonFlags', '(', 'self', ',', 'start_line', '=', '0', ')', ':', 'modname', '=', 'None', '# name of current module', 'modlist', '=', '[', ']', 'flag', '=', 'None', 'for', 'line_num', 'in', 'range', '(', 'start_line', ',', 'len', '(', 'self', '.', 'output', ')', ')', ':', '# collect flags', 'line', '=', 'self', '.', 'output', '[', 'line_num', ']', '.', 'rstrip', '(', ')', 'if', 'not', 'line', ':', '# blank', 'continue', 'mobj', '=', 'self', '.', 'module_py_re', '.', 'match', '(', 'line', ')', 'if', 'mobj', ':', '# start of a new module', 'modname', '=', 'mobj', '.', 'group', '(', '1', ')', 'logging', '.', 'debug', '(', "'Module: %s'", '%', 'line', ')', 'if', 'flag', ':', 'modlist', '.', 'append', '(', 'flag', ')', 'self', '.', 'module_list', '.', 'append', '(', 'modname', ')', 'self', '.', 'modules', '.', 'setdefault', '(', 'modname', ',', '[', ']', ')', 'modlist', '=', 'self', '.', 'modules', '[', 'modname', ']', 'flag', '=', 'None', 'continue', 'mobj', '=', 'self', '.', 'flag_py_re', '.', 'match', '(', 'line', ')', 'if', 'mobj', ':', '# start of a new flag', 'if', 'flag', ':', 'modlist', '.', 'append', '(', 'flag', ')', 'logging', '.', 'debug', '(', "'Flag: %s'", '%', 'line', ')', 'flag', '=', 'Flag', '(', 'mobj', '.', 'group', '(', '1', ')', ',', 'mobj', '.', 'group', '(', '2', ')', ')', 'continue', 'if', 'not', 'flag', ':', '# continuation of a flag', 'logging', '.', 'error', '(', '\'Flag info, but no current flag "%s"\'', '%', 'line', ')', 'mobj', '=', 'self', '.', 'flag_default_py_re', '.', 'match', '(', 'line', ')', 'if', 'mobj', ':', "# (default: '...')", 'flag', '.', 'default', '=', 'mobj', '.', 'group', '(', '1', ')', 'logging', '.', 'debug', '(', "'Fdef: %s'", '%', 'line', ')', 'continue', 'mobj', '=', 'self', '.', 'flag_tips_py_re', '.', 'match', '(', 'line', ')', 'if', 'mobj', ':', '# (tips)', 'flag', '.', 'tips', '=', 'mobj', '.', 'group', '(', '1', ')', 'logging', '.', 'debug', '(', "'Ftip: %s'", '%', 'line', ')', 'continue', 'if', 'flag', 'and', 'flag', '.', 'help', ':', 'flag', '.', 'help', '+=', 'line', '# multiflags tack on an extra line', 'else', ':', 'logging', '.', 'info', '(', "'Extra: %s'", '%', 'line', ')', 'if', 'flag', ':', 'modlist', '.', 'append', '(', 'flag', ')']
Parse python/swig style flags.
['Parse', 'python', '/', 'swig', 'style', 'flags', '.']
train
https://github.com/google/python-gflags/blob/4f06c3d0d6cbe9b1fb90ee9fb1c082b3bf9285f6/gflags2man.py#L274-L321
288
nuagenetworks/bambou
bambou/nurest_login_controller.py
NURESTLoginController.get_authentication_header
def get_authentication_header(self, user=None, api_key=None, password=None, certificate=None): """ Return authenication string to place in Authorization Header If API Token is set, it'll be used. Otherwise, the clear text password will be sent. Users of NURESTLoginController are responsible to clean the password property. Returns: Returns the XREST Authentication string with API Key or user password encoded. """ if not user: user = self.user if not api_key: api_key = self.api_key if not password: password = self.password if not password: password = self.password if not certificate: certificate = self._certificate if certificate: return "XREST %s" % urlsafe_b64encode("{}:".format(user).encode('utf-8')).decode('utf-8') if api_key: return "XREST %s" % urlsafe_b64encode("{}:{}".format(user, api_key).encode('utf-8')).decode('utf-8') return "XREST %s" % urlsafe_b64encode("{}:{}".format(user, password).encode('utf-8')).decode('utf-8')
python
def get_authentication_header(self, user=None, api_key=None, password=None, certificate=None): """ Return authenication string to place in Authorization Header If API Token is set, it'll be used. Otherwise, the clear text password will be sent. Users of NURESTLoginController are responsible to clean the password property. Returns: Returns the XREST Authentication string with API Key or user password encoded. """ if not user: user = self.user if not api_key: api_key = self.api_key if not password: password = self.password if not password: password = self.password if not certificate: certificate = self._certificate if certificate: return "XREST %s" % urlsafe_b64encode("{}:".format(user).encode('utf-8')).decode('utf-8') if api_key: return "XREST %s" % urlsafe_b64encode("{}:{}".format(user, api_key).encode('utf-8')).decode('utf-8') return "XREST %s" % urlsafe_b64encode("{}:{}".format(user, password).encode('utf-8')).decode('utf-8')
['def', 'get_authentication_header', '(', 'self', ',', 'user', '=', 'None', ',', 'api_key', '=', 'None', ',', 'password', '=', 'None', ',', 'certificate', '=', 'None', ')', ':', 'if', 'not', 'user', ':', 'user', '=', 'self', '.', 'user', 'if', 'not', 'api_key', ':', 'api_key', '=', 'self', '.', 'api_key', 'if', 'not', 'password', ':', 'password', '=', 'self', '.', 'password', 'if', 'not', 'password', ':', 'password', '=', 'self', '.', 'password', 'if', 'not', 'certificate', ':', 'certificate', '=', 'self', '.', '_certificate', 'if', 'certificate', ':', 'return', '"XREST %s"', '%', 'urlsafe_b64encode', '(', '"{}:"', '.', 'format', '(', 'user', ')', '.', 'encode', '(', "'utf-8'", ')', ')', '.', 'decode', '(', "'utf-8'", ')', 'if', 'api_key', ':', 'return', '"XREST %s"', '%', 'urlsafe_b64encode', '(', '"{}:{}"', '.', 'format', '(', 'user', ',', 'api_key', ')', '.', 'encode', '(', "'utf-8'", ')', ')', '.', 'decode', '(', "'utf-8'", ')', 'return', '"XREST %s"', '%', 'urlsafe_b64encode', '(', '"{}:{}"', '.', 'format', '(', 'user', ',', 'password', ')', '.', 'encode', '(', "'utf-8'", ')', ')', '.', 'decode', '(', "'utf-8'", ')']
Return authentication string to place in Authorization Header If API Token is set, it'll be used. Otherwise, the clear text password will be sent. Users of NURESTLoginController are responsible for cleaning the password property. Returns: Returns the XREST Authentication string with API Key or user password encoded.
['Return', 'authenication', 'string', 'to', 'place', 'in', 'Authorization', 'Header']
train
https://github.com/nuagenetworks/bambou/blob/d334fea23e384d3df8e552fe1849ad707941c666/bambou/nurest_login_controller.py#L223-L256
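The password branch of get_authentication_header above is just a urlsafe base64 encoding of "user:password"; a standalone illustration with made-up credentials:

from base64 import urlsafe_b64encode

user, password = "csproot", "secret"                                  # made-up credentials
token = urlsafe_b64encode("{}:{}".format(user, password).encode("utf-8")).decode("utf-8")
print("XREST %s" % token)                                             # XREST Y3Nwcm9vdDpzZWNyZXQ=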
289
VIVelev/PyDojoML
dojo/svm/libsvm/commonutil.py
evaluations
def evaluations(ty, pv, useScipy = True): """ evaluations(ty, pv, useScipy) -> (ACC, MSE, SCC) ty, pv: list, tuple or ndarray useScipy: convert ty, pv to ndarray, and use scipy functions for the evaluation Calculate accuracy, mean squared error and squared correlation coefficient using the true values (ty) and predicted values (pv). """ if scipy != None and useScipy: return evaluations_scipy(scipy.asarray(ty), scipy.asarray(pv)) if len(ty) != len(pv): raise ValueError("len(ty) must be equal to len(pv)") total_correct = total_error = 0 sumv = sumy = sumvv = sumyy = sumvy = 0 for v, y in zip(pv, ty): if y == v: total_correct += 1 total_error += (v-y)*(v-y) sumv += v sumy += y sumvv += v*v sumyy += y*y sumvy += v*y l = len(ty) ACC = 100.0*total_correct/l MSE = total_error/l try: SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy)) except: SCC = float('nan') return (float(ACC), float(MSE), float(SCC))
python
def evaluations(ty, pv, useScipy = True): """ evaluations(ty, pv, useScipy) -> (ACC, MSE, SCC) ty, pv: list, tuple or ndarray useScipy: convert ty, pv to ndarray, and use scipy functions for the evaluation Calculate accuracy, mean squared error and squared correlation coefficient using the true values (ty) and predicted values (pv). """ if scipy != None and useScipy: return evaluations_scipy(scipy.asarray(ty), scipy.asarray(pv)) if len(ty) != len(pv): raise ValueError("len(ty) must be equal to len(pv)") total_correct = total_error = 0 sumv = sumy = sumvv = sumyy = sumvy = 0 for v, y in zip(pv, ty): if y == v: total_correct += 1 total_error += (v-y)*(v-y) sumv += v sumy += y sumvv += v*v sumyy += y*y sumvy += v*y l = len(ty) ACC = 100.0*total_correct/l MSE = total_error/l try: SCC = ((l*sumvy-sumv*sumy)*(l*sumvy-sumv*sumy))/((l*sumvv-sumv*sumv)*(l*sumyy-sumy*sumy)) except: SCC = float('nan') return (float(ACC), float(MSE), float(SCC))
['def', 'evaluations', '(', 'ty', ',', 'pv', ',', 'useScipy', '=', 'True', ')', ':', 'if', 'scipy', '!=', 'None', 'and', 'useScipy', ':', 'return', 'evaluations_scipy', '(', 'scipy', '.', 'asarray', '(', 'ty', ')', ',', 'scipy', '.', 'asarray', '(', 'pv', ')', ')', 'if', 'len', '(', 'ty', ')', '!=', 'len', '(', 'pv', ')', ':', 'raise', 'ValueError', '(', '"len(ty) must be equal to len(pv)"', ')', 'total_correct', '=', 'total_error', '=', '0', 'sumv', '=', 'sumy', '=', 'sumvv', '=', 'sumyy', '=', 'sumvy', '=', '0', 'for', 'v', ',', 'y', 'in', 'zip', '(', 'pv', ',', 'ty', ')', ':', 'if', 'y', '==', 'v', ':', 'total_correct', '+=', '1', 'total_error', '+=', '(', 'v', '-', 'y', ')', '*', '(', 'v', '-', 'y', ')', 'sumv', '+=', 'v', 'sumy', '+=', 'y', 'sumvv', '+=', 'v', '*', 'v', 'sumyy', '+=', 'y', '*', 'y', 'sumvy', '+=', 'v', '*', 'y', 'l', '=', 'len', '(', 'ty', ')', 'ACC', '=', '100.0', '*', 'total_correct', '/', 'l', 'MSE', '=', 'total_error', '/', 'l', 'try', ':', 'SCC', '=', '(', '(', 'l', '*', 'sumvy', '-', 'sumv', '*', 'sumy', ')', '*', '(', 'l', '*', 'sumvy', '-', 'sumv', '*', 'sumy', ')', ')', '/', '(', '(', 'l', '*', 'sumvv', '-', 'sumv', '*', 'sumv', ')', '*', '(', 'l', '*', 'sumyy', '-', 'sumy', '*', 'sumy', ')', ')', 'except', ':', 'SCC', '=', 'float', '(', "'nan'", ')', 'return', '(', 'float', '(', 'ACC', ')', ',', 'float', '(', 'MSE', ')', ',', 'float', '(', 'SCC', ')', ')']
evaluations(ty, pv, useScipy) -> (ACC, MSE, SCC) ty, pv: list, tuple or ndarray useScipy: convert ty, pv to ndarray, and use scipy functions for the evaluation Calculate accuracy, mean squared error and squared correlation coefficient using the true values (ty) and predicted values (pv).
['evaluations', '(', 'ty', 'pv', 'useScipy', ')', '-', '>', '(', 'ACC', 'MSE', 'SCC', ')', 'ty', 'pv', ':', 'list', 'tuple', 'or', 'ndarray', 'useScipy', ':', 'convert', 'ty', 'pv', 'to', 'ndarray', 'and', 'use', 'scipy', 'functions', 'for', 'the', 'evaluation']
train
https://github.com/VIVelev/PyDojoML/blob/773fdce6866aa6decd306a5a85f94129fed816eb/dojo/svm/libsvm/commonutil.py#L85-L116
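A quick hand check of the accuracy and mean squared error that evaluations() returns, using plain Python lists and made-up values (the squared correlation coefficient SCC is the squared Pearson correlation of ty and pv, omitted here for brevity):

ty = [1.0, 2.0, 3.0, 4.0]   # true values
pv = [1.0, 2.5, 2.5, 4.0]   # predicted values

acc = 100.0 * sum(t == p for t, p in zip(ty, pv)) / len(ty)
mse = sum((p - t) ** 2 for t, p in zip(ty, pv)) / len(ty)
print(acc, mse)             # 50.0 0.125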
290
edeposit/edeposit.amqp.pdfgen
src/edeposit/amqp/pdfgen/specialization.py
get_contract
def get_contract(firma, pravni_forma, sidlo, ic, dic, zastoupen): """ Compose contract and create PDF. Args: firma (str): firma pravni_forma (str): pravni_forma sidlo (str): sidlo ic (str): ic dic (str): dic zastoupen (str): zastoupen Returns: obj: StringIO file instance containing PDF file. """ contract_fn = _resource_context( "Licencni_smlouva_o_dodavani_elektronickych_publikaci" "_a_jejich_uziti.rst" ) # load contract with open(contract_fn) as f: contract = f.read()#.decode("utf-8").encode("utf-8") # make sure that `firma` has its heading mark firma = firma.strip() firma = firma + "\n" + ((len(firma) + 1) * "-") # patch template contract = Template(contract).substitute( firma=firma, pravni_forma=pravni_forma.strip(), sidlo=sidlo.strip(), ic=ic.strip(), dic=dic.strip(), zastoupen=zastoupen.strip(), resources_path=RES_PATH ) return gen_pdf( contract, open(_resource_context("style.json")).read(), )
python
def get_contract(firma, pravni_forma, sidlo, ic, dic, zastoupen): """ Compose contract and create PDF. Args: firma (str): firma pravni_forma (str): pravni_forma sidlo (str): sidlo ic (str): ic dic (str): dic zastoupen (str): zastoupen Returns: obj: StringIO file instance containing PDF file. """ contract_fn = _resource_context( "Licencni_smlouva_o_dodavani_elektronickych_publikaci" "_a_jejich_uziti.rst" ) # load contract with open(contract_fn) as f: contract = f.read()#.decode("utf-8").encode("utf-8") # make sure that `firma` has its heading mark firma = firma.strip() firma = firma + "\n" + ((len(firma) + 1) * "-") # patch template contract = Template(contract).substitute( firma=firma, pravni_forma=pravni_forma.strip(), sidlo=sidlo.strip(), ic=ic.strip(), dic=dic.strip(), zastoupen=zastoupen.strip(), resources_path=RES_PATH ) return gen_pdf( contract, open(_resource_context("style.json")).read(), )
['def', 'get_contract', '(', 'firma', ',', 'pravni_forma', ',', 'sidlo', ',', 'ic', ',', 'dic', ',', 'zastoupen', ')', ':', 'contract_fn', '=', '_resource_context', '(', '"Licencni_smlouva_o_dodavani_elektronickych_publikaci"', '"_a_jejich_uziti.rst"', ')', '# load contract', 'with', 'open', '(', 'contract_fn', ')', 'as', 'f', ':', 'contract', '=', 'f', '.', 'read', '(', ')', '#.decode("utf-8").encode("utf-8")', '# make sure that `firma` has its heading mark', 'firma', '=', 'firma', '.', 'strip', '(', ')', 'firma', '=', 'firma', '+', '"\\n"', '+', '(', '(', 'len', '(', 'firma', ')', '+', '1', ')', '*', '"-"', ')', '# patch template', 'contract', '=', 'Template', '(', 'contract', ')', '.', 'substitute', '(', 'firma', '=', 'firma', ',', 'pravni_forma', '=', 'pravni_forma', '.', 'strip', '(', ')', ',', 'sidlo', '=', 'sidlo', '.', 'strip', '(', ')', ',', 'ic', '=', 'ic', '.', 'strip', '(', ')', ',', 'dic', '=', 'dic', '.', 'strip', '(', ')', ',', 'zastoupen', '=', 'zastoupen', '.', 'strip', '(', ')', ',', 'resources_path', '=', 'RES_PATH', ')', 'return', 'gen_pdf', '(', 'contract', ',', 'open', '(', '_resource_context', '(', '"style.json"', ')', ')', '.', 'read', '(', ')', ',', ')']
Compose contract and create PDF. Args: firma (str): firma pravni_forma (str): pravni_forma sidlo (str): sidlo ic (str): ic dic (str): dic zastoupen (str): zastoupen Returns: obj: StringIO file instance containing PDF file.
['Compose', 'contract', 'and', 'create', 'PDF', '.']
train
https://github.com/edeposit/edeposit.amqp.pdfgen/blob/1022d6d01196f4928d664a71e49273c2d8c67e63/src/edeposit/amqp/pdfgen/specialization.py#L43-L85
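The heart of get_contract() above is a string.Template substitution over a bundled RST resource; a minimal sketch of that patching step with an invented two-placeholder template (the real contract template and style.json are not reproduced here):

from string import Template

template = "Firma\n-----\n\n$firma, se sidlem $sidlo."   # stand-in for the bundled .rst file
contract = Template(template).substitute(firma="Example s.r.o.", sidlo="Praha 1")
print(contract)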
291
gwastro/pycbc
pycbc/libutils.py
pkg_config
def pkg_config(pkg_libraries): """Use pkg-config to query for the location of libraries, library directories, and header directories Arguments: pkg_libries(list): A list of packages as strings Returns: libraries(list), library_dirs(list), include_dirs(list) """ libraries=[] library_dirs=[] include_dirs=[] # Check that we have the packages for pkg in pkg_libraries: if os.system('pkg-config --exists %s 2>/dev/null' % pkg) == 0: pass else: print("Could not find library {0}".format(pkg)) sys.exit(1) # Get the pck-config flags if len(pkg_libraries)>0 : # PKG_CONFIG_ALLOW_SYSTEM_CFLAGS explicitly lists system paths. # On system-wide LAL installs, this is needed for swig to find lalswig.i for token in getoutput("PKG_CONFIG_ALLOW_SYSTEM_CFLAGS=1 pkg-config --libs --cflags %s" % ' '.join(pkg_libraries)).split(): if token.startswith("-l"): libraries.append(token[2:]) elif token.startswith("-L"): library_dirs.append(token[2:]) elif token.startswith("-I"): include_dirs.append(token[2:]) return libraries, library_dirs, include_dirs
python
def pkg_config(pkg_libraries): """Use pkg-config to query for the location of libraries, library directories, and header directories Arguments: pkg_libries(list): A list of packages as strings Returns: libraries(list), library_dirs(list), include_dirs(list) """ libraries=[] library_dirs=[] include_dirs=[] # Check that we have the packages for pkg in pkg_libraries: if os.system('pkg-config --exists %s 2>/dev/null' % pkg) == 0: pass else: print("Could not find library {0}".format(pkg)) sys.exit(1) # Get the pck-config flags if len(pkg_libraries)>0 : # PKG_CONFIG_ALLOW_SYSTEM_CFLAGS explicitly lists system paths. # On system-wide LAL installs, this is needed for swig to find lalswig.i for token in getoutput("PKG_CONFIG_ALLOW_SYSTEM_CFLAGS=1 pkg-config --libs --cflags %s" % ' '.join(pkg_libraries)).split(): if token.startswith("-l"): libraries.append(token[2:]) elif token.startswith("-L"): library_dirs.append(token[2:]) elif token.startswith("-I"): include_dirs.append(token[2:]) return libraries, library_dirs, include_dirs
['def', 'pkg_config', '(', 'pkg_libraries', ')', ':', 'libraries', '=', '[', ']', 'library_dirs', '=', '[', ']', 'include_dirs', '=', '[', ']', '# Check that we have the packages', 'for', 'pkg', 'in', 'pkg_libraries', ':', 'if', 'os', '.', 'system', '(', "'pkg-config --exists %s 2>/dev/null'", '%', 'pkg', ')', '==', '0', ':', 'pass', 'else', ':', 'print', '(', '"Could not find library {0}"', '.', 'format', '(', 'pkg', ')', ')', 'sys', '.', 'exit', '(', '1', ')', '# Get the pck-config flags', 'if', 'len', '(', 'pkg_libraries', ')', '>', '0', ':', '# PKG_CONFIG_ALLOW_SYSTEM_CFLAGS explicitly lists system paths.', '# On system-wide LAL installs, this is needed for swig to find lalswig.i', 'for', 'token', 'in', 'getoutput', '(', '"PKG_CONFIG_ALLOW_SYSTEM_CFLAGS=1 pkg-config --libs --cflags %s"', '%', "' '", '.', 'join', '(', 'pkg_libraries', ')', ')', '.', 'split', '(', ')', ':', 'if', 'token', '.', 'startswith', '(', '"-l"', ')', ':', 'libraries', '.', 'append', '(', 'token', '[', '2', ':', ']', ')', 'elif', 'token', '.', 'startswith', '(', '"-L"', ')', ':', 'library_dirs', '.', 'append', '(', 'token', '[', '2', ':', ']', ')', 'elif', 'token', '.', 'startswith', '(', '"-I"', ')', ':', 'include_dirs', '.', 'append', '(', 'token', '[', '2', ':', ']', ')', 'return', 'libraries', ',', 'library_dirs', ',', 'include_dirs']
Use pkg-config to query for the location of libraries, library directories, and header directories Arguments: pkg_libries(list): A list of packages as strings Returns: libraries(list), library_dirs(list), include_dirs(list)
['Use', 'pkg', '-', 'config', 'to', 'query', 'for', 'the', 'location', 'of', 'libraries', 'library', 'directories', 'and', 'header', 'directories']
train
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/libutils.py#L32-L66
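The three lists returned by pkg_config() are shaped for direct use in a setuptools Extension; a hedged sketch, assuming the helper above is importable and that pkg-config plus the queried package are installed (the package, module and source names below are placeholders):

from setuptools import Extension

libraries, library_dirs, include_dirs = pkg_config(["gsl"])   # hypothetical dependency
ext = Extension(
    "mypkg._core",                # placeholder extension name
    sources=["mypkg/_core.c"],    # placeholder C source
    libraries=libraries,
    library_dirs=library_dirs,
    include_dirs=include_dirs,
)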
292
spyder-ide/spyder
spyder/plugins/plots/widgets/figurebrowser.py
ThumbnailScrollBar.save_all_figures_as
def save_all_figures_as(self): """Save all the figures to a file.""" self.redirect_stdio.emit(False) dirname = getexistingdirectory(self, caption='Save all figures', basedir=getcwd_or_home()) self.redirect_stdio.emit(True) if dirname: return self.save_all_figures_todir(dirname)
python
def save_all_figures_as(self): """Save all the figures to a file.""" self.redirect_stdio.emit(False) dirname = getexistingdirectory(self, caption='Save all figures', basedir=getcwd_or_home()) self.redirect_stdio.emit(True) if dirname: return self.save_all_figures_todir(dirname)
['def', 'save_all_figures_as', '(', 'self', ')', ':', 'self', '.', 'redirect_stdio', '.', 'emit', '(', 'False', ')', 'dirname', '=', 'getexistingdirectory', '(', 'self', ',', 'caption', '=', "'Save all figures'", ',', 'basedir', '=', 'getcwd_or_home', '(', ')', ')', 'self', '.', 'redirect_stdio', '.', 'emit', '(', 'True', ')', 'if', 'dirname', ':', 'return', 'self', '.', 'save_all_figures_todir', '(', 'dirname', ')']
Save all the figures to a file.
['Save', 'all', 'the', 'figures', 'to', 'a', 'file', '.']
train
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/plots/widgets/figurebrowser.py#L573-L580
293
FNNDSC/pfmisc
pfmisc/C_snode.py
C_stree.b_pathOK
def b_pathOK(self, al_path): """ Checks if the absolute path specified in the al_path is valid for current tree """ b_OK = True try: self.l_allPaths.index(al_path) except: b_OK = False return b_OK
python
def b_pathOK(self, al_path): """ Checks if the absolute path specified in the al_path is valid for current tree """ b_OK = True try: self.l_allPaths.index(al_path) except: b_OK = False return b_OK
['def', 'b_pathOK', '(', 'self', ',', 'al_path', ')', ':', 'b_OK', '=', 'True', 'try', ':', 'self', '.', 'l_allPaths', '.', 'index', '(', 'al_path', ')', 'except', ':', 'b_OK', '=', 'False', 'return', 'b_OK']
Checks if the absolute path specified in the al_path is valid for current tree
['Checks', 'if', 'the', 'absolute', 'path', 'specified', 'in', 'the', 'al_path', 'is', 'valid', 'for', 'current', 'tree']
train
https://github.com/FNNDSC/pfmisc/blob/960b4d6135fcc50bed0a8e55db2ab1ddad9b99d8/pfmisc/C_snode.py#L909-L917
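The try/except around list.index() in b_pathOK() is effectively a membership test; the same check in standalone form (the paths are made up):

l_allPaths = ["/", "/a", "/a/b"]            # illustrative tree paths

def path_ok(path):
    return path in l_allPaths               # equivalent to the index()/except idiom

print(path_ok("/a/b"), path_ok("/a/c"))     # True False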
294
PmagPy/PmagPy
pmagpy/func.py
array_map
def array_map(f, ar): "Apply an ordinary function to all values in an array." flat_ar = ravel(ar) out = zeros(len(flat_ar), flat_ar.typecode()) for i in range(len(flat_ar)): out[i] = f(flat_ar[i]) out.shape = ar.shape return out
python
def array_map(f, ar): "Apply an ordinary function to all values in an array." flat_ar = ravel(ar) out = zeros(len(flat_ar), flat_ar.typecode()) for i in range(len(flat_ar)): out[i] = f(flat_ar[i]) out.shape = ar.shape return out
['def', 'array_map', '(', 'f', ',', 'ar', ')', ':', 'flat_ar', '=', 'ravel', '(', 'ar', ')', 'out', '=', 'zeros', '(', 'len', '(', 'flat_ar', ')', ',', 'flat_ar', '.', 'typecode', '(', ')', ')', 'for', 'i', 'in', 'range', '(', 'len', '(', 'flat_ar', ')', ')', ':', 'out', '[', 'i', ']', '=', 'f', '(', 'flat_ar', '[', 'i', ']', ')', 'out', '.', 'shape', '=', 'ar', '.', 'shape', 'return', 'out']
Apply an ordinary function to all values in an array.
['Apply', 'an', 'ordinary', 'function', 'to', 'all', 'values', 'in', 'an', 'array', '.']
train
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/func.py#L294-L301
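array_map() above targets the old Numeric API (ravel, typecode()); with modern NumPy the same element-wise mapping can be written as a reshaped comprehension. A hedged equivalent, not part of PmagPy itself:

import numpy as np

def array_map_np(f, ar):
    # Apply f to every element, preserving the input's shape and dtype.
    flat = ar.ravel()
    return np.array([f(x) for x in flat], dtype=ar.dtype).reshape(ar.shape)

a = np.array([[1.0, 2.0], [3.0, 4.0]])
print(array_map_np(lambda x: x * x, a))     # [[ 1.  4.] [ 9. 16.]]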
295
samuraisam/django-json-rpc
jsonrpc/__init__.py
_inject_args
def _inject_args(sig, types): """ A function to inject arguments manually into a method signature before it's been parsed. If using keyword arguments use 'kw=type' instead in the types array. sig the string signature types a list of types to be inserted Returns the altered signature. """ if '(' in sig: parts = sig.split('(') sig = '%s(%s%s%s' % ( parts[0], ', '.join(types), (', ' if parts[1].index(')') > 0 else ''), parts[1]) else: sig = '%s(%s)' % (sig, ', '.join(types)) return sig
python
def _inject_args(sig, types): """ A function to inject arguments manually into a method signature before it's been parsed. If using keyword arguments use 'kw=type' instead in the types array. sig the string signature types a list of types to be inserted Returns the altered signature. """ if '(' in sig: parts = sig.split('(') sig = '%s(%s%s%s' % ( parts[0], ', '.join(types), (', ' if parts[1].index(')') > 0 else ''), parts[1]) else: sig = '%s(%s)' % (sig, ', '.join(types)) return sig
['def', '_inject_args', '(', 'sig', ',', 'types', ')', ':', 'if', "'('", 'in', 'sig', ':', 'parts', '=', 'sig', '.', 'split', '(', "'('", ')', 'sig', '=', "'%s(%s%s%s'", '%', '(', 'parts', '[', '0', ']', ',', "', '", '.', 'join', '(', 'types', ')', ',', '(', "', '", 'if', 'parts', '[', '1', ']', '.', 'index', '(', "')'", ')', '>', '0', 'else', "''", ')', ',', 'parts', '[', '1', ']', ')', 'else', ':', 'sig', '=', "'%s(%s)'", '%', '(', 'sig', ',', "', '", '.', 'join', '(', 'types', ')', ')', 'return', 'sig']
A function to inject arguments manually into a method signature before it's been parsed. If using keyword arguments use 'kw=type' instead in the types array. sig the string signature types a list of types to be inserted Returns the altered signature.
['A', 'function', 'to', 'inject', 'arguments', 'manually', 'into', 'a', 'method', 'signature', 'before', 'it', 's', 'been', 'parsed', '.', 'If', 'using', 'keyword', 'arguments', 'use', 'kw', '=', 'type', 'instead', 'in', 'the', 'types', 'array', '.']
train
https://github.com/samuraisam/django-json-rpc/blob/a88d744d960e828f3eb21265da0f10a694b8ebcf/jsonrpc/__init__.py#L120-L138
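Given the definition in this record, the signature rewriting behaves as below; the method names and injected types are invented, and the calls assume _inject_args is in scope:

print(_inject_args("add(x, y)", ["self"]))   # add(self, x, y)
print(_inject_args("noargs()", ["self"]))    # noargs(self)
print(_inject_args("ping", ["self"]))        # ping(self)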
296
matthew-brett/delocate
delocate/libsana.py
get_prefix_stripper
def get_prefix_stripper(strip_prefix): """ Return function to strip `strip_prefix` prefix from string if present Parameters ---------- prefix : str Prefix to strip from the beginning of string if present Returns ------- stripper : func function such that ``stripper(a_string)`` will strip `prefix` from ``a_string`` if present, otherwise pass ``a_string`` unmodified """ n = len(strip_prefix) def stripper(path): return path if not path.startswith(strip_prefix) else path[n:] return stripper
python
def get_prefix_stripper(strip_prefix): """ Return function to strip `strip_prefix` prefix from string if present Parameters ---------- prefix : str Prefix to strip from the beginning of string if present Returns ------- stripper : func function such that ``stripper(a_string)`` will strip `prefix` from ``a_string`` if present, otherwise pass ``a_string`` unmodified """ n = len(strip_prefix) def stripper(path): return path if not path.startswith(strip_prefix) else path[n:] return stripper
['def', 'get_prefix_stripper', '(', 'strip_prefix', ')', ':', 'n', '=', 'len', '(', 'strip_prefix', ')', 'def', 'stripper', '(', 'path', ')', ':', 'return', 'path', 'if', 'not', 'path', '.', 'startswith', '(', 'strip_prefix', ')', 'else', 'path', '[', 'n', ':', ']', 'return', 'stripper']
Return function to strip `strip_prefix` prefix from string if present Parameters ---------- prefix : str Prefix to strip from the beginning of string if present Returns ------- stripper : func function such that ``stripper(a_string)`` will strip `prefix` from ``a_string`` if present, otherwise pass ``a_string`` unmodified
['Return', 'function', 'to', 'strip', 'strip_prefix', 'prefix', 'from', 'string', 'if', 'present']
train
https://github.com/matthew-brett/delocate/blob/ed48de15fce31c3f52f1a9f32cae1b02fc55aa60/delocate/libsana.py#L107-L124
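A short usage sketch of the closure returned above, assuming get_prefix_stripper is in scope (the paths are placeholders):

strip = get_prefix_stripper("/usr/local/")
print(strip("/usr/local/lib/libfoo.dylib"))   # lib/libfoo.dylib
print(strip("/opt/lib/libbar.dylib"))         # left unchanged: /opt/lib/libbar.dylib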
297
cisco-sas/kitty
kitty/model/low_level/container.py
Container.push
def push(self, field): ''' Add a field to the container, if the field is a Container itself, it should be poped() when done pushing into it :param field: BaseField to push ''' kassert.is_of_types(field, BaseField) container = self._container() field.enclosing = self if isinstance(field, Container): self._containers.append(field) if container: container.push(field) else: name = field.get_name() if name in self._fields_dict: raise KittyException('field with the name (%s) already exists in this container' % (name)) if name: self._fields_dict[name] = field self._fields.append(field) return True
python
def push(self, field): ''' Add a field to the container, if the field is a Container itself, it should be poped() when done pushing into it :param field: BaseField to push ''' kassert.is_of_types(field, BaseField) container = self._container() field.enclosing = self if isinstance(field, Container): self._containers.append(field) if container: container.push(field) else: name = field.get_name() if name in self._fields_dict: raise KittyException('field with the name (%s) already exists in this container' % (name)) if name: self._fields_dict[name] = field self._fields.append(field) return True
['def', 'push', '(', 'self', ',', 'field', ')', ':', 'kassert', '.', 'is_of_types', '(', 'field', ',', 'BaseField', ')', 'container', '=', 'self', '.', '_container', '(', ')', 'field', '.', 'enclosing', '=', 'self', 'if', 'isinstance', '(', 'field', ',', 'Container', ')', ':', 'self', '.', '_containers', '.', 'append', '(', 'field', ')', 'if', 'container', ':', 'container', '.', 'push', '(', 'field', ')', 'else', ':', 'name', '=', 'field', '.', 'get_name', '(', ')', 'if', 'name', 'in', 'self', '.', '_fields_dict', ':', 'raise', 'KittyException', '(', "'field with the name (%s) already exists in this container'", '%', '(', 'name', ')', ')', 'if', 'name', ':', 'self', '.', '_fields_dict', '[', 'name', ']', '=', 'field', 'self', '.', '_fields', '.', 'append', '(', 'field', ')', 'return', 'True']
Add a field to the container, if the field is a Container itself, it should be poped() when done pushing into it :param field: BaseField to push
['Add', 'a', 'field', 'to', 'the', 'container', 'if', 'the', 'field', 'is', 'a', 'Container', 'itself', 'it', 'should', 'be', 'poped', '()', 'when', 'done', 'pushing', 'into', 'it']
train
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/model/low_level/container.py#L304-L324
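The duplicate-name guard in push() can be shown without Kitty itself; this is a simplified standalone sketch of the same bookkeeping, not the real Kitty API:

class TinyContainer:
    def __init__(self):
        self._fields = []
        self._fields_dict = {}

    def push(self, name, field):
        # Reject two fields registered under the same non-empty name.
        if name and name in self._fields_dict:
            raise Exception("field with the name (%s) already exists in this container" % name)
        if name:
            self._fields_dict[name] = field
        self._fields.append(field)
        return True

c = TinyContainer()
c.push("length", object())
# c.push("length", object())   # would raise: duplicate name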
298
titusjan/argos
argos/collect/collector.py
Collector._deleteSpinBoxes
def _deleteSpinBoxes(self, row): """ Removes all spinboxes """ tree = self.tree model = self.tree.model() for col, spinBox in enumerate(self._spinBoxes, self.COL_FIRST_COMBO + self.maxCombos): spinBox.valueChanged[int].disconnect(self._spinboxValueChanged) tree.setIndexWidget(model.index(row, col), None) self._spinBoxes = [] self._setColumnCountForContents()
python
def _deleteSpinBoxes(self, row): """ Removes all spinboxes """ tree = self.tree model = self.tree.model() for col, spinBox in enumerate(self._spinBoxes, self.COL_FIRST_COMBO + self.maxCombos): spinBox.valueChanged[int].disconnect(self._spinboxValueChanged) tree.setIndexWidget(model.index(row, col), None) self._spinBoxes = [] self._setColumnCountForContents()
['def', '_deleteSpinBoxes', '(', 'self', ',', 'row', ')', ':', 'tree', '=', 'self', '.', 'tree', 'model', '=', 'self', '.', 'tree', '.', 'model', '(', ')', 'for', 'col', ',', 'spinBox', 'in', 'enumerate', '(', 'self', '.', '_spinBoxes', ',', 'self', '.', 'COL_FIRST_COMBO', '+', 'self', '.', 'maxCombos', ')', ':', 'spinBox', '.', 'valueChanged', '[', 'int', ']', '.', 'disconnect', '(', 'self', '.', '_spinboxValueChanged', ')', 'tree', '.', 'setIndexWidget', '(', 'model', '.', 'index', '(', 'row', ',', 'col', ')', ',', 'None', ')', 'self', '.', '_spinBoxes', '=', '[', ']', 'self', '.', '_setColumnCountForContents', '(', ')']
Removes all spinboxes
['Removes', 'all', 'spinboxes']
train
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/collect/collector.py#L414-L425
299
GPflow/GPflow
gpflow/models/gpr.py
GPR._build_predict
def _build_predict(self, Xnew, full_cov=False): """ Xnew is a data matrix, the points at which we want to predict. This method computes p(F* | Y) where F* are points on the GP at Xnew, Y are noisy observations at X. """ y = self.Y - self.mean_function(self.X) Kmn = self.kern.K(self.X, Xnew) Kmm_sigma = self.kern.K(self.X) + tf.eye(tf.shape(self.X)[0], dtype=settings.float_type) * self.likelihood.variance Knn = self.kern.K(Xnew) if full_cov else self.kern.Kdiag(Xnew) f_mean, f_var = base_conditional(Kmn, Kmm_sigma, Knn, y, full_cov=full_cov, white=False) # N x P, N x P or P x N x N return f_mean + self.mean_function(Xnew), f_var
python
def _build_predict(self, Xnew, full_cov=False): """ Xnew is a data matrix, the points at which we want to predict. This method computes p(F* | Y) where F* are points on the GP at Xnew, Y are noisy observations at X. """ y = self.Y - self.mean_function(self.X) Kmn = self.kern.K(self.X, Xnew) Kmm_sigma = self.kern.K(self.X) + tf.eye(tf.shape(self.X)[0], dtype=settings.float_type) * self.likelihood.variance Knn = self.kern.K(Xnew) if full_cov else self.kern.Kdiag(Xnew) f_mean, f_var = base_conditional(Kmn, Kmm_sigma, Knn, y, full_cov=full_cov, white=False) # N x P, N x P or P x N x N return f_mean + self.mean_function(Xnew), f_var
['def', '_build_predict', '(', 'self', ',', 'Xnew', ',', 'full_cov', '=', 'False', ')', ':', 'y', '=', 'self', '.', 'Y', '-', 'self', '.', 'mean_function', '(', 'self', '.', 'X', ')', 'Kmn', '=', 'self', '.', 'kern', '.', 'K', '(', 'self', '.', 'X', ',', 'Xnew', ')', 'Kmm_sigma', '=', 'self', '.', 'kern', '.', 'K', '(', 'self', '.', 'X', ')', '+', 'tf', '.', 'eye', '(', 'tf', '.', 'shape', '(', 'self', '.', 'X', ')', '[', '0', ']', ',', 'dtype', '=', 'settings', '.', 'float_type', ')', '*', 'self', '.', 'likelihood', '.', 'variance', 'Knn', '=', 'self', '.', 'kern', '.', 'K', '(', 'Xnew', ')', 'if', 'full_cov', 'else', 'self', '.', 'kern', '.', 'Kdiag', '(', 'Xnew', ')', 'f_mean', ',', 'f_var', '=', 'base_conditional', '(', 'Kmn', ',', 'Kmm_sigma', ',', 'Knn', ',', 'y', ',', 'full_cov', '=', 'full_cov', ',', 'white', '=', 'False', ')', '# N x P, N x P or P x N x N', 'return', 'f_mean', '+', 'self', '.', 'mean_function', '(', 'Xnew', ')', ',', 'f_var']
Xnew is a data matrix, the points at which we want to predict. This method computes p(F* | Y) where F* are points on the GP at Xnew, Y are noisy observations at X.
['Xnew', 'is', 'a', 'data', 'matrix', 'the', 'points', 'at', 'which', 'we', 'want', 'to', 'predict', '.']
train
https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/models/gpr.py#L80-L96
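The base_conditional call above implements the standard exact-GP posterior; a NumPy-only sketch of the same algebra with a toy squared-exponential kernel and made-up data (this is not GPflow's API and assumes a zero mean function):

import numpy as np

def rbf(a, b, variance=1.0, lengthscale=1.0):
    # Squared-exponential kernel matrix between two sets of 1-D inputs.
    d = a[:, None] - b[None, :]
    return variance * np.exp(-0.5 * (d / lengthscale) ** 2)

X = np.array([0.0, 1.0, 2.0]); Y = np.array([0.0, 0.8, 0.9]); noise = 0.1
Xnew = np.array([1.5])

Kmn = rbf(X, Xnew)                          # K(X, X*)
Kmm = rbf(X, X) + noise * np.eye(len(X))    # K(X, X) + sigma^2 * I
Knn = rbf(Xnew, Xnew)                       # K(X*, X*)

A = np.linalg.solve(Kmm, Kmn)               # (K + sigma^2 I)^-1 K(X, X*)
f_mean = A.T @ Y                            # posterior mean at Xnew
f_var = np.diag(Knn - Kmn.T @ A)            # posterior marginal variance
print(f_mean, f_var)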