metadata
base_model: sentence-transformers/all-mpnet-base-v2
datasets:
- code-search-net/code_search_net
language:
- code
library_name: sentence-transformers
metrics:
- pearson_cosine
- spearman_cosine
- pearson_manhattan
- spearman_manhattan
- pearson_euclidean
- spearman_euclidean
- pearson_dot
- spearman_dot
- pearson_max
- spearman_max
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- generated_from_trainer
- dataset_size:20000
- loss:CoSENTLoss
- loss:MultipleNegativesRankingLoss
widget:
- source_sentence: KeypointsOnImage.to_xy_array
sentences:
- |-
def to_xy_array(self):
"""
Convert keypoint coordinates to ``(N,2)`` array.
Returns
-------
(N, 2) ndarray
Array containing the coordinates of all keypoints.
Shape is ``(N,2)`` with coordinates in xy-form.
"""
result = np.zeros((len(self.keypoints), 2), dtype=np.float32)
for i, keypoint in enumerate(self.keypoints):
result[i, 0] = keypoint.x
result[i, 1] = keypoint.y
return result
- |-
def _generateMetricSpecs(options):
""" Generates the Metrics for a given InferenceType
Parameters:
-------------------------------------------------------------------------
options: ExpGenerator options
retval: (metricsList, optimizeMetricLabel)
metricsList: list of metric string names
optimizeMetricLabel: Name of the metric which to optimize over
"""
inferenceType = options['inferenceType']
inferenceArgs = options['inferenceArgs']
predictionSteps = inferenceArgs['predictionSteps']
metricWindow = options['metricWindow']
if metricWindow is None:
metricWindow = int(Configuration.get("nupic.opf.metricWindow"))
metricSpecStrings = []
optimizeMetricLabel = ""
# -----------------------------------------------------------------------
# Generate the metrics specified by the expGenerator paramters
metricSpecStrings.extend(_generateExtraMetricSpecs(options))
# -----------------------------------------------------------------------
optimizeMetricSpec = None
# If using a dynamically computed prediction steps (i.e. when swarming
# over aggregation is requested), then we will plug in the variable
# predictionSteps in place of the statically provided predictionSteps
# from the JSON description.
if options['dynamicPredictionSteps']:
assert len(predictionSteps) == 1
predictionSteps = ['$REPLACE_ME']
# -----------------------------------------------------------------------
# Metrics for temporal prediction
if inferenceType in (InferenceType.TemporalNextStep,
InferenceType.TemporalAnomaly,
InferenceType.TemporalMultiStep,
InferenceType.NontemporalMultiStep,
InferenceType.NontemporalClassification,
'MultiStep'):
predictedFieldName, predictedFieldType = _getPredictedField(options)
isCategory = _isCategory(predictedFieldType)
metricNames = ('avg_err',) if isCategory else ('aae', 'altMAPE')
trivialErrorMetric = 'avg_err' if isCategory else 'altMAPE'
oneGramErrorMetric = 'avg_err' if isCategory else 'altMAPE'
movingAverageBaselineName = 'moving_mode' if isCategory else 'moving_mean'
# Multi-step metrics
for metricName in metricNames:
metricSpec, metricLabel = \
_generateMetricSpecString(field=predictedFieldName,
inferenceElement=InferenceElement.multiStepBestPredictions,
metric='multiStep',
params={'errorMetric': metricName,
'window':metricWindow,
'steps': predictionSteps},
returnLabel=True)
metricSpecStrings.append(metricSpec)
# If the custom error metric was specified, add that
if options["customErrorMetric"] is not None :
metricParams = dict(options["customErrorMetric"])
metricParams['errorMetric'] = 'custom_error_metric'
metricParams['steps'] = predictionSteps
# If errorWindow is not specified, make it equal to the default window
if not "errorWindow" in metricParams:
metricParams["errorWindow"] = metricWindow
metricSpec, metricLabel =_generateMetricSpecString(field=predictedFieldName,
inferenceElement=InferenceElement.multiStepPredictions,
metric="multiStep",
params=metricParams,
returnLabel=True)
metricSpecStrings.append(metricSpec)
# If this is the first specified step size, optimize for it. Be sure to
# escape special characters since this is a regular expression
optimizeMetricSpec = metricSpec
metricLabel = metricLabel.replace('[', '\\[')
metricLabel = metricLabel.replace(']', '\\]')
optimizeMetricLabel = metricLabel
if options["customErrorMetric"] is not None :
optimizeMetricLabel = ".*custom_error_metric.*"
# Add in the trivial metrics
if options["runBaselines"] \
and inferenceType != InferenceType.NontemporalClassification:
for steps in predictionSteps:
metricSpecStrings.append(
_generateMetricSpecString(field=predictedFieldName,
inferenceElement=InferenceElement.prediction,
metric="trivial",
params={'window':metricWindow,
"errorMetric":trivialErrorMetric,
'steps': steps})
)
##Add in the One-Gram baseline error metric
#metricSpecStrings.append(
# _generateMetricSpecString(field=predictedFieldName,
# inferenceElement=InferenceElement.encodings,
# metric="two_gram",
# params={'window':metricWindow,
# "errorMetric":oneGramErrorMetric,
# 'predictionField':predictedFieldName,
# 'steps': steps})
# )
#
#Include the baseline moving mean/mode metric
if isCategory:
metricSpecStrings.append(
_generateMetricSpecString(field=predictedFieldName,
inferenceElement=InferenceElement.prediction,
metric=movingAverageBaselineName,
params={'window':metricWindow
,"errorMetric":"avg_err",
"mode_window":200,
"steps": steps})
)
else :
metricSpecStrings.append(
_generateMetricSpecString(field=predictedFieldName,
inferenceElement=InferenceElement.prediction,
metric=movingAverageBaselineName,
params={'window':metricWindow
,"errorMetric":"altMAPE",
"mean_window":200,
"steps": steps})
)
# -----------------------------------------------------------------------
# Metrics for classification
elif inferenceType in (InferenceType.TemporalClassification):
metricName = 'avg_err'
trivialErrorMetric = 'avg_err'
oneGramErrorMetric = 'avg_err'
movingAverageBaselineName = 'moving_mode'
optimizeMetricSpec, optimizeMetricLabel = \
_generateMetricSpecString(inferenceElement=InferenceElement.classification,
metric=metricName,
params={'window':metricWindow},
returnLabel=True)
metricSpecStrings.append(optimizeMetricSpec)
if options["runBaselines"]:
# If temporal, generate the trivial predictor metric
if inferenceType == InferenceType.TemporalClassification:
metricSpecStrings.append(
_generateMetricSpecString(inferenceElement=InferenceElement.classification,
metric="trivial",
params={'window':metricWindow,
"errorMetric":trivialErrorMetric})
)
metricSpecStrings.append(
_generateMetricSpecString(inferenceElement=InferenceElement.classification,
metric="two_gram",
params={'window':metricWindow,
"errorMetric":oneGramErrorMetric})
)
metricSpecStrings.append(
_generateMetricSpecString(inferenceElement=InferenceElement.classification,
metric=movingAverageBaselineName,
params={'window':metricWindow
,"errorMetric":"avg_err",
"mode_window":200})
)
# Custom Error Metric
if not options["customErrorMetric"] == None :
#If errorWindow is not specified, make it equal to the default window
if not "errorWindow" in options["customErrorMetric"]:
options["customErrorMetric"]["errorWindow"] = metricWindow
optimizeMetricSpec = _generateMetricSpecString(
inferenceElement=InferenceElement.classification,
metric="custom",
params=options["customErrorMetric"])
optimizeMetricLabel = ".*custom_error_metric.*"
metricSpecStrings.append(optimizeMetricSpec)
# -----------------------------------------------------------------------
# If plug in the predictionSteps variable for any dynamically generated
# prediction steps
if options['dynamicPredictionSteps']:
for i in range(len(metricSpecStrings)):
metricSpecStrings[i] = metricSpecStrings[i].replace(
"'$REPLACE_ME'", "predictionSteps")
optimizeMetricLabel = optimizeMetricLabel.replace(
"'$REPLACE_ME'", ".*")
return metricSpecStrings, optimizeMetricLabel
- |-
def create_perf_attrib_stats(perf_attrib, risk_exposures):
"""
Takes perf attribution data over a period of time and computes annualized
multifactor alpha, multifactor sharpe, risk exposures.
"""
summary = OrderedDict()
total_returns = perf_attrib['total_returns']
specific_returns = perf_attrib['specific_returns']
common_returns = perf_attrib['common_returns']
summary['Annualized Specific Return'] =\
ep.annual_return(specific_returns)
summary['Annualized Common Return'] =\
ep.annual_return(common_returns)
summary['Annualized Total Return'] =\
ep.annual_return(total_returns)
summary['Specific Sharpe Ratio'] =\
ep.sharpe_ratio(specific_returns)
summary['Cumulative Specific Return'] =\
ep.cum_returns_final(specific_returns)
summary['Cumulative Common Return'] =\
ep.cum_returns_final(common_returns)
summary['Total Returns'] =\
ep.cum_returns_final(total_returns)
summary = pd.Series(summary, name='')
annualized_returns_by_factor = [ep.annual_return(perf_attrib[c])
for c in risk_exposures.columns]
cumulative_returns_by_factor = [ep.cum_returns_final(perf_attrib[c])
for c in risk_exposures.columns]
risk_exposure_summary = pd.DataFrame(
data=OrderedDict([
(
'Average Risk Factor Exposure',
risk_exposures.mean(axis='rows')
),
('Annualized Return', annualized_returns_by_factor),
('Cumulative Return', cumulative_returns_by_factor),
]),
index=risk_exposures.columns,
)
return summary, risk_exposure_summary
- source_sentence: _generateEncoderChoicesV1
sentences:
- |-
def common_arg_parser():
"""
Create an argparse.ArgumentParser for run_mujoco.py.
"""
parser = arg_parser()
parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')
parser.add_argument('--env_type', help='type of environment, used when the environment type cannot be automatically determined', type=str)
parser.add_argument('--seed', help='RNG seed', type=int, default=None)
parser.add_argument('--alg', help='Algorithm', type=str, default='ppo2')
parser.add_argument('--num_timesteps', type=float, default=1e6),
parser.add_argument('--network', help='network type (mlp, cnn, lstm, cnn_lstm, conv_only)', default=None)
parser.add_argument('--gamestate', help='game state to load (so far only used in retro games)', default=None)
parser.add_argument('--num_env', help='Number of environment copies being run in parallel. When not specified, set to number of cpus for Atari, and to 1 for Mujoco', default=None, type=int)
parser.add_argument('--reward_scale', help='Reward scale factor. Default: 1.0', default=1.0, type=float)
parser.add_argument('--save_path', help='Path to save trained model to', default=None, type=str)
parser.add_argument('--save_video_interval', help='Save video every x steps (0 = disabled)', default=0, type=int)
parser.add_argument('--save_video_length', help='Length of recorded video. Default: 200', default=200, type=int)
parser.add_argument('--play', default=False, action='store_true')
return parser
- |-
def check_intraday(estimate, returns, positions, transactions):
"""
Logic for checking if a strategy is intraday and processing it.
Parameters
----------
estimate: boolean or str, optional
Approximate returns for intraday strategies.
See description in tears.create_full_tear_sheet.
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
Returns
-------
pd.DataFrame
Daily net position values, adjusted for intraday movement.
"""
if estimate == 'infer':
if positions is not None and transactions is not None:
if detect_intraday(positions, transactions):
warnings.warn('Detected intraday strategy; inferring positi' +
'ons from transactions. Set estimate_intraday' +
'=False to disable.')
return estimate_intraday(returns, positions, transactions)
else:
return positions
else:
return positions
elif estimate:
if positions is not None and transactions is not None:
return estimate_intraday(returns, positions, transactions)
else:
raise ValueError('Positions and txns needed to estimate intraday')
else:
return positions
- |-
def _generateEncoderChoicesV1(fieldInfo):
""" Return a list of possible encoder parameter combinations for the given
field and the default aggregation function to use. Each parameter combination
is a dict defining the parameters for the encoder. Here is an example
return value for the encoderChoicesList:
[
None,
{'fieldname':'timestamp',
'name': 'timestamp_timeOfDay',
'type':'DateEncoder'
'dayOfWeek': (7,1)
},
{'fieldname':'timestamp',
'name': 'timestamp_timeOfDay',
'type':'DateEncoder'
'dayOfWeek': (7,3)
},
],
Parameters:
--------------------------------------------------
fieldInfo: item from the 'includedFields' section of the
description JSON object
retval: (encoderChoicesList, aggFunction)
encoderChoicesList: a list of encoder choice lists for this field.
Most fields will generate just 1 encoder choice list.
DateTime fields can generate 2 or more encoder choice lists,
one for dayOfWeek, one for timeOfDay, etc.
aggFunction: name of aggregation function to use for this
field type
"""
width = 7
fieldName = fieldInfo['fieldName']
fieldType = fieldInfo['fieldType']
encoderChoicesList = []
# Scalar?
if fieldType in ['float', 'int']:
aggFunction = 'mean'
encoders = [None]
for n in (13, 50, 150, 500):
encoder = dict(type='ScalarSpaceEncoder', name=fieldName, fieldname=fieldName,
n=n, w=width, clipInput=True,space="absolute")
if 'minValue' in fieldInfo:
encoder['minval'] = fieldInfo['minValue']
if 'maxValue' in fieldInfo:
encoder['maxval'] = fieldInfo['maxValue']
encoders.append(encoder)
encoderChoicesList.append(encoders)
# String?
elif fieldType == 'string':
aggFunction = 'first'
encoders = [None]
encoder = dict(type='SDRCategoryEncoder', name=fieldName,
fieldname=fieldName, n=100, w=width)
encoders.append(encoder)
encoderChoicesList.append(encoders)
# Datetime?
elif fieldType == 'datetime':
aggFunction = 'first'
# First, the time of day representation
encoders = [None]
for radius in (1, 8):
encoder = dict(type='DateEncoder', name='%s_timeOfDay' % (fieldName),
fieldname=fieldName, timeOfDay=(width, radius))
encoders.append(encoder)
encoderChoicesList.append(encoders)
# Now, the day of week representation
encoders = [None]
for radius in (1, 3):
encoder = dict(type='DateEncoder', name='%s_dayOfWeek' % (fieldName),
fieldname=fieldName, dayOfWeek=(width, radius))
encoders.append(encoder)
encoderChoicesList.append(encoders)
else:
raise RuntimeError("Unsupported field type '%s'" % (fieldType))
# Return results
return (encoderChoicesList, aggFunction)
- source_sentence: leaky_relu6
sentences:
- |-
def list_string_to_dict(string):
"""Inputs ``['a', 'b', 'c']``, returns ``{'a': 0, 'b': 1, 'c': 2}``."""
dictionary = {}
for idx, c in enumerate(string):
dictionary.update({c: idx})
return dictionary
- >-
def affine_transform(x, transform_matrix, channel_index=2,
fill_mode='nearest', cval=0., order=1):
"""Return transformed images by given an affine matrix in Scipy format (x is height).
Parameters
----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
transform_matrix : numpy.array
Transform matrix (offset center), can be generated by ``transform_matrix_offset_center``
channel_index : int
Index of channel, default 2.
fill_mode : str
Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
cval : float
Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0
order : int
The order of interpolation. The order has to be in the range 0-5:
- 0 Nearest-neighbor
- 1 Bi-linear (default)
- 2 Bi-quadratic
- 3 Bi-cubic
- 4 Bi-quartic
- 5 Bi-quintic
- `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
Returns
-------
numpy.array
A processed image.
Examples
--------
>>> M_shear = tl.prepro.affine_shear_matrix(intensity=0.2, is_random=False)
>>> M_zoom = tl.prepro.affine_zoom_matrix(zoom_range=0.8)
>>> M_combined = M_shear.dot(M_zoom)
>>> transform_matrix = tl.prepro.transform_matrix_offset_center(M_combined, h, w)
>>> result = tl.prepro.affine_transform(image, transform_matrix)
"""
# transform_matrix = transform_matrix_offset_center()
# asdihasid
# asd
x = np.rollaxis(x, channel_index, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [
ndi.interpolation.
affine_transform(x_channel, final_affine_matrix, final_offset, order=order, mode=fill_mode, cval=cval)
for x_channel in x
]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_index + 1)
return x
- |-
def leaky_relu6(x, alpha=0.2, name="leaky_relu6"):
""":func:`leaky_relu6` can be used through its shortcut: :func:`tl.act.lrelu6`.
This activation function is a modified version :func:`leaky_relu` introduced by the following paper:
`Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
This activation function also follows the behaviour of the activation function :func:`tf.nn.relu6` introduced by the following paper:
`Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__
The function return the following results:
- When x < 0: ``f(x) = alpha_low * x``.
- When x in [0, 6]: ``f(x) = x``.
- When x > 6: ``f(x) = 6``.
Parameters
----------
x : Tensor
Support input type ``float``, ``double``, ``int32``, ``int64``, ``uint8``, ``int16``, or ``int8``.
alpha : float
Slope.
name : str
The function name (optional).
Examples
--------
>>> import tensorlayer as tl
>>> net = tl.layers.DenseLayer(net, 100, act=lambda x : tl.act.leaky_relu6(x, 0.2), name='dense')
Returns
-------
Tensor
A ``Tensor`` in the same type as ``x``.
References
----------
- `Rectifier Nonlinearities Improve Neural Network Acoustic Models [A. L. Maas et al., 2013] <https://ai.stanford.edu/~amaas/papers/relu_hybrid_icml2013_final.pdf>`__
- `Convolutional Deep Belief Networks on CIFAR-10 [A. Krizhevsky, 2010] <http://www.cs.utoronto.ca/~kriz/conv-cifar10-aug2010.pdf>`__
"""
if not isinstance(alpha, tf.Tensor) and not (0 < alpha <= 1):
raise ValueError("`alpha` value must be in [0, 1]`")
with tf.name_scope(name, "leaky_relu6") as name_scope:
x = tf.convert_to_tensor(x, name="features")
return tf.minimum(tf.maximum(x, alpha * x), 6, name=name_scope)
- source_sentence: LineString.contains
sentences:
- >-
def build_act_with_param_noise(make_obs_ph, q_func, num_actions,
scope="deepq", reuse=None, param_noise_filter_func=None):
"""Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
a function that take a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
param_noise_filter_func: tf.Variable -> bool
function that decides whether or not a variable should be perturbed. Only applicable
if param_noise is True. If set to None, default_param_noise_filter is used by default.
Returns
-------
act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable
function to select and action given observation.
` See the top of the file for details.
"""
if param_noise_filter_func is None:
param_noise_filter_func = default_param_noise_filter
with tf.variable_scope(scope, reuse=reuse):
observations_ph = make_obs_ph("observation")
stochastic_ph = tf.placeholder(tf.bool, (), name="stochastic")
update_eps_ph = tf.placeholder(tf.float32, (), name="update_eps")
update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name="update_param_noise_threshold")
update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name="update_param_noise_scale")
reset_ph = tf.placeholder(tf.bool, (), name="reset")
eps = tf.get_variable("eps", (), initializer=tf.constant_initializer(0))
param_noise_scale = tf.get_variable("param_noise_scale", (), initializer=tf.constant_initializer(0.01), trainable=False)
param_noise_threshold = tf.get_variable("param_noise_threshold", (), initializer=tf.constant_initializer(0.05), trainable=False)
# Unmodified Q.
q_values = q_func(observations_ph.get(), num_actions, scope="q_func")
# Perturbable Q used for the actual rollout.
q_values_perturbed = q_func(observations_ph.get(), num_actions, scope="perturbed_q_func")
# We have to wrap this code into a function due to the way tf.cond() works. See
# https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for
# a more detailed discussion.
def perturb_vars(original_scope, perturbed_scope):
all_vars = scope_vars(absolute_scope_name(original_scope))
all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))
assert len(all_vars) == len(all_perturbed_vars)
perturb_ops = []
for var, perturbed_var in zip(all_vars, all_perturbed_vars):
if param_noise_filter_func(perturbed_var):
# Perturb this variable.
op = tf.assign(perturbed_var, var + tf.random_normal(shape=tf.shape(var), mean=0., stddev=param_noise_scale))
else:
# Do not perturb, just assign.
op = tf.assign(perturbed_var, var)
perturb_ops.append(op)
assert len(perturb_ops) == len(all_vars)
return tf.group(*perturb_ops)
# Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy
# of the network and measures the effect of that perturbation in action space. If the perturbation
# is too big, reduce scale of perturbation, otherwise increase.
q_values_adaptive = q_func(observations_ph.get(), num_actions, scope="adaptive_q_func")
perturb_for_adaption = perturb_vars(original_scope="q_func", perturbed_scope="adaptive_q_func")
kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1)
mean_kl = tf.reduce_mean(kl)
def update_scale():
with tf.control_dependencies([perturb_for_adaption]):
update_scale_expr = tf.cond(mean_kl < param_noise_threshold,
lambda: param_noise_scale.assign(param_noise_scale * 1.01),
lambda: param_noise_scale.assign(param_noise_scale / 1.01),
)
return update_scale_expr
# Functionality to update the threshold for parameter space noise.
update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0,
lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold))
# Put everything together.
deterministic_actions = tf.argmax(q_values_perturbed, axis=1)
batch_size = tf.shape(observations_ph.get())[0]
random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)
chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)
output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)
update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))
updates = [
update_eps_expr,
tf.cond(reset_ph, lambda: perturb_vars(original_scope="q_func", perturbed_scope="perturbed_q_func"), lambda: tf.group(*[])),
tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)),
update_param_noise_threshold_expr,
]
_act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph],
outputs=output_actions,
givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False},
updates=updates)
def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True, update_eps=-1):
return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale)
return act
- |-
def contains(self, other, max_distance=1e-4):
"""
Estimate whether the bounding box contains a point.
Parameters
----------
other : tuple of number or imgaug.augmentables.kps.Keypoint
Point to check for.
max_distance : float
Maximum allowed euclidean distance between the point and the
closest point on the line. If the threshold is exceeded, the point
is not considered to be contained in the line.
Returns
-------
bool
True if the point is contained in the line string, False otherwise.
It is contained if its distance to the line or any of its points
is below a threshold.
"""
return self.compute_distance(other, default=np.inf) < max_distance
- |-
def is_fully_within_image(self, image):
"""
Estimate whether the bounding box is fully inside the image area.
Parameters
----------
image : (H,W,...) ndarray or tuple of int
Image dimensions to use.
If an ndarray, its shape will be used.
If a tuple, it is assumed to represent the image shape
and must contain at least two integers.
Returns
-------
bool
True if the bounding box is fully inside the image area. False otherwise.
"""
shape = normalize_shape(image)
height, width = shape[0:2]
return self.x1 >= 0 and self.x2 < width and self.y1 >= 0 and self.y2 < height
- source_sentence: Keypoint.copy
sentences:
- >-
def build_words_dataset(words=None, vocabulary_size=50000,
printable=True, unk_key='UNK'):
"""Build the words dictionary and replace rare words with 'UNK' token.
The most common word has the smallest integer id.
Parameters
----------
words : list of str or byte
The context in list format. You may need to do preprocessing on the words, such as lower case, remove marks etc.
vocabulary_size : int
The maximum vocabulary size, limiting the vocabulary size. Then the script replaces rare words with 'UNK' token.
printable : boolean
Whether to print the read vocabulary size of the given words.
unk_key : str
Represent the unknown words.
Returns
--------
data : list of int
The context in a list of ID.
count : list of tuple and list
Pair words and IDs.
- count[0] is a list : the number of rare words
- count[1:] are tuples : the number of occurrence of each word
- e.g. [['UNK', 418391], (b'the', 1061396), (b'of', 593677), (b'and', 416629), (b'one', 411764)]
dictionary : dictionary
It is `word_to_id` that maps word to ID.
reverse_dictionary : a dictionary
It is `id_to_word` that maps ID to word.
Examples
--------
>>> words = tl.files.load_matt_mahoney_text8_dataset()
>>> vocabulary_size = 50000
>>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size)
References
-----------------
- `tensorflow/examples/tutorials/word2vec/word2vec_basic.py <https://github.com/tensorflow/tensorflow/blob/r0.7/tensorflow/examples/tutorials/word2vec/word2vec_basic.py>`__
"""
if words is None:
raise Exception("words : list of str or byte")
count = [[unk_key, -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
if printable:
tl.logging.info('Real vocabulary size %d' % len(collections.Counter(words).keys()))
tl.logging.info('Limited vocabulary size {}'.format(vocabulary_size))
if len(collections.Counter(words).keys()) < vocabulary_size:
raise Exception(
"len(collections.Counter(words).keys()) >= vocabulary_size , the limited vocabulary_size must be less than or equal to the read vocabulary_size"
)
return data, count, dictionary, reverse_dictionary
- >-
def Snowflakes(density=(0.005, 0.075), density_uniformity=(0.3, 0.9),
flake_size=(0.2, 0.7),
flake_size_uniformity=(0.4, 0.8), angle=(-30, 30), speed=(0.007, 0.03),
name=None, deterministic=False, random_state=None):
"""
Augmenter to add falling snowflakes to images.
This is a wrapper around ``SnowflakesLayer``. It executes 1 to 3 layers per image.
dtype support::
* ``uint8``: yes; tested
* ``uint16``: no (1)
* ``uint32``: no (1)
* ``uint64``: no (1)
* ``int8``: no (1)
* ``int16``: no (1)
* ``int32``: no (1)
* ``int64``: no (1)
* ``float16``: no (1)
* ``float32``: no (1)
* ``float64``: no (1)
* ``float128``: no (1)
* ``bool``: no (1)
- (1) Parameters of this augmenter are optimized for the value range of uint8.
While other dtypes may be accepted, they will lead to images augmented in
ways inappropriate for the respective dtype.
Parameters
----------
density : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Density of the snowflake layer, as a probability of each pixel in low resolution space to be a snowflake.
Valid value range is ``(0.0, 1.0)``. Recommended to be around ``(0.01, 0.075)``.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then a value will be sampled per image from that parameter.
density_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Size uniformity of the snowflakes. Higher values denote more similarly sized snowflakes.
Valid value range is ``(0.0, 1.0)``. Recommended to be around ``0.5``.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then a value will be sampled per image from that parameter.
flake_size : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Size of the snowflakes. This parameter controls the resolution at which snowflakes are sampled.
Higher values mean that the resolution is closer to the input image's resolution and hence each sampled
snowflake will be smaller (because of the smaller pixel size).
Valid value range is ``[0.0, 1.0)``. Recommended values:
* On ``96x128`` a value of ``(0.1, 0.4)`` worked well.
* On ``192x256`` a value of ``(0.2, 0.7)`` worked well.
* On ``960x1280`` a value of ``(0.7, 0.95)`` worked well.
Allowed datatypes:
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then a value will be sampled per image from that parameter.
flake_size_uniformity : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Controls the size uniformity of the snowflakes. Higher values mean that the snowflakes are more similarly
sized. Valid value range is ``(0.0, 1.0)``. Recommended to be around ``0.5``.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then a value will be sampled per image from that parameter.
angle : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Angle in degrees of motion blur applied to the snowflakes, where ``0.0`` is motion blur that points straight
upwards. Recommended to be around ``(-30, 30)``.
See also :func:`imgaug.augmenters.blur.MotionBlur.__init__`.
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then a value will be sampled per image from that parameter.
speed : number or tuple of number or list of number or imgaug.parameters.StochasticParameter
Perceived falling speed of the snowflakes. This parameter controls the motion blur's kernel size.
It follows roughly the form ``kernel_size = image_size * speed``. Hence,
Values around ``1.0`` denote that the motion blur should "stretch" each snowflake over the whole image.
Valid value range is ``(0.0, 1.0)``. Recommended values:
* On ``96x128`` a value of ``(0.01, 0.05)`` worked well.
* On ``192x256`` a value of ``(0.007, 0.03)`` worked well.
* On ``960x1280`` a value of ``(0.001, 0.03)`` worked well.
Allowed datatypes:
* If a number, then that value will be used for all images.
* If a tuple ``(a, b)``, then a value from the continuous range ``[a, b]`` will be used.
* If a list, then a random value will be sampled from that list per image.
* If a StochasticParameter, then a value will be sampled per image from that parameter.
name : None or str, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
deterministic : bool, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
random_state : None or int or numpy.random.RandomState, optional
See :func:`imgaug.augmenters.meta.Augmenter.__init__`.
Examples
--------
>>> aug = iaa.Snowflakes(flake_size=(0.1, 0.4), speed=(0.01, 0.05))
Adds snowflakes to small images (around ``96x128``).
>>> aug = iaa.Snowflakes(flake_size=(0.2, 0.7), speed=(0.007, 0.03))
Adds snowflakes to medium-sized images (around ``192x256``).
>>> aug = iaa.Snowflakes(flake_size=(0.7, 0.95), speed=(0.001, 0.03))
Adds snowflakes to large images (around ``960x1280``).
"""
if name is None:
name = "Unnamed%s" % (ia.caller_name(),)
layer = SnowflakesLayer(
density=density, density_uniformity=density_uniformity,
flake_size=flake_size, flake_size_uniformity=flake_size_uniformity,
angle=angle, speed=speed,
blur_sigma_fraction=(0.0001, 0.001)
)
return meta.SomeOf(
(1, 3), children=[layer.deepcopy() for _ in range(3)],
random_order=False, name=name, deterministic=deterministic, random_state=random_state
)
- |-
def copy(self, x=None, y=None):
"""
Create a shallow copy of the Keypoint object.
Parameters
----------
x : None or number, optional
Coordinate of the keypoint on the x axis.
If ``None``, the instance's value will be copied.
y : None or number, optional
Coordinate of the keypoint on the y axis.
If ``None``, the instance's value will be copied.
Returns
-------
imgaug.Keypoint
Shallow copy.
"""
return self.deepcopy(x=x, y=y)
model-index:
- name: SentenceTransformer based on sentence-transformers/all-mpnet-base-v2
results:
- task:
type: semantic-similarity
name: Semantic Similarity
dataset:
name: sts dev
type: sts-dev
metrics:
- type: pearson_cosine
value: 0.8806072274141987
name: Pearson Cosine
- type: spearman_cosine
value: 0.8810194487011652
name: Spearman Cosine
- type: pearson_manhattan
value: 0.8780911558324747
name: Pearson Manhattan
- type: spearman_manhattan
value: 0.8798257355327418
name: Spearman Manhattan
- type: pearson_euclidean
value: 0.8794084495321427
name: Pearson Euclidean
- type: spearman_euclidean
value: 0.8810194487011652
name: Spearman Euclidean
- type: pearson_dot
value: 0.8806072253861965
name: Pearson Dot
- type: spearman_dot
value: 0.8810194487011652
name: Spearman Dot
- type: pearson_max
value: 0.8806072274141987
name: Pearson Max
- type: spearman_max
value: 0.8810194487011652
name: Spearman Max
SentenceTransformer based on sentence-transformers/all-mpnet-base-v2
This is a sentence-transformers model finetuned from sentence-transformers/all-mpnet-base-v2 on the code-search-net/code_search_net dataset. It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.
Model Details
Model Description
- Model Type: Sentence Transformer
- Base model: sentence-transformers/all-mpnet-base-v2
- Maximum Sequence Length: 384 tokens
- Output Dimensionality: 768 dimensions
- Similarity Function: Cosine Similarity
- Training Dataset: code-search-net/code_search_net
- Language: code
Model Sources
- Documentation: Sentence Transformers Documentation
- Repository: Sentence Transformers on GitHub
- Hugging Face: Sentence Transformers on Hugging Face
Full Model Architecture
SentenceTransformer(
(0): Transformer({'max_seq_length': 384, 'do_lower_case': False}) with Transformer model: MPNetModel
(1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
(2): Normalize()
)
Usage
Direct Usage (Sentence Transformers)
First install the Sentence Transformers library:
pip install -U sentence-transformers
Then you can load this model and run inference.
from sentence_transformers import SentenceTransformer
# Download from the 🤗 Hub
model = SentenceTransformer("BoghdadyJR/al-MiniLM-L6-v2")
# Run inference
sentences = [
'Keypoint.copy',
'def copy(self, x=None, y=None):\n """\n Create a shallow copy of the Keypoint object.\n\n Parameters\n ----------\n x : None or number, optional\n Coordinate of the keypoint on the x axis.\n If ``None``, the instance\'s value will be copied.\n\n y : None or number, optional\n Coordinate of the keypoint on the y axis.\n If ``None``, the instance\'s value will be copied.\n\n Returns\n -------\n imgaug.Keypoint\n Shallow copy.\n\n """\n return self.deepcopy(x=x, y=y)',
'def build_words_dataset(words=None, vocabulary_size=50000, printable=True, unk_key=\'UNK\'):\n """Build the words dictionary and replace rare words with \'UNK\' token.\n The most common word has the smallest integer id.\n\n Parameters\n ----------\n words : list of str or byte\n The context in list format. You may need to do preprocessing on the words, such as lower case, remove marks etc.\n vocabulary_size : int\n The maximum vocabulary size, limiting the vocabulary size. Then the script replaces rare words with \'UNK\' token.\n printable : boolean\n Whether to print the read vocabulary size of the given words.\n unk_key : str\n Represent the unknown words.\n\n Returns\n --------\n data : list of int\n The context in a list of ID.\n count : list of tuple and list\n Pair words and IDs.\n - count[0] is a list : the number of rare words\n - count[1:] are tuples : the number of occurrence of each word\n - e.g. [[\'UNK\', 418391], (b\'the\', 1061396), (b\'of\', 593677), (b\'and\', 416629), (b\'one\', 411764)]\n dictionary : dictionary\n It is `word_to_id` that maps word to ID.\n reverse_dictionary : a dictionary\n It is `id_to_word` that maps ID to word.\n\n Examples\n --------\n >>> words = tl.files.load_matt_mahoney_text8_dataset()\n >>> vocabulary_size = 50000\n >>> data, count, dictionary, reverse_dictionary = tl.nlp.build_words_dataset(words, vocabulary_size)\n\n References\n -----------------\n - `tensorflow/examples/tutorials/word2vec/word2vec_basic.py <https://github.com/tensorflow/tensorflow/blob/r0.7/tensorflow/examples/tutorials/word2vec/word2vec_basic.py>`__\n\n """\n if words is None:\n raise Exception("words : list of str or byte")\n\n count = [[unk_key, -1]]\n count.extend(collections.Counter(words).most_common(vocabulary_size - 1))\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n unk_count = 0\n for word in words:\n if word in dictionary:\n index = dictionary[word]\n else:\n index = 0 # dictionary[\'UNK\']\n unk_count += 1\n data.append(index)\n count[0][1] = unk_count\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n if printable:\n tl.logging.info(\'Real vocabulary size %d\' % len(collections.Counter(words).keys()))\n tl.logging.info(\'Limited vocabulary size {}\'.format(vocabulary_size))\n if len(collections.Counter(words).keys()) < vocabulary_size:\n raise Exception(\n "len(collections.Counter(words).keys()) >= vocabulary_size , the limited vocabulary_size must be less than or equal to the read vocabulary_size"\n )\n return data, count, dictionary, reverse_dictionary',
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 768]
# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [3, 3]
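Since the model was trained on (func_name, whole_func_string) pairs, the same embeddings can also drive a small semantic code-search loop. The sketch below is illustrative and not part of the original card; the corpus snippets and query are placeholders.
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("BoghdadyJR/al-MiniLM-L6-v2")

# Placeholder corpus of function bodies (substitute your own code snippets)
corpus = [
    "def copy(self, x=None, y=None): return self.deepcopy(x=x, y=y)",
    "def leaky_relu6(x, alpha=0.2, name='leaky_relu6'): ...",
]
query = "Keypoint.copy"

corpus_embeddings = model.encode(corpus)
query_embedding = model.encode([query])

# Rank corpus entries by cosine similarity to the query
scores = model.similarity(query_embedding, corpus_embeddings)  # shape [1, len(corpus)]
best = int(scores.argmax())
print(best, float(scores[0, best]))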
Evaluation
Metrics
Semantic Similarity
- Dataset: sts-dev
- Evaluated with EmbeddingSimilarityEvaluator (a reproduction sketch follows the metrics table below)
Metric | Value |
---|---|
pearson_cosine | 0.8806 |
spearman_cosine | 0.881 |
pearson_manhattan | 0.8781 |
spearman_manhattan | 0.8798 |
pearson_euclidean | 0.8794 |
spearman_euclidean | 0.881 |
pearson_dot | 0.8806 |
spearman_dot | 0.881 |
pearson_max | 0.8806 |
spearman_max | 0.881 |
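The values above come from EmbeddingSimilarityEvaluator. Below is a minimal reproduction sketch; the sentence pairs and gold scores are placeholders, not the actual held-out pairs behind the sts-dev numbers.
from sentence_transformers import SentenceTransformer
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator

model = SentenceTransformer("BoghdadyJR/al-MiniLM-L6-v2")

# Placeholder pairs with gold similarity scores in [0, 1]
sentences1 = ["Keypoint.copy", "leaky_relu6", "common_arg_parser"]
sentences2 = [
    "def copy(self, x=None, y=None): ...",
    "def list_string_to_dict(string): ...",
    "def common_arg_parser(): ...",
]
gold_scores = [1.0, 0.0, 1.0]

evaluator = EmbeddingSimilarityEvaluator(sentences1, sentences2, gold_scores, name="sts-dev")
results = evaluator(model)  # dict with keys such as 'sts-dev_spearman_cosine'
print(results)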
Training Details
Training Dataset
code-search-net/code_search_net
- Dataset: code-search-net/code_search_net
- Size: 20,000 training samples
- Columns: func_name and whole_func_string
- Approximate statistics based on the first 1000 samples:
Statistic | func_name | whole_func_string |
---|---|---|
type | string | string |
details | min: 3 tokens, mean: 8.18 tokens, max: 21 tokens | min: 38 tokens, mean: 192.0 tokens, max: 384 tokens |
- Samples:
func_name | whole_func_string
ImageGraphCut.__msgc_step3_discontinuity_localization
def __msgc_step3_discontinuity_localization(self):
"""
Estimate discontinuity in basis of low resolution image segmentation.
:return: discontinuity in low resolution
"""
import scipy
start = self._start_time
seg = 1 - self.segmentation.astype(np.int8)
self.stats["low level object voxels"] = np.sum(seg)
self.stats["low level image voxels"] = np.prod(seg.shape)
# in seg is now stored low resolution segmentation
# back to normal parameters
# step 2: discontinuity localization
# self.segparams = sparams_hi
seg_border = scipy.ndimage.filters.laplace(seg, mode="constant")
logger.debug("seg_border: %s", scipy.stats.describe(seg_border, axis=None))
# logger.debug(str(np.max(seg_border)))
# logger.debug(str(np.min(seg_border)))
seg_border[seg_border != 0] = 1
logger.debug("seg_border: %s", scipy.stats.describe(seg_border, axis=None))
# scipy.ndimage.morphology.distance_transform_edt
boundary_dilatation_distance = self.segparams["boundary_dilatation_distance"]
seg = scipy.ndimage.morphology.binary_dilation(
seg_border,
# seg,
np.ones(
[
(boundary_dilatation_distance * 2) + 1,
(boundary_dilatation_distance * 2) + 1,
(boundary_dilatation_distance * 2) + 1,
]
),
)
if self.keep_temp_properties:
self.temp_msgc_lowres_discontinuity = seg
else:
self.temp_msgc_lowres_discontinuity = None
if self.debug_images:
import sed3
pd = sed3.sed3(seg_border) # ), contour=seg)
pd.show()
pd = sed3.sed3(seg) # ), contour=seg)
pd.show()
# segzoom = scipy.ndimage.interpolation.zoom(seg.astype('float'), zoom,
# order=0).astype('int8')
self.stats["t3"] = time.time() - start
return seg
ImageGraphCut.__multiscale_gc_lo2hi_run
def __multiscale_gc_lo2hi_run(self): # , pyed):
"""
Run Graph-Cut segmentation with refinement of low resolution multiscale graph.
In first step is performed normal GC on low resolution data
Second step construct finer grid on edges of segmentation from first
step.
There is no option for use without `use_boundary_penalties`
"""
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
self._msgc_lo2hi_resize_init()
self.__msgc_step0_init()
hard_constraints = self.__msgc_step12_low_resolution_segmentation()
# ===== high resolution data processing
seg = self.__msgc_step3_discontinuity_localization()
self.stats["t3.1"] = (time.time() - self._start_time)
graph = Graph(
seg,
voxelsize=self.voxelsize,
nsplit=self.segparams["block_size"],
edge_weight_table=self._msgc_npenalty_table,
compute_low_nodes_index=True,
)
# graph.run() = graph.generate_base_grid() + graph.split_voxels()
# graph.run()
graph.generate_base_grid()
self.stats["t3.2"] = (time.time() - self._start_time)
graph.split_voxels()
self.stats["t3.3"] = (time.time() - self._start_time)
self.stats.update(graph.stats)
self.stats["t4"] = (time.time() - self._start_time)
mul_mask, mul_val = self.__msgc_tlinks_area_weight_from_low_segmentation(seg)
area_weight = 1
unariesalt = self.__create_tlinks(
self.img,
self.voxelsize,
self.seeds,
area_weight=area_weight,
hard_constraints=hard_constraints,
mul_mask=None,
mul_val=None,
)
# N-links prepared
self.stats["t5"] = (time.time() - self._start_time)
un, ind = np.unique(graph.msinds, return_index=True)
self.stats["t6"] = (time.time() - self._start_time)
self.stats["t7"] = (time.time() - self._start_time)
unariesalt2_lo2hi = np.hstack(
[unariesalt[ind, 0, 0].reshape(-1, 1), unariesalt[ind, 0, 1].reshape(-1, 1)]
)
nlinks_lo2hi = np.hstack([graph.edges, graph.edges_weights.reshape(-1, 1)])
if self.debug_images:
import sed3
ed = sed3.sed3(unariesalt[:, :, 0].reshape(self.img.shape))
ed.show()
import sed3
ed = sed3.sed3(unariesalt[:, :, 1].reshape(self.img.shape))
ed.show()
# ed = sed3.sed3(seg)
# ed.show()
# import sed3
# ed = sed3.sed3(graph.data)
# ed.show()
# import sed3
# ed = sed3.sed3(graph.msinds)
# ed.show()
# nlinks, unariesalt2, msinds = self.__msgc_step45678_construct_graph(area_weight, hard_constraints, seg)
# self.__msgc_step9_finish_perform_gc_and_reshape(nlinks, unariesalt2, msinds)
self.__msgc_step9_finish_perform_gc_and_reshape(
nlinks_lo2hi, unariesalt2_lo2hi, graph.msinds
)
self._msgc_lo2hi_resize_clean_finish()
ImageGraphCut.__multiscale_gc_hi2lo_run
def __multiscale_gc_hi2lo_run(self): # , pyed):
"""
Run Graph-Cut segmentation with simplifiyng of high resolution multiscale graph.
In first step is performed normal GC on low resolution data
Second step construct finer grid on edges of segmentation from first
step.
There is no option for use without `use_boundary_penalties`
"""
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
self.__msgc_step0_init()
hard_constraints = self.__msgc_step12_low_resolution_segmentation()
# ===== high resolution data processing
seg = self.__msgc_step3_discontinuity_localization()
nlinks, unariesalt2, msinds = self.__msgc_step45678_hi2lo_construct_graph(
hard_constraints, seg
)
self.__msgc_step9_finish_perform_gc_and_reshape(nlinks, unariesalt2, msinds)
- Loss:
MultipleNegativesRankingLoss
with these parameters: { "scale": 20.0, "similarity_fct": "cos_sim" }
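As an illustration of the pair format and loss described above (not taken verbatim from the training script): the two columns map to (anchor, positive) pairs, and MultipleNegativesRankingLoss treats every other whole_func_string in a batch as a negative. The rows below are placeholders.
from datasets import Dataset
from sentence_transformers import SentenceTransformer
from sentence_transformers.losses import MultipleNegativesRankingLoss

model = SentenceTransformer("sentence-transformers/all-mpnet-base-v2")

# First column = anchor (func_name), second column = positive (whole_func_string)
train_dataset = Dataset.from_dict({
    "func_name": ["Keypoint.copy", "leaky_relu6"],
    "whole_func_string": [
        "def copy(self, x=None, y=None): return self.deepcopy(x=x, y=y)",
        "def leaky_relu6(x, alpha=0.2, name='leaky_relu6'): ...",
    ],
})

# In-batch negatives: all other function bodies in the batch act as negatives
loss = MultipleNegativesRankingLoss(model, scale=20.0)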
Evaluation Dataset
code-search-net/code_search_net
- Dataset: code-search-net/code_search_net
- Size: 15,000 evaluation samples
- Columns: func_name and whole_func_string
- Approximate statistics based on the first 1000 samples:
Statistic | func_name | whole_func_string |
---|---|---|
type | string | string |
details | min: 3 tokens, mean: 9.23 tokens, max: 24 tokens | min: 50 tokens, mean: 276.31 tokens, max: 384 tokens |
- Samples:
func_name | whole_func_string
learn
def learn(env,
network,
seed=None,
lr=5e-4,
total_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
train_freq=1,
batch_size=32,
print_freq=100,
checkpoint_freq=10000,
checkpoint_path=None,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
param_noise=False,
callback=None,
load_path=None,
**network_kwargs
):
"""Train a deepq model.
Parameters
-------
env: gym.Env
environment to train on
network: string or a function
neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
(mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
seed: int or None
prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
lr: float
learning rate for adam optimizer
total_timesteps: int
number of env steps to optimizer for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
update the model every `train_freq` steps.
set to None to disable printing
batch_size: int
size of a batched sampled from replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
the end of the training set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
prioritized_replay: True
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None equals to total_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
callback: (locals, globals) -> None
function called at every steps with state of the algorithm.
If callback returns true training stops.
load_path: str
path to load the model from. (default: None)
**network_kwargs
additional keyword arguments to pass to the network builder.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
sess = get_session()
set_global_seeds(seed)
q_func = build_q_func(network, **network_kwargs)
# capture the shape outside the closure so that the env object is not serialized
# by cloudpickle when serializing make_obs_ph
observation_space = env.observation_space
def make_obs_ph(name):
return ObservationInput(observation_space, name=name)
act, train, update_target, debug = deepq.build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10,
param_noise=param_noise
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': env.action_space.n,
}
act = ActWrapper(act, act_params)
# Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = total_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
# Create the schedule for exploration starting from 1.
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
initial_p=1.0,
final_p=exploration_final_eps)
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
reset = True
with tempfile.TemporaryDirectory() as td:
td = checkpoint_path or td
model_file = os.path.join(td, "model")
model_saved = False
if tf.train.latest_checkpoint(td) is not None:
load_variables(model_file)
logger.log('Loaded model from {}'.format(model_file))
model_saved = True
elif load_path is not None:
load_variables(load_path)
logger.log('Loaded model from {}'.format(load_path))
for t in range(total_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
kwargs = {}
if not param_noise:
update_eps = exploration.value(t)
update_param_noise_threshold = 0.
else:
update_eps = 0.
# Compute the threshold such that the KL divergence between perturbed and non-perturbed
# policy is comparable to eps-greedy exploration with eps = exploration.value(t).
# See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
# for detailed explanation.
update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
kwargs['reset'] = reset
kwargs['update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = True
action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
env_action = action
reset = False
new_obs, rew, done, _ = env.step(env_action)
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
obs = env.reset()
episode_rewards.append(0.0)
reset = True
if t > learning_starts and t % train_freq == 0:
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
weights, batch_idxes = np.ones_like(rewards), None
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
update_target()
mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
num_episodes = len(episode_rewards)
if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
logger.dump_tabular()
if (checkpoint_freq is not None and t > learning_starts and
num_episodes > 100 and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log("Saving model due to mean reward increase: {} -> {}".format(
saved_mean_reward, mean_100ep_reward))
save_variables(model_file)
model_saved = True
saved_mean_reward = mean_100ep_reward
if model_saved:
if print_freq is not None:
logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
load_variables(model_file)
return act
ActWrapper.save_act
def save_act(self, path=None):
"""Save model to a pickle located atpath
"""
if path is None:
path = os.path.join(logger.get_dir(), "model.pkl")
with tempfile.TemporaryDirectory() as td:
save_variables(os.path.join(td, "model"))
arc_name = os.path.join(td, "packed.zip")
with zipfile.ZipFile(arc_name, 'w') as zipf:
for root, dirs, files in os.walk(td):
for fname in files:
file_path = os.path.join(root, fname)
if file_path != arc_name:
zipf.write(file_path, os.path.relpath(file_path, td))
with open(arc_name, "rb") as f:
model_data = f.read()
with open(path, "wb") as f:
cloudpickle.dump((model_data, self._act_params), f)
nature_cnn
def nature_cnn(unscaled_images, **conv_kwargs):
"""
CNN from Nature paper.
"""
scaled_images = tf.cast(unscaled_images, tf.float32) / 255.
activ = tf.nn.relu
h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2),
**conv_kwargs))
h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))
h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))
h3 = conv_to_fc(h3)
return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2)))
- Loss:
MultipleNegativesRankingLoss
with these parameters: { "scale": 20.0, "similarity_fct": "cos_sim" }
Training Hyperparameters
Non-Default Hyperparameters
- eval_strategy: steps
- per_device_train_batch_size: 16
- per_device_eval_batch_size: 16
- learning_rate: 2e-05
- num_train_epochs: 1
- warmup_ratio: 0.1
- fp16: True
- batch_sampler: no_duplicates
All Hyperparameters
- overwrite_output_dir: False
- do_predict: False
- eval_strategy: steps
- prediction_loss_only: True
- per_device_train_batch_size: 16
- per_device_eval_batch_size: 16
- per_gpu_train_batch_size: None
- per_gpu_eval_batch_size: None
- gradient_accumulation_steps: 1
- eval_accumulation_steps: None
- learning_rate: 2e-05
- weight_decay: 0.0
- adam_beta1: 0.9
- adam_beta2: 0.999
- adam_epsilon: 1e-08
- max_grad_norm: 1.0
- num_train_epochs: 1
- max_steps: -1
- lr_scheduler_type: linear
- lr_scheduler_kwargs: {}
- warmup_ratio: 0.1
- warmup_steps: 0
- log_level: passive
- log_level_replica: warning
- log_on_each_node: True
- logging_nan_inf_filter: True
- save_safetensors: True
- save_on_each_node: False
- save_only_model: False
- restore_callback_states_from_checkpoint: False
- no_cuda: False
- use_cpu: False
- use_mps_device: False
- seed: 42
- data_seed: None
- jit_mode_eval: False
- use_ipex: False
- bf16: False
- fp16: True
- fp16_opt_level: O1
- half_precision_backend: auto
- bf16_full_eval: False
- fp16_full_eval: False
- tf32: None
- local_rank: 0
- ddp_backend: None
- tpu_num_cores: None
- tpu_metrics_debug: False
- debug: []
- dataloader_drop_last: False
- dataloader_num_workers: 0
- dataloader_prefetch_factor: None
- past_index: -1
- disable_tqdm: False
- remove_unused_columns: True
- label_names: None
- load_best_model_at_end: False
- ignore_data_skip: False
- fsdp: []
- fsdp_min_num_params: 0
- fsdp_config: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- fsdp_transformer_layer_cls_to_wrap: None
- accelerator_config: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- deepspeed: None
- label_smoothing_factor: 0.0
- optim: adamw_torch
- optim_args: None
- adafactor: False
- group_by_length: False
- length_column_name: length
- ddp_find_unused_parameters: None
- ddp_bucket_cap_mb: None
- ddp_broadcast_buffers: False
- dataloader_pin_memory: True
- dataloader_persistent_workers: False
- skip_memory_metrics: True
- use_legacy_prediction_loop: False
- push_to_hub: False
- resume_from_checkpoint: None
- hub_model_id: None
- hub_strategy: every_save
- hub_private_repo: False
- hub_always_push: False
- gradient_checkpointing: False
- gradient_checkpointing_kwargs: None
- include_inputs_for_metrics: False
- eval_do_concat_batches: True
- fp16_backend: auto
- push_to_hub_model_id: None
- push_to_hub_organization: None
- mp_parameters:
- auto_find_batch_size: False
- full_determinism: False
- torchdynamo: None
- ray_scope: last
- ddp_timeout: 1800
- torch_compile: False
- torch_compile_backend: None
- torch_compile_mode: None
- dispatch_batches: None
- split_batches: None
- include_tokens_per_second: False
- include_num_input_tokens_seen: False
- neftune_noise_alpha: None
- optim_target_modules: None
- batch_eval_metrics: False
- eval_on_start: False
- batch_sampler: no_duplicates
- multi_dataset_batch_sampler: proportional
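The non-default values above map directly onto SentenceTransformerTrainingArguments. The sketch below is a hedged reconstruction rather than the exact training script: output_dir, the code_search_net language configuration ("python"), and the train/eval slices are assumptions, since the card only states 20,000 training and 15,000 evaluation samples.
from datasets import load_dataset
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
)
from sentence_transformers.losses import MultipleNegativesRankingLoss
from sentence_transformers.training_args import BatchSamplers

model = SentenceTransformer("sentence-transformers/all-mpnet-base-v2")
loss = MultipleNegativesRankingLoss(model)

# Assumed language subset and slices (not stated in the card)
ds = load_dataset("code-search-net/code_search_net", "python", trust_remote_code=True)
train_dataset = ds["train"].select_columns(["func_name", "whole_func_string"]).select(range(20_000))
eval_dataset = ds["validation"].select_columns(["func_name", "whole_func_string"]).select(range(15_000))

args = SentenceTransformerTrainingArguments(
    output_dir="outputs",                        # placeholder
    eval_strategy="steps",
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    learning_rate=2e-5,
    num_train_epochs=1,
    warmup_ratio=0.1,
    fp16=True,
    batch_sampler=BatchSamplers.NO_DUPLICATES,   # avoid duplicate texts within a batch
)

trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    loss=loss,
)
trainer.train()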
Training Logs
Epoch | Step | Training Loss | loss | sts-dev_spearman_cosine |
---|---|---|---|---|
0 | 0 | - | - | 0.8810 |
0.08 | 100 | 0.4124 | 0.2191 | - |
0.16 | 200 | 0.108 | 0.0993 | - |
0.24 | 300 | 0.127 | 0.0756 | - |
0.32 | 400 | 0.0728 | - | - |
0.08 | 100 | 0.0662 | 0.0683 | - |
0.16 | 200 | 0.0321 | 0.0660 | - |
0.24 | 300 | 0.0815 | 0.0584 | - |
0.32 | 400 | 0.049 | 0.0591 | - |
0.4 | 500 | 0.0636 | 0.0612 | - |
0.48 | 600 | 0.0929 | 0.0577 | - |
0.56 | 700 | 0.0342 | 0.0568 | - |
0.64 | 800 | 0.0265 | 0.0572 | - |
0.72 | 900 | 0.0406 | 0.0551 | - |
0.8 | 1000 | 0.039 | 0.0549 | - |
0.88 | 1100 | 0.0376 | 0.0551 | - |
0.96 | 1200 | 0.0823 | 0.0556 | - |
Framework Versions
- Python: 3.10.13
- Sentence Transformers: 3.0.1
- Transformers: 4.42.3
- PyTorch: 2.1.2
- Accelerate: 0.32.1
- Datasets: 2.20.0
- Tokenizers: 0.19.1
Citation
BibTeX
Sentence Transformers
@inproceedings{reimers-2019-sentence-bert,
title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
author = "Reimers, Nils and Gurevych, Iryna",
booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
month = "11",
year = "2019",
publisher = "Association for Computational Linguistics",
url = "https://arxiv.org/abs/1908.10084",
}
MultipleNegativesRankingLoss
@misc{henderson2017efficient,
title={Efficient Natural Language Response Suggestion for Smart Reply},
author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},
year={2017},
eprint={1705.00652},
archivePrefix={arXiv},
primaryClass={cs.CL}
}