package main

// inputParams maps each cuDNN function name to the names of its input-only
// parameters, in the order they appear in the C API call. A parameter should
// be classified in exactly one of inputParams, outputParams, or ioParams.
var inputParams = map[string][]string{
	"cudnnActivationBackward":                            {"handle", "activationDesc", "alpha", "beta", "yDesc", "y", "dyDesc", "dy", "xDesc", "x", "dxDesc"},
	"cudnnActivationForward":                             {"handle", "activationDesc", "alpha", "beta", "xDesc", "x", "yDesc"},
	"cudnnAddTensor":                                     {"handle", "alpha", "beta", "aDesc", "A", "cDesc"},
	"cudnnBatchNormalizationBackward":                    {"handle", "mode", "alphaDataDiff", "betaDataDiff", "alphaParamDiff", "betaParamDiff", "xDesc", "x", "dyDesc", "dy", "dxDesc", "dx", "bnScaleBiasDiffDesc", "bnScale", "epsilon", "savedMean", "savedInvVariance"},
	"cudnnBatchNormalizationForwardInference":            {"handle", "mode", "alpha", "beta", "xDesc", "yDesc", "x", "y", "bnScaleBiasMeanVarDesc", "bnScaleData", "bnBiasData", "estimatedMean", "estimatedVariance", "epsilon"},
	"cudnnBatchNormalizationForwardTraining":             {"handle", "mode", "alpha", "beta", "xDesc", "yDesc", "x", "y", "bnScaleBiasMeanVarDesc", "bnScale", "bnBias", "exponentialAverageFactor", "epsilon"},
	"cudnnCTCLoss":                                       {"handle", "probsDesc", "probs", "labels", "labelLengths", "inputLengths", "gradientsDesc", "algo", "ctcLossDesc", "workspace", "sizeInBytes"},
	"cudnnConvolutionBackwardBias":                       {"handle", "alpha", "beta", "dyDesc", "dy", "dbDesc"},
	"cudnnConvolutionBackwardData":                       {"handle", "alpha", "beta", "wDesc", "w", "dyDesc", "dy", "convDesc", "algo", "workSpace", "workSpaceSizeInBytes", "dxDesc"},
	"cudnnConvolutionBackwardFilter":                     {"handle", "alpha", "beta", "xDesc", "x", "dyDesc", "dy", "convDesc", "algo", "workSpace", "workSpaceSizeInBytes", "dwDesc"},
	"cudnnConvolutionBiasActivationForward":              {"handle", "alpha1", "alpha2", "xDesc", "x", "wDesc", "w", "convDesc", "algo", "workSpace", "workSpaceSizeInBytes", "zDesc", "z", "biasDesc", "bias", "activationDesc", "yDesc"},
	"cudnnConvolutionForward":                            {"handle", "alpha", "beta", "xDesc", "x", "wDesc", "w", "convDesc", "algo", "workSpace", "workSpaceSizeInBytes", "yDesc"},
	"cudnnCreateTensorDescriptor":                        {"tensorDesc"},
	"cudnnDeriveBNTensorDescriptor":                      {"xDesc", "mode"},
	"cudnnDestroy":                                       {"handle"},
	"cudnnDestroyCTCLossDescriptor":                      {"ctcLossDesc"},
	"cudnnDestroyOpTensorDescriptor":                     {"opTensorDesc"},
	"cudnnDestroyReduceTensorDescriptor":                 {"tensorDesc"},
	"cudnnDestroyTensorDescriptor":                       {"tensorDesc"},
	"cudnnDivisiveNormalizationBackward":                 {"handle", "normDesc", "mode", "alpha", "beta", "xDesc", "x", "means", "dy", "dxDesc"},
	"cudnnDivisiveNormalizationForward":                  {"handle", "normDesc", "divNormMode", "alpha", "beta", "xDesc", "yDesc", "x", "means"},
	"cudnnDropoutBackward":                               {"handle", "dropoutDesc", "dyDesc", "dy", "dxDesc", "reserveSpace", "reserveSpaceSizeInBytes"},
	"cudnnDropoutForward":                                {"handle", "dropoutDesc", "xDesc", "x", "yDesc", "reserveSpaceSizeInBytes"},
	"cudnnDropoutGetReserveSpaceSize":                    {"xDesc"},
	"cudnnDropoutGetStatesSize":                          {"handle"},
	"cudnnFindConvolutionBackwardDataAlgorithm":          {"handle", "wDesc", "dyDesc", "convDesc", "dxDesc", "requestedAlgoCount"},
	"cudnnFindConvolutionBackwardDataAlgorithmEx":        {"handle", "wDesc", "w", "dyDesc", "dy", "convDesc", "dxDesc", "requestedAlgoCount", "workSpace", "workSpaceSizeInBytes"},
	"cudnnFindConvolutionBackwardFilterAlgorithm":        {"handle", "xDesc", "dyDesc", "convDesc", "dwDesc", "requestedAlgoCount"},
	"cudnnFindConvolutionBackwardFilterAlgorithmEx":      {"handle", "xDesc", "x", "dyDesc", "dy", "convDesc", "dwDesc", "requestedAlgoCount", "workSpace", "workSpaceSizeInBytes"},
	"cudnnFindConvolutionForwardAlgorithm":               {"handle", "xDesc", "wDesc", "convDesc", "yDesc", "requestedAlgoCount"},
	"cudnnFindConvolutionForwardAlgorithmEx":             {"handle", "xDesc", "x", "wDesc", "w", "convDesc", "yDesc", "requestedAlgoCount", "workSpace", "workSpaceSizeInBytes"},
	"cudnnFindRNNBackwardDataAlgorithmEx":                {"handle", "rnnDesc", "seqLength", "yDesc", "y", "dyDesc", "dy", "dhyDesc", "dhy", "dcyDesc", "dcy", "wDesc", "w", "hxDesc", "hx", "cxDesc", "cx", "dxDesc", "dhxDesc", "dcxDesc", "findIntensity", "requestedAlgoCount", "workspace", "workSpaceSizeInBytes", "reserveSpaceSizeInBytes"},
	"cudnnFindRNNBackwardWeightsAlgorithmEx":             {"handle", "rnnDesc", "seqLength", "xDesc", "x", "hxDesc", "hx", "yDesc", "y", "findIntensity", "requestedAlgoCount", "workspace", "workSpaceSizeInBytes", "dwDesc", "reserveSpace", "reserveSpaceSizeInBytes"},
	"cudnnFindRNNForwardInferenceAlgorithmEx":            {"handle", "rnnDesc", "seqLength", "xDesc", "x", "hxDesc", "hx", "cxDesc", "cx", "wDesc", "w", "yDesc", "hyDesc", "cyDesc", "findIntensity", "requestedAlgoCount", "workspace", "workSpaceSizeInBytes"},
	"cudnnFindRNNForwardTrainingAlgorithmEx":             {"handle", "rnnDesc", "xDesc", "seqLength", "x", "hxDesc", "hx", "cxDesc", "cx", "wDesc", "w", "yDesc", "hyDesc", "cyDesc", "findIntensity", "requestedAlgoCount", "workspace", "workSpaceSizeInBytes", "reserveSpaceSizeInBytes"},
	"cudnnGetActivationDescriptor":                       {"activationDesc"},
	"cudnnGetAlgorithmDescriptor":                        {"algorithmDesc", "algorithm"},
	"cudnnGetAlgorithmSpaceSize":                         {"handle", "algoDesc"},
	"cudnnGetCTCLossDescriptor":                          {"ctcLossDesc"},
	"cudnnGetCTCLossWorkspaceSize":                       {"handle", "probsDesc", "gradientsDesc", "labels", "labelLengths", "inputLengths", "algo", "ctcLossDesc"},
	"cudnnGetConvolution2dForwardOutputDim":              {"convDesc", "inputTensorDesc", "filterDesc"},
	"cudnnGetConvolutionBackwardDataAlgorithm":           {"handle", "wDesc", "dyDesc", "convDesc", "dxDesc", "preference", "memoryLimitInBytes"},
	"cudnnGetConvolutionBackwardDataAlgorithmMaxCount":   {"handle"},
	"cudnnGetConvolutionBackwardDataAlgorithm_v7":        {"handle", "wDesc", "dyDesc", "convDesc", "dxDesc", "requestedAlgoCount"},
	"cudnnGetConvolutionBackwardDataWorkspaceSize":       {"handle", "wDesc", "dyDesc", "convDesc", "dxDesc", "algo"},
	"cudnnGetConvolutionBackwardFilterAlgorithm":         {"handle", "xDesc", "dyDesc", "convDesc", "dwDesc", "preference", "memoryLimitInBytes"},
	"cudnnGetConvolutionBackwardFilterAlgorithmMaxCount": {"handle"},
	"cudnnGetConvolutionBackwardFilterAlgorithm_v7":      {"handle", "xDesc", "dyDesc", "convDesc", "dwDesc", "requestedAlgoCount"},
	"cudnnGetConvolutionBackwardFilterWorkspaceSize":     {"handle", "xDesc", "dyDesc", "convDesc", "dwDesc", "algo"},
	"cudnnGetConvolutionForwardAlgorithm":                {"handle", "xDesc", "wDesc", "convDesc", "yDesc", "preference", "memoryLimitInBytes"},
	"cudnnGetConvolutionForwardAlgorithmMaxCount":        {"handle"},
	"cudnnGetConvolutionForwardAlgorithm_v7":             {"handle", "xDesc", "wDesc", "convDesc", "yDesc", "requestedAlgoCount"},
	"cudnnGetConvolutionForwardWorkspaceSize":            {"handle", "xDesc", "wDesc", "convDesc", "yDesc", "algo"},
	"cudnnGetConvolutionNdDescriptor":                    {"arrayLengthRequested"},
	"cudnnGetConvolutionNdForwardOutputDim":              {"convDesc", "inputTensorDesc", "filterDesc", "nbDims"},
	"cudnnGetDropoutDescriptor":                          {"dropoutDesc", "handle"},
	"cudnnGetErrorString":                                {"status"},
	"cudnnGetFilter4dDescriptor":                         {"filterDesc"},
	"cudnnGetFilterNdDescriptor":                         {"wDesc", "nbDimsRequested"},
	"cudnnGetOpTensorDescriptor":                         {"opTensorDesc"},
	"cudnnGetPooling2dDescriptor":                        {"poolingDesc"},
	"cudnnGetPooling2dForwardOutputDim":                  {"poolingDesc", "inputDesc"},
	"cudnnGetPoolingNdDescriptor":                        {"poolingDesc", "nbDimsRequested", "maxpoolingNanOpt"},
	"cudnnGetPoolingNdForwardOutputDim":                  {"poolingDesc", "inputDesc", "nbDims"},
	"cudnnGetProperty":                                   {"type"},
	"cudnnGetRNNDataDescriptor":                          {"RNNDataDesc", "arrayLengthRequested"},
	"cudnnGetRNNDescriptor":                              {"handle", "rnnDesc"},
	"cudnnGetRNNLinLayerBiasParams":                      {"handle", "rnnDesc", "pseudoLayer", "xDesc", "wDesc", "w", "linLayerID"},
	"cudnnGetRNNLinLayerMatrixParams":                    {"handle", "rnnDesc", "pseudoLayer", "xDesc", "wDesc", "w", "linLayerID"},
	"cudnnGetRNNParamsSize":                              {"handle", "rnnDesc", "xDesc", "dataType"},
	"cudnnGetRNNPaddingMode":                             {"*paddingMode"}, // NOTE(review): leading '*' is inconsistent with the rest of the table, and paddingMode is an output in the C API — confirm the generator relies on this spelling before changing it.
	"cudnnGetRNNProjectionLayers":                        {"handle", "rnnDesc"},
	"cudnnGetRNNTrainingReserveSize":                     {"handle", "rnnDesc", "seqLength", "xDesc"},
	"cudnnGetRNNWorkspaceSize":                           {"handle", "rnnDesc", "seqLength", "xDesc"},
	"cudnnGetReduceTensorDescriptor":                     {"reduceTensorDesc", "reduceTensorNanOpt"},
	"cudnnGetReductionIndicesSize":                       {"handle", "reduceDesc", "aDesc", "cDesc"},
	"cudnnGetReductionWorkspaceSize":                     {"handle", "reduceDesc", "aDesc", "cDesc"},
	"cudnnGetStream":                                     {"handle"},
	"cudnnGetTensor4dDescriptor":                         {"tensorDesc"},
	"cudnnGetTensorNdDescriptor":                         {"tensorDesc", "nbDimsRequested", "strideA"},
	"cudnnGetTensorSizeInBytes":                          {"tensorDesc"},
	"cudnnIm2Col":                                        {"handle", "srcDesc", "srcData", "filterDesc", "convDesc"},
	"cudnnLRNCrossChannelBackward":                       {"handle", "normDesc", "lrnMode", "alpha", "beta", "yDesc", "y", "dyDesc", "dy", "xDesc", "x"},
	"cudnnLRNCrossChannelForward":                        {"handle", "normDesc", "lrnMode", "alpha", "beta", "xDesc", "yDesc", "x"},
	"cudnnOpTensor":                                      {"handle", "opTensorDesc", "alpha1", "alpha2", "beta", "aDesc", "bDesc", "cDesc", "A", "B"},
	"cudnnPoolingBackward":                               {"handle", "poolingDesc", "alpha", "beta", "yDesc", "y", "dyDesc", "dy", "xDesc", "x", "dxDesc"},
	"cudnnPoolingForward":                                {"handle", "poolingDesc", "alpha", "beta", "xDesc", "x", "yDesc"},
	"cudnnQueryRuntimeError":                             {"handle", "mode"},
	"cudnnRNNBackwardData":                               {"handle", "rnnDesc", "seqLength", "yDesc", "y", "dyDesc", "dy", "dhyDesc", "dhy", "dcyDesc", "dcy", "wDesc", "w", "hxDesc", "hx", "cxDesc", "cx", "dxDesc", "dhxDesc", "dcxDesc", "workspace", "workSpaceSizeInBytes", "reserveSpaceSizeInBytes"},
	"cudnnRNNBackwardDataEx":                             {"handle", "rnnDesc", "yDesc", "y", "dyDesc", "dy", "dhyDesc", "dhy", "dcyDesc", "dcy", "wDesc", "w", "hxDesc", "hx", "cxDesc", "cx", "dxDesc", "dhxDesc", "dcxDesc", "dkDesc", "dkeys", "workspace", "workSpaceSizeInBytes", "reserveSpaceSizeInBytes"},
	"cudnnRNNBackwardWeights":                            {"handle", "rnnDesc", "seqLength", "xDesc", "x", "hxDesc", "hx", "yDesc", "y", "workspace", "workSpaceSizeInBytes", "dwDesc", "reserveSpace", "reserveSpaceSizeInBytes"},
	"cudnnRNNBackwardWeightsEx":                          {"handle", "rnnDesc", "seqLength", "xDesc", "x", "hxDesc", "hx", "yDesc", "y", "workspace", "workSpaceSizeInBytes", "dwDesc", "reserveSpace", "reserveSpaceSizeInBytes"},
	"cudnnRNNForwardInference":                           {"handle", "rnnDesc", "seqLength", "xDesc", "x", "hxDesc", "hx", "cxDesc", "cx", "wDesc", "w", "yDesc", "hyDesc", "cyDesc", "workspace", "workSpaceSizeInBytes"},
	"cudnnRNNForwardInferenceEx":                         {"handle", "rnnDesc", "xDesc", "x", "hxDesc", "hx", "cxDesc", "cx", "wDesc", "w", "yDesc", "hyDesc", "cyDesc", "kDesc", "Keys", "cDesc", "cAttn", "iDesc", "iAttn", "qDesc", "Queries", "workspace", "workSpaceSizeInBytes"},
	"cudnnRNNForwardTraining":                            {"handle", "rnnDesc", "seqLength", "xDesc", "x", "hxDesc", "hx", "cxDesc", "cx", "wDesc", "w", "yDesc", "hyDesc", "cyDesc", "workspace", "workSpaceSizeInBytes", "reserveSpaceSizeInBytes"},
	"cudnnRNNForwardTrainingEx":                          {"handle", "rnnDesc", "xDesc", "x", "hxDesc", "hx", "cxDesc", "cx", "wDesc", "w", "yDesc", "hyDesc", "cyDesc", "kDesc", "Keys", "cDesc", "cAttn", "iDesc", "iAttn", "qDesc", "Queries", "workspace", "workSpaceSizeInBytes", "reserveSpaceSizeInBytes"},
	"cudnnRNNSetClip":                                    {"clipMode", "lclip", "rclip", "clipNanOpt"},
	"cudnnReduceTensor":                                  {"handle", "reduceTensorDesc", "indicesSizeInBytes", "workspace", "workspaceSizeInBytes", "alpha", "beta", "aDesc", "cDesc", "A"},
	"cudnnRestoreAlgorithm":                              {"handle", "algoDesc", "algoSpace", "algoSpaceSizeInBytes"},
	"cudnnRestoreDropoutDescriptor":                      {"handle", "dropout", "states", "stateSizeInBytes", "seed"},
	"cudnnSaveAlgorithm":                                 {"handle", "algoDesc", "algoSpace", "algoSpaceSizeInBytes"},
	"cudnnScaleTensor":                                   {"handle", "yDesc", "alpha"},
	"cudnnSetActivationDescriptor":                       {"mode", "reluNanOpt", "coef"},
	"cudnnSetAlgorithmDescriptor":                        {"algorithm"},
	"cudnnSetAlgorithmPerformance":                       {"algoDesc", "status", "time", "memory"},
	"cudnnSetCTCLossDescriptor":                          {"compType"},
	"cudnnSetCallback":                                   {"mask", "udata", "fptr"},
	"cudnnSetConvolution2dDescriptor":                    {"pad_h", "pad_w", "u", "v", "dilation_h", "dilation_w", "mode", "computeType"},
	"cudnnSetConvolutionNdDescriptor":                    {"arrayLength", "padA", "filterStrideA", "dilationA", "mode", "datatype"},
	"cudnnSetDropoutDescriptor":                          {"handle", "dropout", "stateSizeInBytes", "seed"},
	"cudnnSetFilter4dDescriptor":                         {"datatype", "format", "k", "c", "h", "w"},
	"cudnnSetFilterNdDescriptor":                         {"datatype", "format", "nbDims", "filterDimA"},
	"cudnnSetLRNDescriptor":                              {"lrnN", "lrnAlpha", "lrnBeta", "lrnK"},
	"cudnnSetOpTensorDescriptor":                         {"opTensorOp", "opTensorCompType", "opTensorNanOpt"},
	"cudnnSetPooling2dDescriptor":                        {"mode", "maxpoolingNanOpt", "windowHeight", "windowWidth", "verticalPadding", "horizontalPadding", "verticalStride", "horizontalStride"},
	"cudnnSetPoolingNdDescriptor":                        {"mode", "maxpoolingNanOpt", "nbDims"},
	"cudnnSetRNNDataDescriptor":                          {"dataType", "layout", "maxSeqLength", "batchSize", "vectorSize", "seqLengthArray", "paddingFill"},
	"cudnnSetRNNDescriptor":                              {"hiddenSize", "numLayers", "dropoutDesc", "inputMode", "direction", "mode", "dataType"},
	"cudnnSetRNNDescriptor_v5":                           {"hiddenSize", "numLayers", "dropoutDesc", "inputMode", "direction", "mode", "dataType"},
	"cudnnSetRNNDescriptor_v6":                           {"handle", "hiddenSize", "numLayers", "dropoutDesc", "inputMode", "direction", "mode", "algo", "dataType"},
	"cudnnSetRNNMatrixMathType":                          {"rnnDesc", "mType"},
	"cudnnSetRNNPaddingMode":                             {"paddingMode"},
	"cudnnSetRNNProjectionLayers":                        {"handle", "rnnDesc", "recProjSize", "outProjSize"},
	"cudnnSetReduceTensorDescriptor":                     {"reduceTensorOp", "reduceTensorCompType", "reduceTensorNanOpt", "reduceTensorIndices", "reduceTensorIndicesType"},
	"cudnnSetSpatialTransformerNdDescriptor":             {"samplerType", "dataType", "nbDims", "dimA"},
	"cudnnSetStream":                                     {"handle", "streamID"},
	"cudnnSetTensor":                                     {"handle", "yDesc", "valuePtr"},
	"cudnnSetTensor4dDescriptor":                         {"format", "datatype", "n", "c", "h", "w"},
	"cudnnSetTensor4dDescriptorEx":                       {"datatype", "n", "c", "h", "w", "nStride", "cStride", "hStride", "wStride"},
	"cudnnSetTensorNdDescriptor":                         {"datatype", "nbDims", "dimA", "strideA"},
	"cudnnSetTensorNdDescriptorEx":                       {"format", "dataType", "nbDims", "dimA"},
	"cudnnSoftmaxBackward":                               {"handle", "algorithm", "mode", "alpha", "beta", "yDesc", "y", "dyDesc", "dy", "dxDesc"},
	"cudnnSoftmaxForward":                                {"handle", "algorithm", "mode", "alpha", "beta", "xDesc", "x", "yDesc"},
	"cudnnSpatialTfGridGeneratorBackward":                {"handle", "stDesc", "dgrid"},
	"cudnnSpatialTfGridGeneratorForward":                 {"handle", "stDesc", "theta"},
	"cudnnSpatialTfSamplerBackward":                      {"handle", "stDesc", "alpha", "beta", "xDesc", "x", "dxDesc", "alphaDgrid", "betaDgrid", "dyDesc", "dy", "grid"},
	"cudnnSpatialTfSamplerForward":                       {"handle", "stDesc", "alpha", "beta", "xDesc", "x", "grid", "yDesc"},
	"cudnnTransformTensor":                               {"handle", "alpha", "beta", "xDesc", "x", "yDesc"},
}
// outputParams maps each cuDNN function name to the names of its output-only
// parameters (pointers the C call writes through), in call order. Functions
// with no pure outputs are absent. A parameter should be classified in exactly
// one of inputParams, outputParams, or ioParams.
var outputParams = map[string][]string{
	"cudnnActivationBackward":                            {"dx"},
	"cudnnActivationForward":                             {"y"},
	"cudnnBatchNormalizationBackward":                    {"resultBnScaleDiff", "resultBnBiasDiff"},
	"cudnnBatchNormalizationForwardTraining":             {"resultSaveMean", "resultSaveInvVariance"},
	"cudnnCTCLoss":                                       {"costs", "gradients"},
	"cudnnConvolutionBackwardBias":                       {"db"},
	"cudnnCreate":                                        {"handle"},
	"cudnnCreateCTCLossDescriptor":                       {"ctcLossDesc"},
	"cudnnCreateOpTensorDescriptor":                      {"opTensorDesc"},
	"cudnnDeriveBNTensorDescriptor":                      {"derivedBnDesc"},
	"cudnnDivisiveNormalizationBackward":                 {"dx", "dMeans"},
	"cudnnDivisiveNormalizationForward":                  {"y"},
	"cudnnDropoutBackward":                               {"dx"},
	"cudnnDropoutForward":                                {"y", "reserveSpace"},
	"cudnnDropoutGetReserveSpaceSize":                    {"sizeInBytes"},
	"cudnnDropoutGetStatesSize":                          {"sizeInBytes"},
	"cudnnFindConvolutionBackwardDataAlgorithm":          {"returnedAlgoCount", "perfResults"},
	"cudnnFindConvolutionBackwardDataAlgorithmEx":        {"returnedAlgoCount", "perfResults"},
	"cudnnFindConvolutionBackwardFilterAlgorithm":        {"returnedAlgoCount", "perfResults"},
	"cudnnFindConvolutionBackwardFilterAlgorithmEx":      {"returnedAlgoCount", "perfResults"},
	"cudnnFindConvolutionForwardAlgorithm":               {"returnedAlgoCount", "perfResults"},
	"cudnnFindConvolutionForwardAlgorithmEx":             {"returnedAlgoCount", "perfResults"},
	"cudnnFindRNNBackwardDataAlgorithmEx":                {"dx", "dhx", "dcx", "returnedAlgoCount", "perfResults"},
	"cudnnFindRNNBackwardWeightsAlgorithmEx":             {"returnedAlgoCount", "perfResults"},
	"cudnnFindRNNForwardInferenceAlgorithmEx":            {"y", "hy", "cy", "returnedAlgoCount", "perfResults"},
	"cudnnFindRNNForwardTrainingAlgorithmEx":             {"y", "hy", "cy", "returnedAlgoCount", "perfResults"},
	"cudnnGetActivationDescriptor":                       {"mode", "reluNanOpt", "coef"},
	"cudnnGetAlgorithmPerformance":                       {"algoDesc", "status", "time", "memory"}, // was "timecoef": typo merging "time" with the "coef" on the line above; cuDNN API param is "time"
	"cudnnGetCTCLossDescriptor":                          {"compType"},
	"cudnnGetCTCLossWorkspaceSize":                       {"sizeInBytes"},
	"cudnnGetCallback":                                   {"mask", "udata", "fptr"},
	"cudnnGetConvolution2dDescriptor":                    {"pad_h", "pad_w", "u", "v", "dilation_h", "dilation_w", "mode", "computeType"},
	"cudnnGetConvolution2dForwardOutputDim":              {"n", "c", "h", "w"},
	"cudnnGetConvolutionBackwardDataAlgorithm":           {"algo"},
	"cudnnGetConvolutionBackwardDataAlgorithmMaxCount":   {"count"},
	"cudnnGetConvolutionBackwardDataAlgorithm_v7":        {"returnedAlgoCount", "perfResults"},
	"cudnnGetConvolutionBackwardDataWorkspaceSize":       {"sizeInBytes"},
	"cudnnGetConvolutionBackwardFilterAlgorithm":         {"algo"},
	"cudnnGetConvolutionBackwardFilterAlgorithmMaxCount": {"count"},
	"cudnnGetConvolutionBackwardFilterAlgorithm_v7":      {"returnedAlgoCount", "perfResults"},
	"cudnnGetConvolutionBackwardFilterWorkspaceSize":     {"sizeInBytes"},
	"cudnnGetConvolutionForwardAlgorithm":                {"algo"},
	"cudnnGetConvolutionForwardAlgorithmMaxCount":        {"count"},
	"cudnnGetConvolutionForwardAlgorithm_v7":             {"returnedAlgoCount", "perfResults"},
	"cudnnGetConvolutionForwardWorkspaceSize":            {"sizeInBytes"},
	"cudnnGetConvolutionNdDescriptor":                    {"arrayLength", "padA", "filterStrideA", "dilationA", "mode", "datatype"},
	"cudnnGetConvolutionNdForwardOutputDim":              {"tensorOuputDimA"},
	"cudnnGetDropoutDescriptor":                          {"dropout", "states", "seed"},
	"cudnnGetFilter4dDescriptor":                         {"datatype", "format", "k", "c", "h", "w"},
	"cudnnGetFilterNdDescriptor":                         {"datatype", "format", "nbDims", "filterDimA"},
	"cudnnGetLRNDescriptor":                              {"normDesc", "lrnN", "lrnAlpha", "lrnBeta", "lrnK"},
	"cudnnGetOpTensorDescriptor":                         {"opTensorOp", "opTensorCompType", "opTensorNanOpt"},
	"cudnnGetPooling2dDescriptor":                        {"mode", "maxpoolingNanOpt", "windowHeight", "windowWidth", "verticalPadding", "horizontalPadding", "verticalStride", "horizontalStride"},
	"cudnnGetPooling2dForwardOutputDim":                  {"n", "c", "h", "w"}, // docs on the internet has capitalized retVals
	"cudnnGetPoolingNdDescriptor":                        {"mode", "nbDims", "windowDimA", "paddingA", "strideA"},
	"cudnnGetPoolingNdForwardOutputDim":                  {"outDimA"},
	"cudnnGetProperty":                                   {"value"},
	"cudnnGetRNNDataDescriptor":                          {"dataType", "layout", "maxSeqLength", "batchSize", "vectorSize", "seqLengthArray", "paddingFill"},
	"cudnnGetRNNDescriptor":                              {"hiddenSize", "numLayers", "dropoutDesc", "inputMode", "direction", "mode", "algo", "dataType"},
	"cudnnGetRNNLinLayerBiasParams":                      {"linLayerBiasDesc", "linLayerBias"},
	"cudnnGetRNNLinLayerMatrixParams":                    {"linLayerMatDesc", "linLayerMat"},
	"cudnnGetRNNParamsSize":                              {"sizeInBytes"},
	"cudnnGetRNNProjectionLayers":                        {"recProjSize", "outProjSize"},
	"cudnnGetRNNTrainingReserveSize":                     {"sizeInBytes"},
	"cudnnGetRNNWorkspaceSize":                           {"sizeInBytes"},
	"cudnnGetReduceTensorDescriptor":                     {"reduceTensorOp", "reduceTensorCompType", "reduceTensorIndices", "reduceTensorIndicesType"},
	"cudnnGetReductionIndicesSize":                       {"sizeInBytes"},
	"cudnnGetReductionWorkspaceSize":                     {"sizeInBytes"},
	"cudnnGetStream":                                     {"streamID"},
	"cudnnGetTensor4dDescriptor":                         {"datatype", "n", "c", "h", "w", "nStride", "cStride", "hStride", "wStride"},
	"cudnnGetTensorNdDescriptor":                         {"datatype", "nbDims", "dimA"},
	"cudnnGetTensorSizeInBytes":                          {"size"},
	"cudnnIm2Col":                                        {"colBuffer"},
	"cudnnLRNCrossChannelBackward":                       {"dxDesc", "dx"}, // NOTE(review): dxDesc is an input descriptor in the C API, not an output — confirm whether it belongs in inputParams instead
	"cudnnLRNCrossChannelForward":                        {"y"},
	"cudnnPoolingBackward":                               {"dx"},
	"cudnnPoolingForward":                                {"y"},
	"cudnnQueryRuntimeError":                             {"rstatus"},
	"cudnnRNNBackwardData":                               {"dx", "dhx", "dcx"},
	"cudnnRNNBackwardDataEx":                             {"dx", "dhx", "dcx"},
	"cudnnRNNForwardInference":                           {"y", "hy", "cy"},
	"cudnnRNNForwardInferenceEx":                         {"y", "hy", "cy"},
	"cudnnRNNForwardTraining":                            {"y", "hy", "cy"},
	"cudnnRNNForwardTrainingEx":                          {"y", "hy", "cy"},
	"cudnnRNNGetClip":                                    {"*clipMode", "*lclip", "*rclip", "*clipNanOpt"}, // NOTE(review): '*' prefixes are inconsistent with the rest of the table — confirm the generator expects them
	"cudnnReduceTensor":                                  {"indices"},
	"cudnnSetCTCLossDescriptor":                          {"ctcLossDesc"},
	"cudnnSetDropoutDescriptor":                          {"states"},
	"cudnnSetLRNDescriptor":                              {"normDesc"},
	"cudnnSetOpTensorDescriptor":                         {"opTensorDesc"},
	"cudnnSetPoolingNdDescriptor":                        {"windowDimA", "paddingA", "strideA"},
	"cudnnSetTensorNdDescriptorEx":                       {"tensorDesc"},
	"cudnnSoftmaxBackward":                               {"dx"},
	"cudnnSoftmaxForward":                                {"y"},
	"cudnnSpatialTfGridGeneratorBackward":                {"dtheta"},
	"cudnnSpatialTfGridGeneratorForward":                 {"grid"},
	"cudnnSpatialTfSamplerBackward":                      {"dx", "dgrid"},
	"cudnnSpatialTfSamplerForward":                       {"y"},
	"cudnnTransformTensor":                               {"y"},
}
// ioParams maps each cuDNN function name to its in/out parameters — buffers
// or descriptors that the C call both reads and writes. A parameter should be
// classified in exactly one of inputParams, outputParams, or ioParams.
var ioParams = map[string][]string{
	"cudnnAddTensor":                                {"C"},
	"cudnnConvolutionBackwardData":                  {"dx"},
	"cudnnConvolutionBackwardFilter":                {"dw"},
	"cudnnConvolutionBiasActivationForward":         {"y"},
	"cudnnConvolutionForward":                       {"y"},
	// was "dxDesc": the descriptor is already an input (see inputParams); the
	// in/out buffer is dx, matching the sibling *Ex entries below ("dw", "y").
	"cudnnFindConvolutionBackwardDataAlgorithmEx":   {"dx"},
	"cudnnFindConvolutionBackwardFilterAlgorithmEx": {"dw"},
	"cudnnFindConvolutionForwardAlgorithmEx":        {"y"},
	"cudnnFindRNNBackwardDataAlgorithmEx":           {"reserveSpace"},
	"cudnnFindRNNBackwardWeightsAlgorithmEx":        {"dw"},
	"cudnnFindRNNForwardTrainingAlgorithmEx":        {"reserveSpace"},
	"cudnnGetAlgorithmPerformance":                  {"algoPerf"},
	"cudnnGetConvolution2dDescriptor":               {"convDesc"},
	"cudnnGetConvolutionNdDescriptor":               {"convDesc"},
	"cudnnGetRNNPaddingMode":                        {"rnnDesc"},
	"cudnnOpTensor":                                 {"C"},
	"cudnnQueryRuntimeError":                        {"tag"},
	"cudnnRNNBackwardData":                          {"reserveSpace"},
	"cudnnRNNBackwardDataEx":                        {"reserveSpace"},
	"cudnnRNNBackwardWeights":                       {"dw"},
	"cudnnRNNBackwardWeightsEx":                     {"dw"},
	"cudnnRNNForwardTraining":                       {"reserveSpace"},
	"cudnnRNNForwardTrainingEx":                     {"reserveSpace"},
	"cudnnReduceTensor":                             {"C"},
	"cudnnRestoreDropoutDescriptor":                 {"dropoutDesc"},
	"cudnnScaleTensor":                              {"y"},
	"cudnnSetActivationDescriptor":                  {"activationDesc"},
	"cudnnSetAlgorithmDescriptor":                   {"algorithmDesc"},
	"cudnnSetAlgorithmPerformance":                  {"algoPerf"},
	"cudnnSetConvolution2dDescriptor":               {"convDesc"},
	"cudnnSetConvolutionNdDescriptor":               {"convDesc"},
	"cudnnSetDropoutDescriptor":                     {"dropoutDesc"},
	"cudnnSetFilter4dDescriptor":                    {"filterDesc"},
	"cudnnSetFilterNdDescriptor":                    {"filterDesc"},
	"cudnnSetPooling2dDescriptor":                   {"poolingDesc"},
	"cudnnSetPoolingNdDescriptor":                   {"poolingDesc"},
	"cudnnSetRNNDataDescriptor":                     {"RNNDataDesc"},
	"cudnnSetRNNDescriptor":                         {"rnnDesc"},
	"cudnnSetRNNDescriptor_v5":                      {"rnnDesc"},
	"cudnnSetRNNDescriptor_v6":                      {"rnnDesc"},
	"cudnnSetRNNPaddingMode":                        {"rnnDesc"},
	"cudnnSetReduceTensorDescriptor":                {"reduceTensorDesc"},
	"cudnnSetSpatialTransformerNdDescriptor":        {"stDesc"},
	"cudnnSetTensor":                                {"y"},
	"cudnnSetTensor4dDescriptor":                    {"tensorDesc"},
	"cudnnSetTensor4dDescriptorEx":                  {"tensorDesc"},
	"cudnnSetTensorNdDescriptor":                    {"tensorDesc"},
}
var docs = map[string]string{
	"cudnnActivationBackward":                            "cudnnActivationBackward computes the gradient of a neuron activation function.",
	"cudnnActivationForward":                             "cudnnActivationForward applies a specified neuron activation function element-wise over each input value.",
	"cudnnAddTensor":                                     "cudnnAddTensor adds the scaled values of a bias tensor to another tensor. Each dimension of the bias tensor A must match the corresponding dimension of the destination tensor C or must be equal to 1. In the latter case, the same value from the bias tensor for those dimensions will be used to blend into the C tensor.",
	"cudnnBatchNormalizationBackward":                    "cudnnBatchNormalizationBackward performs the backward BatchNormalization layer computation.",
	"cudnnBatchNormalizationForwardInference":            "cudnnBatchNormalizationForwardInference performs the forward BatchNormalization layer computation for inference phase. cudnnBatchNormalizationForwardInference layer is based on the paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift`, S. Ioffe, C. Szegedy, 2015.",
	"cudnnBatchNormalizationForwardTraining":             "cudnnBatchNormalizationForwardTraining performs the forward BatchNormalization layer computation for training phase.",
	"cudnnCTCLoss":                                       "cudnnCTCLoss returns the ctc costs and gradients, given the probabilities and labels.",
	"cudnnConvolutionBackwardBias":                       "cudnnConvolutionBackwardBias computes the convolution function gradient with respect to the bias, which is the sum of every element belonging to the same feature map across all of the images of the input tensor. Therefore, the number of elements produced is equal to the number of features maps of the input tensor.",
	"cudnnConvolutionBackwardData":                       "cudnnConvolutionBackwardData computes the convolution gradient with respect to the output tensor using the specified algo, returning results in gradDesc. Scaling factors alpha and beta can be used to scale the input tensor and the output tensor respectively.",
	"cudnnConvolutionBackwardFilter":                     "cudnnConvolutionBackwardFilter computes the convolution gradient with respect to filter coefficients using the specified algo, returning results in gradDesc.Scaling factors alpha and beta can be used to scale the input tensor and the output tensor respectively.",
	"cudnnConvolutionBiasActivationForward":              "cudnnConvolutionBiasActivationForward applies a bias and then an activation to the convolutions or cross-correlations of cudnnConvolutionForward(), returning results in y. The full computation follows the equation y = act ( alpha1 * conv(x) + alpha2 * z + bias ).",
	"cudnnConvolutionForward":                            "cudnnConvolutionForward executes convolutions or cross-correlations over x using filters specified with w, returning results in y. Scaling factors alpha and beta can be used to scale the input tensor and the output tensor respectively.",
	"cudnnCreate":                                        "cudnnCreate initializes the cuDNN library and creates a handle to an opaque structure holding the cuDNN library context. It allocates hardware resources on the host and device and must be called prior to making any other cuDNN library calls. The cuDNN library handle is tied to the current CUDA device (context). To use the library on multiple devices, one cuDNN handle needs to be created for each device. For a given device, multiple cuDNN handles with different configurations (e.g., different current CUDA streams) may be created. Because cudnnCreate allocates some internal resources, the release of those resources by calling cudnnDestroy will implicitly call cudaDeviceSynchronize; therefore, the recommended best practice is to call cudnnCreate/cudnnDestroy outside of performance-critical code paths. For multithreaded applications that use the same device from different threads, the recommended programming model is to create one (or a few, as is convenient) cuDNN handle(s) per thread and use that cuDNN handle for the entire life of the thread.",
	"cudnnCreateCTCLossDescriptor":                       "cudnnCreateCTCLossDescriptor creates a CTC loss function descriptor. .",
	"cudnnCreateOpTensorDescriptor":                      "cudnnCreateOpTensorDescriptor creates a Tensor Pointwise math descriptor.",
	"cudnnCreateTensorDescriptor":                        "cudnnCreateTensorDescriptor creates a generic tensor descriptor object by allocating the memory needed to hold its opaque structure. The data is initialized to be all zero.",
	"cudnnDeriveBNTensorDescriptor":                      "Derives a secondary tensor descriptor for BatchNormalization scale, invVariance, bnBias, bnScale subtensors from the layer's x data descriptor. Use the tensor descriptor produced by this function as the bnScaleBiasMeanVarDesc and bnScaleBiasDiffDesc parameters in Spatial and Per-Activation Batch Normalization forward and backward functions. Resulting dimensions will be 1xC(x1)x1x1 for BATCHNORM_MODE_SPATIAL and 1xC(xD)xHxW for BATCHNORM_MODE_PER_ACTIVATION (parentheses for 5D). For HALF input data type the resulting tensor descriptor will have a FLOAT type. For other data types it will have the same type as the input data.",
	"cudnnDestroy":                                       "cudnnDestroy releases resources used by the cuDNN handle. cudnnDestroy is usually the last call with a particular handle to the cuDNN handle. Because cudnnCreate allocates some internal resources, the release of those resources by calling cudnnDestroy will implicitly call cudaDeviceSynchronize; therefore, the recommended best practice is to call cudnnCreate/cudnnDestroy outside of performance-critical code paths.",
	"cudnnDestroyCTCLossDescriptor":                      "cudnnDestroyCTCLossDescriptor destroys a CTC loss function descriptor object.",
	"cudnnDestroyOpTensorDescriptor":                     "cudnnDestroyOpTensorDescriptor deletes a Tensor Pointwise math descriptor object.",
	"cudnnDestroyReduceTensorDescriptor":                 "cudnnDestroyReduceTensorDescriptor destroys a previously created reduce tensor descriptor object. When the input pointer is NULL, this function performs no destroy operation.",
	"cudnnDestroyTensorDescriptor":                       "cudnnDestroyTensorDescriptor destroys a previously created tensor descriptor object. When the input pointer is NULL, this function performs no destroy operation.",
	"cudnnDivisiveNormalizationBackward":                 "cudnnDivisiveNormalizationBackward performs the backward DivisiveNormalization layer computation.",
	"cudnnDivisiveNormalizationForward":                  "cudnnDivisiveNormalizationForward performs the forward spatial DivisiveNormalization layer computation. It divides every value in a layer by the standard deviation of it's spatial neighbors as described in `What is the Best Multi-Stage Architecture for Object Recognition`, Jarrett 2009, Local Contrast Normalization Layer section. Note that Divisive Normalization only implements the x/max(c, sigma_x) portion of the computation, where sigma_x is the variance over the spatial neighborhood of x. The full LCN (Local Contrastive Normalization) computation can be implemented as a two-step process:",
	"cudnnDropoutBackward":                               "cudnnDropoutBackward performs backward dropout operation over dy returning results in dx. If during forward dropout operation value from x was propagated to y then during backward operation value from dy will be propagated to dx, otherwise, dx value will be set to 0.",
	"cudnnDropoutForward":                                "cudnnDropoutForward performs forward dropout operation over x returning results in y. If dropout was used as a parameter to cudnnSetDropoutDescriptor, the approximately dropout fraction of x values will be replaces by 0, and the rest will be scaled by 1/(1-dropout) cudnnDropoutForward should not be running concurrently with another cudnnDropoutForward function using the same states.",
	"cudnnDropoutGetReserveSpaceSize":                    "cudnnDropoutGetReserveSpaceSize is used to query the amount of reserve needed to run dropout with the input dimensions given by xDesc. The same reserve space is expected to be passed to cudnnDropoutForward and cudnnDropoutBackward, and its contents is expected to remain unchanged between cudnnDropoutForward and cudnnDropoutBackward calls.",
	"cudnnDropoutGetStatesSize":                          "cudnnDropoutGetStatesSize is used to query the amount of space required to store the states of the random number generators used by cudnnDropoutForward function.",
	"cudnnFindConvolutionBackwardDataAlgorithm":          "cudnnFindConvolutionBackwardDataAlgorithm attempts all cuDNN algorithms (including CUDNN_TENSOR_OP_MATH and CUDNN_DEFAULT_MATH versions of algorithms where CUDNN_TENSOR_OP_MATH may be available) for cudnnConvolutionBackwardData(), using memory allocated via cudaMalloc() and outputs performance metrics to a user-allocated array of cudnnConvolutionBwdDataAlgoPerf_t. These metrics are written in sorted fashion where the first element has the lowest compute time. The total number of resulting algorithms can be queried through the API cudnnGetConvolutionBackwardMaxCount().",
	"cudnnFindConvolutionBackwardDataAlgorithmEx":        "cudnnFindConvolutionBackwardDataAlgorithmEx attempts all cuDNN algorithms (including CUDNN_TENSOR_OP_MATH and CUDNN_DEFAULT_MATH versions of algorithms where CUDNN_TENSOR_OP_MATH may be available) for cudnnConvolutionBackwardData, using user-allocated GPU memory, and outputs performance metrics to a user-allocated array of cudnnConvolutionBwdDataAlgoPerf_t. These metrics are written in sorted fashion where the first element has the lowest compute time. The total number of resulting algorithms can be queried through the API cudnnGetConvolutionBackwardMaxCount().",
	"cudnnFindConvolutionBackwardFilterAlgorithm":        "cudnnFindConvolutionBackwardFilterAlgorithm attempts all cuDNN algorithms (including CUDNN_TENSOR_OP_MATH and CUDNN_DEFAULT_MATH versions of algorithms where CUDNN_TENSOR_OP_MATH may be available) for cudnnConvolutionBackwardFilter(), using GPU memory allocated via cudaMalloc(), and outputs performance metrics to a user-allocated array of cudnnConvolutionBwdFilterAlgoPerf_t. These metrics are written in sorted fashion where the first element has the lowest compute time. The total number of resulting algorithms can be queried through the API cudnnGetConvolutionBackwardMaxCount().",
	"cudnnFindConvolutionBackwardFilterAlgorithmEx":      "cudnnFindConvolutionBackwardFilterAlgorithmEx attempts all cuDNN algorithms (including CUDNN_TENSOR_OP_MATH and CUDNN_DEFAULT_MATH versions of algorithms where CUDNN_TENSOR_OP_MATH may be available) for cudnnConvolutionBackwardFilter, using user-allocated GPU memory, and outputs performance metrics to a user-allocated array of cudnnConvolutionBwdFilterAlgoPerf_t. These metrics are written in sorted fashion where the first element has the lowest compute time. The total number of resulting algorithms can be queried through the API cudnnGetConvolutionBackwardMaxCount().",
	"cudnnFindConvolutionForwardAlgorithm":               "cudnnFindConvolutionForwardAlgorithm attempts all cuDNN algorithms (including CUDNN_TENSOR_OP_MATH and CUDNN_DEFAULT_MATH versions of algorithms where CUDNN_TENSOR_OP_MATH may be available) for cudnnConvolutionForward(), using memory allocated via cudaMalloc(), and outputs performance metrics to a user-allocated array of cudnnConvolutionFwdAlgoPerf_t. These metrics are written in sorted fashion where the first element has the lowest compute time. The total number of resulting algorithms can be queried through the API cudnnGetConvolutionForwardMaxCount().",
	"cudnnFindConvolutionForwardAlgorithmEx":             "cudnnFindConvolutionForwardAlgorithmEx attempts all available cuDNN algorithms (including CUDNN_TENSOR_OP_MATH and CUDNN_DEFAULT_MATH versions of algorithms where CUDNN_TENSOR_OP_MATH may be available) for cudnnConvolutionForward, using user-allocated GPU memory, and outputs performance metrics to a user-allocated array of cudnnConvolutionFwdAlgoPerf_t. These metrics are written in sorted fashion where the first element has the lowest compute time. The total number of resulting algorithms can be queried through the API cudnnGetConvolutionForwardMaxCount().",
	"cudnnFindRNNBackwardDataAlgorithmEx":                "(New for 7.1)",
	"cudnnFindRNNBackwardWeightsAlgorithmEx":             "(New for 7.1)",
	"cudnnFindRNNForwardInferenceAlgorithmEx":            "(New for 7.1)",
	"cudnnFindRNNForwardTrainingAlgorithmEx":             "(New for 7.1)",
	"cudnnGetActivationDescriptor":                       "cudnnGetActivationDescriptor queries a previously initialized generic activation descriptor object.",
	"cudnnGetAlgorithmDescriptor":                        "(New for 7.1)",
	"cudnnGetAlgorithmPerformance":                       "(New for 7.1)",
	"cudnnGetAlgorithmSpaceSize":                         "(New for 7.1)",
	"cudnnGetCTCLossDescriptor":                          "cudnnGetCTCLossDescriptor returns configuration of the passed CTC loss function descriptor.",
	"cudnnGetCTCLossWorkspaceSize":                       "cudnnGetCTCLossWorkspaceSize returns the amount of GPU memory workspace the user needs to allocate to be able to call cudnnCTCLoss with the specified algorithm. The workspace allocated will then be passed to the routine cudnnCTCLoss.",
	"cudnnGetCallback":                                   "(New for 7.1)",
	"cudnnGetConvolution2dDescriptor":                    "cudnnGetConvolution2dDescriptor queries a previously initialized 2D convolution descriptor object.",
	"cudnnGetConvolution2dForwardOutputDim":              "cudnnGetConvolution2dForwardOutputDim returns the dimensions of the resulting 4D tensor of a 2D convolution, given the convolution descriptor, the input tensor descriptor and the filter descriptor cudnnGetConvolution2dForwardOutputDim can help to setup the output tensor and allocate the proper amount of memory prior to launch the actual convolution.",
	"cudnnGetConvolutionBackwardDataAlgorithm":           "cudnnGetConvolutionBackwardDataAlgorithm serves as a heuristic for obtaining the best suited algorithm for cudnnConvolutionBackwardData for the given layer specifications. Based on the input preference, this function will either return the fastest algorithm or the fastest algorithm within a given memory limit. For an exhaustive search for the fastest algorithm, please use cudnnFindConvolutionBackwardDataAlgorithm.",
	"cudnnGetConvolutionBackwardDataAlgorithmMaxCount":   "cudnnGetConvolutionBackwardDataAlgorithmMaxCount returns the maximum number of algorithms which can be returned from cudnnFindConvolutionBackwardDataAlgorithm() and cudnnGetConvolutionForwardAlgorithm_v7(). cudnnGetConvolutionBackwardDataAlgorithmMaxCount is the sum of all algorithms plus the sum of all algorithms with Tensor Core operations supported for the current device.",
	"cudnnGetConvolutionBackwardDataAlgorithm_v7":        "cudnnGetConvolutionBackwardDataAlgorithm_v7 serves as a heuristic for obtaining the best suited algorithm for cudnnConvolutionBackwardData for the given layer specifications. cudnnGetConvolutionBackwardDataAlgorithm_v7 will return all algorithms (including CUDNN_TENSOR_OP_MATH and CUDNN_DEFAULT_MATH versions of algorithms where CUDNN_TENSOR_OP_MATH may be available) sorted by expected (based on internal heuristic) relative performance with fastest being index 0 of perfResults. For an exhaustive search for the fastest algorithm, please use cudnnFindConvolutionBackwardDataAlgorithm. The total number of resulting algorithms can be queried through the API cudnnGetConvolutionBackwardMaxCount().",
	"cudnnGetConvolutionBackwardDataWorkspaceSize":       "cudnnGetConvolutionBackwardDataWorkspaceSize returns the amount of GPU memory workspace the user needs to allocate to be able to call cudnnConvolutionBackwardData with the specified algorithm. The workspace allocated will then be passed to the routine cudnnConvolutionBackwardData. The specified algorithm can be the result of the call to cudnnGetConvolutionBackwardDataAlgorithm or can be chosen arbitrarily by the user. Note that not every algorithm is available for every configuration of the input tensor and/or every configuration of the convolution descriptor.",
	"cudnnGetConvolutionBackwardFilterAlgorithm":         "cudnnGetConvolutionBackwardFilterAlgorithm serves as a heuristic for obtaining the best suited algorithm for cudnnConvolutionBackwardFilter for the given layer specifications. Based on the input preference, this function will either return the fastest algorithm or the fastest algorithm within a given memory limit. For an exhaustive search for the fastest algorithm, please use cudnnFindConvolutionBackwardFilterAlgorithm.",
	"cudnnGetConvolutionBackwardFilterAlgorithmMaxCount": "cudnnGetConvolutionBackwardFilterAlgorithmMaxCount returns the maximum number of algorithms which can be returned from cudnnFindConvolutionBackwardFilterAlgorithm() and cudnnGetConvolutionForwardAlgorithm_v7(). cudnnGetConvolutionBackwardFilterAlgorithmMaxCount is the sum of all algorithms plus the sum of all algorithms with Tensor Core operations supported for the current device.",
	"cudnnGetConvolutionBackwardFilterAlgorithm_v7":      "cudnnGetConvolutionBackwardFilterAlgorithm_v7 serves as a heuristic for obtaining the best suited algorithm for cudnnConvolutionBackwardFilter for the given layer specifications. cudnnGetConvolutionBackwardFilterAlgorithm_v7 will return all algorithms (including CUDNN_TENSOR_OP_MATH and CUDNN_DEFAULT_MATH versions of algorithms where CUDNN_TENSOR_OP_MATH may be available) sorted by expected (based on internal heuristic) relative performance with fastest being index 0 of perfResults. For an exhaustive search for the fastest algorithm, please use cudnnFindConvolutionBackwardFilterAlgorithm. The total number of resulting algorithms can be queried through the API cudnnGetConvolutionBackwardMaxCount().",
	"cudnnGetConvolutionBackwardFilterWorkspaceSize":     "cudnnGetConvolutionBackwardFilterWorkspaceSize returns the amount of GPU memory workspace the user needs to allocate to be able to call cudnnConvolutionBackwardFilter with the specified algorithm. The workspace allocated will then be passed to the routine cudnnConvolutionBackwardFilter. The specified algorithm can be the result of the call to cudnnGetConvolutionBackwardFilterAlgorithm or can be chosen arbitrarily by the user. Note that not every algorithm is available for every configuration of the input tensor and/or every configuration of the convolution descriptor.",
	"cudnnGetConvolutionForwardAlgorithm":                "cudnnGetConvolutionForwardAlgorithm serves as a heuristic for obtaining the best suited algorithm for cudnnConvolutionForward for the given layer specifications. Based on the input preference, this function will either return the fastest algorithm or the fastest algorithm within a given memory limit. For an exhaustive search for the fastest algorithm, please use cudnnFindConvolutionForwardAlgorithm.",
	"cudnnGetConvolutionForwardAlgorithmMaxCount":        "cudnnGetConvolutionForwardAlgorithmMaxCount returns the maximum number of algorithms which can be returned from cudnnFindConvolutionForwardAlgorithm() and cudnnGetConvolutionForwardAlgorithm_v7(). cudnnGetConvolutionForwardAlgorithmMaxCount is the sum of all algorithms plus the sum of all algorithms with Tensor Core operations supported for the current device.",
	"cudnnGetConvolutionForwardAlgorithm_v7":             "cudnnGetConvolutionForwardAlgorithm_v7 serves as a heuristic for obtaining the best suited algorithm for cudnnConvolutionForward for the given layer specifications. cudnnGetConvolutionForwardAlgorithm_v7 will return all algorithms (including CUDNN_TENSOR_OP_MATH and CUDNN_DEFAULT_MATH versions of algorithms where CUDNN_TENSOR_OP_MATH may be available) sorted by expected (based on internal heuristic) relative performance with fastest being index 0 of perfResults. For an exhaustive search for the fastest algorithm, please use cudnnFindConvolutionForwardAlgorithm. The total number of resulting algorithms can be queried through the API cudnnGetConvolutionForwardMaxCount().",
	"cudnnGetConvolutionForwardWorkspaceSize":            "cudnnGetConvolutionForwardWorkspaceSize returns the amount of GPU memory workspace the user needs to allocate to be able to call cudnnConvolutionForward with the specified algorithm. The workspace allocated will then be passed to the routine cudnnConvolutionForward. The specified algorithm can be the result of the call to cudnnGetConvolutionForwardAlgorithm or can be chosen arbitrarily by the user. Note that not every algorithm is available for every configuration of the input tensor and/or every configuration of the convolution descriptor.",
	"cudnnGetConvolutionNdDescriptor":                    "cudnnGetConvolutionNdDescriptor queries a previously initialized convolution descriptor object.",
	"cudnnGetConvolutionNdForwardOutputDim":              "cudnnGetConvolutionNdForwardOutputDim returns the dimensions of the resulting n-D tensor of a nbDims-2-D convolution, given the convolution descriptor, the input tensor descriptor and the filter descriptor cudnnGetConvolutionNdForwardOutputDim can help to setup the output tensor and allocate the proper amount of memory prior to launch the actual convolution.",
	"cudnnGetDropoutDescriptor":                          "cudnnGetDropoutDescriptor queries the fields of a previously initialized dropout descriptor.",
	"cudnnGetErrorString":                                "cudnnGetErrorString converts the cuDNN status code to a NUL terminated (ASCIIZ) static string. For example, when the input argument is CUDNN_STATUS_SUCCESS, the returned string is `CUDNN_STATUS_SUCCESS`. When an invalid status value is passed to the function, the returned string is `CUDNN_UNKNOWN_STATUS`.",
	"cudnnGetFilter4dDescriptor":                         "cudnnGetFilter4dDescriptor queries the parameters of the previouly initialized filter descriptor object.",
	"cudnnGetFilterNdDescriptor":                         "cudnnGetFilterNdDescriptor queries a previously initialized filter descriptor object.",
	"cudnnGetLRNDescriptor":                              "cudnnGetLRNDescriptor retrieves values stored in the previously initialized LRN descriptor object.",
	"cudnnGetOpTensorDescriptor":                         "cudnnGetOpTensorDescriptor returns configuration of the passed Tensor Pointwise math descriptor.",
	"cudnnGetPooling2dDescriptor":                        "cudnnGetPooling2dDescriptor queries a previously created 2D pooling descriptor object.",
	"cudnnGetPooling2dForwardOutputDim":                  "cudnnGetPooling2dForwardOutputDim provides the output dimensions of a tensor after 2d pooling has been applied",
	"cudnnGetPoolingNdDescriptor":                        "cudnnGetPoolingNdDescriptor queries a previously initialized generic pooling descriptor object.",
	"cudnnGetPoolingNdForwardOutputDim":                  "cudnnGetPoolingNdForwardOutputDim provides the output dimensions of a tensor after Nd pooling has been applied",
	"cudnnGetProperty":                                   "cudnnGetProperty writes a specific part of the cuDNN library version number into the provided host storage.",
	"cudnnGetRNNDataDescriptor":                          "cudnnGetRNNDataDescriptor retrieves a previously created RNN data descriptor object.",
	"cudnnGetRNNDescriptor":                              "cudnnGetRNNDescriptor retrieves RNN network parameters that were configured by cudnnSetRNNDescriptor(). All pointers passed to the function should be not-NULL or CUDNN_STATUS_BAD_PARAM is reported. The function does not check the validity of retrieved network parameters. The parameters are verified when they are written to the RNN descriptor.",
	"cudnnGetRNNLinLayerBiasParams":                      "cudnnGetRNNLinLayerBiasParams is used to obtain a pointer and a descriptor of every RNN bias column vector in each pseudo-layer within the recurrent network defined by rnnDesc and its input width specified in xDesc.",
	"cudnnGetRNNLinLayerMatrixParams":                    "cudnnGetRNNLinLayerMatrixParams is used to obtain a pointer and a descriptor of every RNN weight matrix in each pseudo-layer within the recurrent network defined by rnnDesc and its input width specified in xDesc.",
	"cudnnGetRNNParamsSize":                              "cudnnGetRNNParamsSize is used to query the amount of parameter space required to execute the RNN described by rnnDesc with inputs dimensions defined by xDesc.",
	"cudnnGetRNNPaddingMode":                             "cudnnGetRNNPaddingMode retrieves the RNN padding mode from the RNN descriptor.",
	"cudnnGetRNNProjectionLayers":                        "(New for 7.1)",
	"cudnnGetRNNTrainingReserveSize":                     "cudnnGetRNNTrainingReserveSize is used to query the amount of reserved space required for training the RNN described by rnnDesc with inputs dimensions defined by xDesc. The same reserved space buffer must be passed to cudnnRNNForwardTraining, cudnnRNNBackwardData and cudnnRNNBackwardWeights. Each of these calls overwrites the contents of the reserved space, however it can safely be backed up and restored between calls if reuse of the memory is desired.",
	"cudnnGetRNNWorkspaceSize":                           "cudnnGetRNNWorkspaceSize is used to query the amount of work space required to execute the RNN described by rnnDesc with inputs dimensions defined by xDesc.",
	"cudnnGetReduceTensorDescriptor":                     "cudnnGetReduceTensorDescriptor queries a previously initialized reduce tensor descriptor object.",
	"cudnnGetReductionIndicesSize":                       "cudnnGetReductionIndicesSize is a helper function to return the minimum size of the index space to be passed to the reduction given the input and output tensors.",
	"cudnnGetReductionWorkspaceSize":                     "cudnnGetReductionWorkspaceSize is a helper function to return the minimum size of the workspace to be passed to the reduction given the input and output tensors.",
	"cudnnGetStream":                                     "cudnnGetStream retrieves the user CUDA stream programmed in the cuDNN handle. When the user's CUDA stream was not set in the cuDNN handle, this function reports the null-stream.",
	"cudnnGetTensor4dDescriptor":                         "cudnnGetTensor4dDescriptor queries the parameters of the previouly initialized Tensor4D descriptor object.",
	"cudnnGetTensorNdDescriptor":                         "cudnnGetTensorNdDescriptor retrieves values stored in a previously initialized Tensor descriptor object.",
	"cudnnGetTensorSizeInBytes":                          "cudnnGetTensorSizeInBytes returns the size of the tensor in memory in respect to the given descriptor. cudnnGetTensorSizeInBytes can be used to know the amount of GPU memory to be allocated to hold that tensor.",
	"cudnnIm2Col":                                        "cudnnIm2Col constructs the A matrix necessary to perform a forward pass of GEMM convolution. cudnnIm2Col A matrix has a height of batch_size*y_height*y_width and width of input_channels*filter_height*filter_width, where batch_size is xDesc's first dimension, y_height/y_width are computed from cudnnGetConvolutionNdForwardOutputDim(), input_channels is xDesc's second dimension, filter_height/filter_width are wDesc's third and fourth dimension. The A matrix is stored in format HW-fully-packed in GPU memory.",
	"cudnnLRNCrossChannelBackward":                       "cudnnLRNCrossChannelBackward performs the backward LRN layer computation.",
	"cudnnLRNCrossChannelForward":                        "cudnnLRNCrossChannelForward performs the forward LRN layer computation.",
	"cudnnOpTensor":                                      "cudnnOpTensor implements the equation C = op ( alpha1[0] * A, alpha2[0] * B ) + beta[0] * C, given tensors A, B, and C and scaling factors alpha1, alpha2, and beta. The op to use is indicated by the descriptor opTensorDesc. Currently-supported ops are listed by the cudnnOpTensorOp_t enum.",
	"cudnnPoolingBackward":                               "cudnnPoolingBackward computes the gradient of a pooling operation.",
	"cudnnPoolingForward":                                "cudnnPoolingForward computes pooling of input values (i.e., the maximum or average of several adjacent values) to produce an output with smaller height and/or width.",
	"cudnnQueryRuntimeError":                             "cuDNN library functions perform extensive input argument checking before launching GPU kernels. The last step is to verify that the GPU kernel actually started. When a kernel fails to start, CUDNN_STATUS_EXECUTION_FAILED is returned by the corresponding API call. Typically, after a GPU kernel starts, no runtime checks are performed by the kernel itself -- numerical results are simply written to output buffers.",
	"cudnnRNNBackwardData":                               "cudnnRNNBackwardData executes the recurrent neural network described by rnnDesc with output gradients dy, dhy, dhc, weights w and input gradients dx, dhx, dcx. workspace is required for intermediate storage. The data in reserveSpace must have previously been generated by cudnnRNNForwardTraining. The same reserveSpace data must be used for future calls to cudnnRNNBackwardWeights if they execute on the same input data.",
	"cudnnRNNBackwardDataEx":                             "cudnnRNNBackwardDataEx is the extended version of the function cudnnRNNBackwardData. cudnnRNNBackwardDataEx cudnnRNNBackwardDataEx allows the user to use unpacked (padded) layout for input y and output dx.",
	"cudnnRNNBackwardWeights":                            "cudnnRNNBackwardWeights accumulates weight gradients dw from the recurrent neural network described by rnnDesc with inputs x, hx, and outputs y. The mode of operation in this case is additive, the weight gradients calculated will be added to those already existing in dw. workspace is required for intermediate storage. The data in reserveSpace must have previously been generated by cudnnRNNBackwardData.",
	"cudnnRNNBackwardWeightsEx":                          "cudnnRNNBackwardWeightsEx is the extended version of the function cudnnRNNBackwardWeights. cudnnRNNBackwardWeightsEx cudnnRNNBackwardWeightsEx allows the user to use unpacked (padded) layout for input x and output dw.",
	"cudnnRNNForwardInference":                           "cudnnRNNForwardInference executes the recurrent neural network described by rnnDesc with inputs x, hx, cx, weights w and outputs y, hy, cy. workspace is required for intermediate storage. cudnnRNNForwardInference does not store intermediate data required for training; cudnnRNNForwardTraining should be used for that purpose.",
	"cudnnRNNForwardInferenceEx":                         "cudnnRNNForwardInferenceEx is the extended version of the cudnnRNNForwardInference function. The cudnnRNNForwardTrainingEx allows the user to use unpacked (padded) layout for input x and output y. In the unpacked layout, each sequence in the mini-batch is considered to be of fixed length, specified by maxSeqLength in its corresponding RNNDataDescriptor. Each fixed-length sequence, for example, the nth sequence in the mini-batch, is composed of a valid segment, specified by the seqLengthArray[n] in its corresponding RNNDataDescriptor, and a padding segment to make the combined sequence length equal to maxSeqLength.",
	"cudnnRNNForwardTraining":                            "cudnnRNNForwardTraining executes the recurrent neural network described by rnnDesc with inputs x, hx, cx, weights w and outputs y, hy, cy. workspace is required for intermediate storage. reserveSpace stores data required for training. The same reserveSpace data must be used for future calls to cudnnRNNBackwardData and cudnnRNNBackwardWeights if these execute on the same input data.",
	"cudnnRNNForwardTrainingEx":                          "cudnnRNNForwardTrainingEx is the extended version of the cudnnRNNForwardTraining function. The cudnnRNNForwardTrainingEx allows the user to use unpacked (padded) layout for input x and output y.",
	"cudnnRNNGetClip":                                    "Retrieves the current LSTM cell clipping parameters, and stores them in the arguments provided.",
	"cudnnRNNSetClip":                                    "Sets the LSTM cell clipping mode. The LSTM clipping is disabled by default. When enabled, clipping is applied to all layers. The cudnnRNNSetClip() function may be called multiple times.",
	"cudnnReduceTensor":                                  "cudnnReduceTensor reduces tensor A by implementing the equation C = alpha * reduce op ( A ) + beta * C, given tensors A and C and scaling factors alpha and beta. The reduction op to use is indicated by the descriptor reduceTensorDesc. Currently-supported ops are listed by the cudnnReduceTensorOp_t enum.",
	"cudnnRestoreAlgorithm":                              "(New for 7.1)",
	"cudnnRestoreDropoutDescriptor":                      "cudnnRestoreDropoutDescriptor restores a dropout descriptor to a previously saved-off state.",
	"cudnnSaveAlgorithm":                                 "(New for 7.1)",
	"cudnnScaleTensor":                                   "cudnnScaleTensor scales all the elements of a tensor by a given factor.",
	"cudnnSetActivationDescriptor":                       "cudnnSetActivationDescriptor initializes a previously created generic activation descriptor object.",
	"cudnnSetAlgorithmDescriptor":                        "(New for 7.1)",
	"cudnnSetAlgorithmPerformance":                       "(New for 7.1)",
	"cudnnSetCTCLossDescriptor":                          "cudnnSetCTCLossDescriptor sets a CTC loss function descriptor.",
	"cudnnSetCallback":                                   "(New for 7.1)",
	"cudnnSetConvolution2dDescriptor":                    "cudnnSetConvolution2dDescriptor initializes a previously created convolution descriptor object into a 2D correlation. cudnnSetConvolution2dDescriptor assumes that the tensor and filter descriptors correspond to the forward convolution path and checks if their settings are valid. That same convolution descriptor can be reused in the backward path provided it corresponds to the same layer.",
	"cudnnSetConvolutionNdDescriptor":                    "cudnnSetConvolutionNdDescriptor initializes a previously created generic convolution descriptor object into a n-D correlation. That same convolution descriptor can be reused in the backward path provided it corresponds to the same layer. The convolution computation will be done in the specified dataType, which can be potentially different from the input/output tensors.",
	"cudnnSetDropoutDescriptor":                          "cudnnSetDropoutDescriptor initializes a previously created dropout descriptor object. If states argument is equal to NULL, random number generator states won't be initialized, and only dropout value will be set. No other function should be writing to the memory pointed at by states argument while this function is running. The user is expected not to change memory pointed at by states for the duration of the computation.",
	"cudnnSetFilter4dDescriptor":                         "cudnnSetFilter4dDescriptor initializes a previously created filter descriptor object into a 4D filter. The layout of the filters must be contiguous in memory.",
	"cudnnSetFilterNdDescriptor":                         "cudnnSetFilterNdDescriptor initializes a previously created filter descriptor object. The layout of the filters must be contiguous in memory.",
	"cudnnSetLRNDescriptor":                              "cudnnSetLRNDescriptor initializes a previously created LRN descriptor object.",
	"cudnnSetOpTensorDescriptor":                         "cudnnSetOpTensorDescriptor initializes a Tensor Pointwise math descriptor.",
	"cudnnSetPooling2dDescriptor":                        "cudnnSetPooling2dDescriptor initializes a previously created generic pooling descriptor object into a 2D description.",
	"cudnnSetPoolingNdDescriptor":                        "cudnnSetPoolingNdDescriptor initializes a previously created generic pooling descriptor object.",
	"cudnnSetRNNDataDescriptor":                          "cudnnSetRNNDataDescriptor initializes a previously created RNN data descriptor object. cudnnSetRNNDataDescriptor data structure is intended to support the unpacked (padded) layout for input and output of extended RNN inference and training functions. A packed (unpadded) layout is also supported for backward compatibility.",
	"cudnnSetRNNDescriptor":                              "cudnnSetRNNDescriptor initializes a previously created RNN descriptor object.",
	"cudnnSetRNNDescriptor_v5":                           "cudnnSetRNNDescriptor_v5 initializes a previously created RNN descriptor object.",
	"cudnnSetRNNDescriptor_v6":                           "cudnnSetRNNDescriptor_v6 initializes a previously created RNN descriptor object.",
	"cudnnSetRNNMatrixMathType":                          "cudnnSetRNNMatrixMathType sets the preferred option to use NVIDIA Tensor Cores accelerators on Volta GPU-s (SM 7.0 or higher). When the mType parameter is CUDNN_TENSOR_OP_MATH, inference and training RNN API-s will attempt to use Tensor Cores when weights/biases are of type CUDNN_DATA_HALF or CUDNN_DATA_FLOAT. When RNN weights/biases are stored in the CUDNN_DATA_FLOAT format, the original weights and intermediate results will be down-converted to CUDNN_DATA_HALF before they are used in another recursive iteration.",
	"cudnnSetRNNPaddingMode":                             "cudnnSetRNNPaddingMode enables or disables the padded RNN input/output for a previously created and initialized RNN descriptor. cudnnSetRNNPaddingMode information is required before calling the cudnnGetRNNWorkspaceSize and cudnnGetRNNTrainingReserveSize functions, to determine whether additional workspace and training reserve space is needed. By default the padded RNN input/output is not enabled.",
	"cudnnSetRNNProjectionLayers":                        "(New for 7.1)",
	"cudnnSetReduceTensorDescriptor":                     "cudnnSetReduceTensorDescriptor initializes a previously created reduce tensor descriptor object.",
	"cudnnSetSpatialTransformerNdDescriptor":             "cudnnSetSpatialTransformerNdDescriptor initializes a previously created generic spatial transformer descriptor object.",
	"cudnnSetStream":                                     "cudnnSetStream sets the user's CUDA stream in the cuDNN handle. The new stream will be used to launch cuDNN GPU kernels or to synchronize to this stream when cuDNN kernels are launched in the internal streams. If the cuDNN library stream is not set, all kernels use the default (NULL) stream. Setting the user stream in the cuDNN handle guarantees the issue-order execution of cuDNN calls and other GPU kernels launched in the same stream.",
	"cudnnSetTensor":                                     "cudnnSetTensor sets all the elements of a tensor to a given value.",
	"cudnnSetTensor4dDescriptor":                         "cudnnSetTensor4dDescriptor initializes a previously created generic Tensor descriptor object into a 4D tensor. The strides of the four dimensions are inferred from the format parameter and set in such a way that the data is contiguous in memory with no padding between dimensions.",
	"cudnnSetTensor4dDescriptorEx":                       "cudnnSetTensor4dDescriptorEx initializes a previously created generic Tensor descriptor object into a 4D tensor, similarly to cudnnSetTensor4dDescriptor but with the strides explicitly passed as parameters. cudnnSetTensor4dDescriptorEx can be used to lay out the 4D tensor in any order or simply to define gaps between dimensions.",
	"cudnnSetTensorNdDescriptor":                         "cudnnSetTensorNdDescriptor initializes a previously created generic Tensor descriptor object.",
	"cudnnSetTensorNdDescriptorEx":                       "cudnnSetTensorNdDescriptorEx initializes an n-D tensor descriptor.",
	"cudnnSoftmaxBackward":                               "cudnnSoftmaxBackward computes the gradient of the softmax function.",
	"cudnnSoftmaxForward":                                "cudnnSoftmaxForward computes the softmax function.",
	"cudnnSpatialTfGridGeneratorBackward":                "cudnnSpatialTfGridGeneratorBackward computes the gradient of a grid generation operation.",
	"cudnnSpatialTfGridGeneratorForward":                 "cudnnSpatialTfGridGeneratorForward generates a grid of coordinates in the input tensor corresponding to each pixel from the output tensor.",
	"cudnnSpatialTfSamplerBackward":                      "cudnnSpatialTfSamplerBackward computes the gradient of a sampling operation.",
	"cudnnSpatialTfSamplerForward":                       "cudnnSpatialTfSamplerForward performs a sampler operation and generates the output tensor using the grid given by the grid generator.",
	"cudnnTransformTensor":                               "cudnnTransformTensor copies the scaled data from one tensor to another tensor with a different layout. Those descriptors need to have the same dimensions but not necessarily the same strides. The input and output tensors must not overlap in any way (i.e., tensors cannot be transformed in place). cudnnTransformTensor can be used to convert a tensor with an unsupported format to a supported one.",
}
