
package jsat.distributions.kernels;

import java.io.Serializable;
import java.util.List;
import jsat.linear.Vec;
import jsat.parameters.Parameterized;

/**
 * The KernelTrick is a method that can be used to alter an algorithm to do its 
 * calculations in a projected feature space, without explicitly forming the 
 * features. If an algorithm uses only dot products, the Kernel trick can be 
 * used in place of these dot products, and computes the inner product in a 
 * different feature space. 
 * <br><br>
 * All KernelTrick objects are {@link Parameterized parameterized} so that the 
 * values of the kernel can be exposed by the algorithm that makes use of these 
 * parameters. To avoid conflicts in parameter names, the parameters of a 
 * KernelTrick should be of the form:<br>
 * &lt; SimpleClassName &gt;_&lt; Variable Name &gt;
 * 
 * @author Edward Raff
 */
public interface KernelTrick extends Parameterized, Cloneable, Serializable
{
    /**
     * Evaluate this kernel function for the two given vectors. 
     * @param a the first vector
     * @param b the second vector
     * @return the kernel evaluation k(a, b)
     */
    public double eval(Vec a, Vec b);
    
    /**
     * A descriptive name for the type of KernelFunction 
     * @return a descriptive name for the type of KernelFunction 
     */
    @Override
    public String toString();
    
    /**
     * Returns a copy of this kernel, including any parameter values.
     * @return a clone of this kernel trick
     */
    public KernelTrick clone();
    
    //Cache related
    
    /**
     * Indicates if this kernel supports building an acceleration cache
     * using the {@link #getAccelerationCache(java.util.List) } and associated 
     * cache accelerated methods. By default this method will return 
     * {@code false}. If {@code true}, then a cache can be obtained from this
     * matrix and used in conjunction with {@link #eval(int, jsat.linear.Vec, 
     * java.util.List, java.util.List) } and {@link #eval(int, int, 
     * java.util.List, java.util.List) } to perform kernel products. 
     * @return {@code true} if cache acceleration is supported for this kernel, 
     * {@code false} otherwise. 
     */
    public boolean supportsAcceleration();
    
    /**
     * Creates a new list of cache values from a given list of training set 
     * vectors. If this kernel does not support acceleration, {@code null} will 
     * be returned.
     *
     * @param trainingSet the list of training set vectors
     * @return a list of cache values that may be used by this kernel
     */
    public List<Double> getAccelerationCache(List<? extends Vec> trainingSet);
    
    /**
     * Pre computes query information that would have been generated if the query 
     * was a member of the original list of vectors when calling 
     * {@link #getAccelerationCache(java.util.List) } . This can then be used if 
     * a large number of kernel computations are going to be done against 
     * points in the original set for a point that is outside the original space.
     * <br><br>
     * If this kernel does not support acceleration, {@code null} will be 
     * returned.
     * 
     * @param q the query point to generate cache information for
     * @return the cache information for the query point
     */
    public List<Double> getQueryInfo(Vec q);
    
    /**
     * Appends the new cache values for the given vector to the list of cache 
     * values. This method is present for online style kernel learning 
     * algorithms, where the set of vectors is not known in advance. When a 
     * vector is added to the set of kernel vectors, its cache values can be 
     * added using this method. <br><br>
     * The results of calling this sequentially on a list of vectors starting
     * with an empty double list is equivalent to getting the results from 
     * calling {@link #getAccelerationCache(java.util.List) }
     * <br><br>
     * If this kernel does not support acceleration, this method call will 
     * function as a no-op. 
     * 
     * @param newVec the new vector to add to the cache values
     * @param cache the original list of cache values to add to
     */
    public void addToCache(Vec newVec, List<Double> cache);
    
    /**
     * Computes the kernel product between one vector in the original list of vectors 
     * with that of another vector not from the original list, but which had 
     * information generated by {@link #getQueryInfo(jsat.linear.Vec) }.
     * <br> If the cache input is {@code null}, then 
     * {@link #eval(jsat.linear.Vec, jsat.linear.Vec) } will be called directly.
     * @param a the index of the vector in the cache
     * @param b the other vector
     * @param qi the query information about b
     * @param vecs the list of vectors used to build the cache 
     * @param cache the cache associated with the given list of vectors
     * @return the kernel product of the two vectors
     */
    public double eval(int a, Vec b, List<Double> qi, List<? extends Vec> vecs, List<Double> cache);
    
    /**
     * Produces the correct kernel evaluation given the training set and the
     * cache generated by {@link #getAccelerationCache(java.util.List)  }. The training
     * vectors should be in the same order.
     * 
     * @param a the index of the first training vector
     * @param b the index of the second training vector
     * @param trainingSet the list of training set vectors
     * @param cache the double list of cache values generated by this kernel 
     * for the given training set
     * @return the same kernel evaluation result as 
     * {@link #eval(jsat.linear.Vec, jsat.linear.Vec) }
     */
    public double eval(int a, int b, List<? extends Vec> trainingSet, List<Double> cache);
    
    /**
     * Performs an efficient summation of kernel products of the form <br>
     * <big>&#8721;</big> &alpha;<sub>i</sub> k(x<sub>i</sub>, y) <br>
     * where <i>x</i> are the final set of vectors, and <i>&alpha;</i> the 
     * associated scalar multipliers
     * 
     * @param finalSet the final set of vectors
     * @param cache the cache associated with the final set of vectors
     * @param alpha the coefficients associated with each vector
     * @param y the vector to perform the summed kernel products against
     * @param start the starting index (inclusive) to sum from
     * @param end the ending index (exclusive) to sum from
     * @return the sum of the multiplied kernel products
     */
    public double evalSum(List<? extends Vec> finalSet, List<Double> cache, double[] alpha, Vec y, int start, int end);
    
    /**
     * Performs an efficient summation of kernel products of the form <br>
     * <big>&#8721;</big> &alpha;<sub>i</sub> k(x<sub>i</sub>, y) <br>
     * where <i>x</i> are the final set of vectors, and <i>&alpha;</i> the 
     * associated scalar multipliers
     * 
     * @param finalSet the final set of vectors
     * @param cache the cache associated with the final set of vectors
     * @param alpha the coefficients associated with each vector
     * @param y the vector to perform the summed kernel products against
     * @param qi the query information about y
     * @param start the starting index (inclusive) to sum from
     * @param end the ending index (exclusive) to sum from
     * @return the sum of the multiplied kernel products
     */
    public double evalSum(List<? extends Vec> finalSet, List<Double> cache, double[] alpha, Vec y, List<Double> qi, int start, int end);
}
