%
% Copyright (c) 2009 Liang Tang
% 
%  SLICE.m
%  written by Liang Tang
%  tangl99@gmail.com
% 
% Version history:
% 
% This is version 1.00, released February 1, 2009
%

% Find the top k linear correlations from the dataset
% @param data: The data matrix. Each row is a data instance.
% @param k: The number of dimensions that are strong correlated.
% @param fmax: The maximum allowed value of the variance fraction along the k smallest eigenvectors of a strongly correlated data subset.
% @param minsup: The minimum proportion of the found subsets
% @param maxloop: The maximum times of loop in greedy search for each data subset
function [corrplanes, sups]=SLICE(data, k, fmax, minsup,maxloop)
    % Find the top linear correlations in the dataset by repeatedly
    % drawing random seed points and growing each seed into a candidate
    % hyperplane with a greedy search.
    %
    % data    - M-by-N data matrix, one instance per row.
    % k       - number of strongly correlated dimensions.
    % fmax    - upper bound on the variance fraction in the k weakest
    %           principal directions of an accepted subset.
    % minsup  - minimum proportion of the dataset a subset must cover.
    % maxloop - maximum greedy-search iterations per seed.
    % corrplanes - cell array of hyperplanes, each [coefficients, const].
    % sups       - support (fraction of the dataset) of each hyperplane.
    [M,N]=size(data);
    candiclusters={};
    candiplanes={};
    candisups=[];
    % Indices still eligible to serve as seed points.
    candiseeds=1:M;
    t=1;
    % A hyperplane in N dimensions needs more than N points, so stop once
    % either the dataset or the remaining seed pool is too small.
    while M>N && length(candiseeds)>N
        % Draw N seed indices (with replacement) from the remaining pool.
        % randint was removed from MATLAB; randi is its documented
        % replacement and draws uniformly from 1..numel(candiseeds).
        cluster=candiseeds(randi(numel(candiseeds),1,N));

        % Grow the seed into a hyperplane with the greedy search.
        [fcluster,plane,sup]=greedysearch(data,cluster,k,fmax,minsup,maxloop);

        if ~isempty(plane)
            % A valid hyperplane was found: record it and retire its
            % member points from the seed pool.
            candiclusters{end+1}=fcluster;
            candiplanes{end+1}=plane;
            candisups=[candisups,sup];
            candiseeds=setdiff(candiseeds,fcluster);
            % Display the found correlation's seed and the iteration count.
            disp(cluster);
            disp(t);
        else
            % No plane found.  Retire the absorbed points AND the raw
            % seeds: the original removed only fcluster, so an empty
            % fcluster shrank nothing and the loop could spin forever
            % re-drawing the same failing seed pool.
            candiseeds=setdiff(candiseeds,union(fcluster,cluster));
        end
        t=t+1;
    end
    corrplanes=candiplanes;
    sups=candisups;
end

% Greedy search a hyperplane from a given seed
function [finalcluster, plane,sup]=greedysearch(data,cluster,k,fmax,minsup,maxloop)
    % Greedily refine a seed cluster: fit a PCA hyperplane to the current
    % subset, re-absorb the best-fitting points, and repeat until the
    % membership stops changing or maxloop iterations elapse.
    %
    % Returns the converged point indices, the hyperplane [coeffs, const]
    % (empty when no subset meeting minsup converged), and its support
    % (-1 when no hyperplane was produced).
    datasize = size(data, 1);
    plane = [];
    sup = -1;
    finalcluster = [];
    for iter = 1:maxloop
        % Materialize the current subset and locate its centroid.
        subset = builddata(cluster, data);
        center = mean(subset);

        % Principal directions of the subset, weakest variance first.
        [eigvecs, junkvals] = PCA(subset);

        % Re-absorb the points that best fit the fitted hyperplane.
        [absorbed, f] = absorbpoints(eigvecs', center, data, k, fmax);
        absorbed = sort(absorbed);
        finalcluster = absorbed;

        if isequal(cluster, absorbed)
            % Converged: membership is stable.  Emit the hyperplane only
            % when the subset is large enough to matter.
            if length(cluster) >= datasize * minsup
                % Normal vector is the weakest principal direction; the
                % constant places the plane through the centroid.
                coe = eigvecs(:, 1)';
                plane = [coe, -coe * center'];
                sup = length(cluster) / datasize;
            end
            finalcluster = cluster;
            break;
        end
        cluster = absorbed;
    end
end

% Build dataset from the indices set of the data
% Build dataset from the indices set of the data
function [cdata]=builddata(cluster, data)
    % Extract the rows of data selected by the index vector cluster.
    % MATLAB row indexing does the element-by-element copy loop of the
    % original in one vectorized operation with identical results
    % (including a 0-by-N result for an empty index set).
    cdata = data(cluster, :);
end

% Absorb the points by the distance between the point and plane 
function [cluster,f]=absorbpoints(P,cmean,data,k, fmax)
    % Rank every data point by how well it fits the candidate hyperplane,
    % then absorb points best-first until the pooled variance fraction in
    % the k weakest principal directions exceeds fmax.
    %
    % P     - matrix of transposed eigenvectors; rows are principal
    %         directions sorted by ascending eigenvalue (see PCA).
    % cmean - 1-by-N centroid of the current cluster (hyperplane anchor).
    % data  - full M-by-N data matrix.
    % k     - number of weak (low-variance) directions to test.
    % fmax  - upper bound on the variance fraction in those k directions.
    % cluster - indices of absorbed points, best-fitting first.
    % f       - variance fraction where absorption stopped.
    %           NOTE(review): f is undefined when data is empty (M == 0).
    [M,N] = size(data);
    ydata = zeros(1,M);
    pydata = zeros(N,N,M);
    cluster = zeros(1,M);
    COV = zeros(N,N);
    numpoint = 0;
    % Score each point: project its centered coordinates onto the
    % principal directions and record the outer product; the score is the
    % fraction of the projected "energy" falling in the first k
    % (smallest-eigenvalue) directions -- small means close to the plane.
    for i=1:M
        y=data(i,:)-cmean;
        py=P*y';
        pycov=py*py';
        v=diag(pycov);
        ydata(i)=sum(v(1:k))/sum(v);
        pydata(:,:,i)=pycov;
    end
    
    % Absorb points in ascending score order, maintaining a running
    % pooled covariance of the projected coordinates.
    [junk, rindice]=sort(ydata);
    for i=1:M
        % Incremental covariance update.  NOTE(review): the weights
        % i/(i+1) and i/((i+1)^2) do not form a standard running mean
        % (that would use 1/(i+1) on the new term); presumably the
        % author's intended damping -- confirm against the SLICE paper.
        COV=(i/(i+1))*COV+(i/((i+1)*(i+1)))*pydata(:,:,rindice(i));
        V=diag(COV);
        f=sum(V(1:k)) / sum(V);
        % Stop as soon as the weak directions carry too much variance.
        if f > fmax
            break;
        end
        cluster(i)=rindice(i);
        numpoint=numpoint+1;
    end
    % Trim the preallocated buffer to the points actually absorbed.
    cluster=cluster(1:numpoint);
end

% Principal component analysis with components sorted by ascending
% eigenvalue (the weakest direction -- the hyperplane normal -- is first).
function [eigenvectors,eigenvalues]=PCA(data)
    % data - M-by-N matrix, one observation per row.
    % eigenvectors - N-by-N matrix, one unit eigenvector per column,
    %                ordered by ascending eigenvalue.
    % eigenvalues  - ascending column vector of eigenvalues.
    [M,N]=size(data);
    mn=mean(data,1);
    % Center the data before forming the covariance.
    newdata=data - repmat(mn,M,1);
    % Biased (1/M) normalization, matching the original implementation.
    covariance=1/M*(newdata'*newdata);
    [PC,V]=eig(covariance);
    [eigenvalues, rindices]=sort(diag(V));
    % Reorder the eigenvector columns in one indexing step instead of the
    % original element-wise copy loop -- identical result.
    eigenvectors=PC(:,rindices);
end

