%Theja added this info on July 15 2010:
%This file implements the naive method of first performing the supervised
%learning and then dealing with the route cost optimization.
%Method 1 where first supervised learning is done using logistic
%regression. And then a route is found.
%Explanation of the solution: it chooses the path 1-2-3-6-4-5 based on
%descending probabilities.

% DO THIS BEFORE EXECUTING: put the Gurobi MEX interface on the path.
addpath(genpath('/media/sda2/Users/Nermine/Desktop/code/sdp/gurobi301/gurobi_mex_v1.20'));

clc;
% 'clear' instead of 'clear all': 'clear all' also purges loaded functions,
% MEX files and breakpoints, which slows re-runs with no benefit here.
clear;
close all;

%% Step1 : Finding the boundary for classification using Logistic regression

% Generate training data: two Gaussian clusters, one per label in {-1,+1}.
Ntrain = 1800; % training will contain Ntrain examples.
l = [ones(Ntrain/2,1); -ones(Ntrain/2,1)];              % label #Ntrain
d = [l/2 + randn(Ntrain,1)/1.5  l/2 + randn(Ntrain,1)/1.5]; % data #Ntrain

triangledataflag=1;
if(triangledataflag==1)
    % Or generate two triangular data clusters to illustrate the point:
    % draw uniform points in the unit square (homogeneous coordinates).
    xrandgen1 = rand(Ntrain,1);
    xrandgen2 = rand(Ntrain,1);
    xrandgen = [xrandgen1 xrandgen2 ones(Ntrain,1)];
    poslowerTri = find(xrandgen(:,1)<xrandgen(:,2));    % strictly above the diagonal
    posupperTri = find(xrandgen(:,1)>=xrandgen(:,2));   % on/below the diagonal
    % Translate one triangle away from the other (homogeneous transform T).
    T = [1 0 0.99;
         0 1 -0.99;
         0 0 1];
    xpositionedpart = xrandgen(poslowerTri,:)*T';
    % plot(xpositionedpart(:,1),xpositionedpart(:,2),'b.'); hold on;
    % plot(xrandgen(posupperTri,1),xrandgen(posupperTri,2),'g.'); hold off;
    xpositionfull = [xpositionedpart;xrandgen(posupperTri,:)];
    % Scale by 3 and rotate by 90 degrees about the origin.
    xpositionfull = 3*xpositionfull*[cos(90*pi/180) -sin(90*pi/180) 0; sin(90*pi/180) cos(90*pi/180) 0; 0 0 1 ]';
    % Shift down by 1 along y, then drop the homogeneous coordinate.
    d = xpositionfull*[1 0 0; 0 1 -1; 0 0 1 ]';
    d = d(:,1:2);
    %plot(d(:,1),d(:,2),'.');
    l  = [ones(length(xpositionedpart),1); -ones(Ntrain-length(xpositionedpart),1)];
    % Label noise: flip each label independently with probability 0.1.
    % Vectorized replacement of the old per-element loop; flipprob is drawn
    % first, so the RNG stream is consumed exactly as before.
    flipprob = rand(Ntrain,1);
    flipidx = flipprob > 0.9;
    l(flipidx) = -l(flipidx);
end

trainingdata = [d(1:Ntrain,:) l(1:Ntrain)];             % appended #Ntrain

% Logistic regression: glmfit's binomial response is [successes trials],
% so labels {-1,+1} are mapped to {0,1} with one trial per example.
B = glmfit(trainingdata(:,1:2), [0.5*trainingdata(:,3)+0.5 ones(length(trainingdata(:,3)),1)], 'binomial', 'link', 'logit');
Ftrain=trainingdata(:,1:2)*B(2:end) + B(1);   %%%% This is the model (linear score)
% Logistic loss on the training set.
trainingloss = sum(log(1+exp(-(trainingdata(:,3).*Ftrain))));

% First 5 test points: two small Gaussian clusters around +/-0.5.
lt = [1; 1; 1; -1; -1];
noise1 = randn(5,1);
noise2 = randn(5,1);
dt = [lt/2 + noise1/5, lt/2 + noise2/5];
testdata = [dt lt];
% The 6th point: placed on the fitted decision boundary.
epsilonval = 1e-6;
xt = -1:0.1:1;
m = -B(2)/(B(3) + epsilonval);  % boundary slope (epsilon guards B(3)==0)
c = -B(1)/(B(3) + epsilonval);  % boundary intercept
yt = m*xt + c;
testdata(6,:) = [xt(end), yt(end), -1];

% Plot of the training data and the 6 test points.
plotrequired = 1;
if(plotrequired==1)
    figure; hold on;
    isPosTrain = trainingdata(:,3)==1;
    plot(trainingdata(isPosTrain,1),  trainingdata(isPosTrain,2),  'b.');
    plot(trainingdata(~isPosTrain,1), trainingdata(~isPosTrain,2), 'r.');
    isPosTest = testdata(:,3)==1;
    plot(testdata(isPosTest,1),  testdata(isPosTest,2),  'yo');
    plot(testdata(~isPosTest,1), testdata(~isPosTest,2), 'mo');
    plot(xt,yt);    % decision boundary
    axis([-3 3 -3 3]);
    hold off;
end


% Find probabilities q on the test data (of size 6) from the fitted model.
Ftest=testdata(:,1:2)*B(2:end) + B(1);   %%%% This is the model (linear score)
% Sigmoid of the score; scalar expansion replaces the old ones() vectors
% (numerically identical, just idiomatic MATLAB).
q = 1./(1 + exp(-Ftest));

%Plot to visualize the probability surface. needs xt vector from above.
% x2 = -1:0.1:1;
% for i=1:length(xt)
%     for j=1:length(x2)
%         psurf(i,j) = 1./(1+exp(-(xt(i)*B(2) + x2(j)*B(end) + B(1))));
%     end
% end
% figure; surf(psurf)

% For legacy: fixed index sets into the 6 test points.
pos_test = [1 2 3 4 5 6];   % all test points
pos_l1 = [1 2 3];           % indices with label +1
pos_l0 = [4 5];             % indices with label -1
pos_t6 = 6;                 % the boundary point added above

flagstep2 = 1;
if(flagstep2==1)
%% Step2: Computing the routecost
    % Solve a single-vehicle routing MIP with gurobi_mex.  Decision vector
    % is [x; y]: x (N^2 continuous) carries flow on arc (i,j), y (N^2
    % binary) selects which arcs are used.  Both use MATLAB's column-major
    % reshape ordering, so variable i+(j-1)*N is x(i,j).

    q = q'                  % make q a 1xN row vector (unsuppressed: prints q)
    N = 6;                  % number of nodes; contypes/vtypes below hardcode this
    M = 100000;             % big-M; only used by the commented-out cost matrix

    %weights
    %q = [1 1 1 1 1 1];

%     C = [0 12  M  M  9 16; %Hardcoded N here
%         12  0 19 12  M 15;
%          M 19  0 21  M 17; 
%          M 12 21  0 10 16;
%          9  M  M 10  0 10;
%         16 15 17 16 10  0];
    % Arc cost matrix (symmetric): cheap within clusters {1,2,3} and {4,5},
    % pricier across clusters, node 6 uniformly expensive to reach.
    C = [0  7  7 10 10 12;  %Hardcoded N
         7  0  7 10 10 12;
         7  7  0 10 10 12; 
        10 10 10  0  7 12;
        10 10 10  7  0 12;
        12 12 12 12 12  0];
    %TODO: change 1,2,3 to N(10,2) , 4,5 to N(10,2) and so on.

    c = [reshape(C,N^2,1);zeros(N^2,1)];% gurobi input: vectorized form of C Matrix; y has zero cost

    objtype = 1;                        % gurobi input: 1 for minimize, -1 for maximize

    % Number of variables: integers N^2 for xij and N^2 binary for yij (could be N^2-N each)
    % Creating the R matrix required for one of the constraints

    % R(i,j) is the capacity coefficient linking x(i,j) <= R(i,j)*y(i,j)
    % (constraint A5): default sum(q)-q(1); column 1 gets q(1); row 1
    % (including R(1,1), since the i==1 branch fires last) gets sum(q).
    R = (sum(q)-q(1))*ones(N,N);
    for i=1:N
        for j=1:N
            if(j==1)
                R(i,j) = q(1);
            end
            if(i==1)
                R(i,j) = sum(q);
            end
        end
    end


    % A0: 2N equality rows forcing the diagonals to zero, x(i,i)=0 and
    % y(i,i)=0 (no self-loop arcs).  circshift moves the single 1 to (i,i).
    A00 = [diag([1 zeros(1,N-1)]) zeros(N)];
    A01 = [zeros(N) diag([1 zeros(1,N-1)])];
    for i=1:N
     A0(i,:) = reshape(circshift(A00,[i-1,i-1]),2*N^2,1)'; 
     A0(i+N,:) = reshape(circshift(A01,[i-1,i-1]),2*N^2,1)';
    end

    % Assignment constraints on y: each node has exactly one incoming arc
    % (column sum = 1, A2) and one outgoing arc (row sum = 1, A1).
    A2(1,:) = [zeros(1,N^2) ones(1,N) zeros(1,N^2-N)];                  %colsum of yij
    A1(1,:) = [zeros(1,N^2) reshape([ones(N,1) zeros(N,N-1)]',1,N^2)];  %rowsum of yij
    for i=2:N
        A2(i,:) = circshift(A2(1,:)',N*(i-1))';
        A1(i,:) = circshift(A1(1,:)',i-1)';
    end

    A3 = [ones(N,1); zeros(N^2-N,1); zeros(N^2,1)]'; % the Nth leg flow value is 1 back to the first node. column sum
    % (A3 sums x(:,1), the flow entering node 1; its rhs in b below is q(1).)


    % Flow conservation on x: A4 = (inflow into node i) - (outflow from i).
    A42(1,:) = [ones(1,N) zeros(1,N^2-N) zeros(1,N^2)];                 %colsum of xij
    A41(1,:) = [reshape([ones(N,1) zeros(N,N-1)]',1,N^2) zeros(1,N^2)]; %rowsum of xij
    A4(1,:) = A42(1,:) - A41(1,:);
    for i=2:N
        A42(i,:) = circshift(A42(1,:)',N*(i-1))';
        A41(i,:) = circshift(A41(1,:)',i-1)';
        A4(i,:) = A42(i,:)-A41(i,:);
    end
    % rhs: node i absorbs q(i) units of flow; node 1 (the depot) instead
    % emits the rest, hence the -sum(q) correction on the first entry.
    bA4 = q';
    bA4(1) = bA4(1) - sum(q);

    % A5: N^2 linking rows, x(i,j) - R(i,j)*y(i,j) <= 0, so flow may only
    % use arcs that y has selected.
    A5 = zeros(N^2,2*N^2);
    for i=1:N
        for j=1:N
            A50 = [zeros(N) zeros(N)];
            A50(i,j) = 1;           % for xij
            A50(i,j+N) = -R(i,j);   % for yij
            A5(N*(i-1)+j,:) = reshape(A50,2*N^2,1)';
        end
    end

    % NOTE(review): built but never added to A below — apparently unused.
    randomA6 = [reshape([ones(1,N); zeros(N-1,N)],1,N^2) zeros(1,N^2)]; % the 1st leg flow value is N exiting from first node. row sum

    % Stack all constraints.  Row count: 2N (diag) + N + N (assignment)
    % + 1 (A3) + N (flow) + N^2 (linking) = 67 for N=6.  contypes (31 '='
    % then 36 '<') and vtypes (36 'C' then 36 'B') are hardcoded to these
    % lengths and must be regenerated if N changes.
    A =  sparse([A0; A1; A2; A3; A4; A5]);
    b = [zeros(2*N,1); ones(N,1);ones(N,1); q(1); bA4; zeros(N^2,1)];
    contypes = '===============================<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<';


    lb = zeros(2*N^2,1); % scalar means a uniform lower bound equal to scalar (which is zero here)
    ub = [sum(q)*ones(N^2,1);ones(N^2,1)]; % using loosely somewhat. Should Rij figure here?
    vtypes = 'CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB';

    % Gurobi solver options (see the syntax notes at the bottom of this file).
    clear opts
    opts.IterationLimit = 4000;
    opts.FeasibilityTol = 1e-6;
    opts.IntFeasTol = 1e-5;
    opts.OptimalityTol = 1e-6;
    opts.LPMethod = 1;         % 0 - primal, 1 - dual
    opts.Presolve = -1;        % -1 - auto, 0 - no, 1 - conserv, 2 - aggressive
    opts.Display = 1;
    %opts.LogFile = 'weighted_fischetti_gurobi_mex_MIP.log';
    %opts.WriteToFile = 'weighted_fischetti_gurobi_mex_MIP.mps';

    [x,val,exitflag,output] = gurobi_mex(c,objtype,A,b,contypes,lb,ub,vtypes,opts);
    if(exitflag==2)             % 2 == OPTIMAL in Gurobi status codes
        %reshape(x(1:36),6,6)
        round(reshape(x(37:72),6,6))    % print selected arcs y as a 6x6 matrix
        q
    end
end









%% Syntax for Gurobi:
%     x = gurobi_mex(c, objtype, A, b, contypes, lb, ub, vartypes, options); 
%     *  c: objective coefficient vector, double. 
%     [] (empty array) means uniformly 0 coefficients, and scalar means all coefficients equal to scalar.  
%     * objtype: 1 (minimization) or -1 (maximization).
%     * A: constraint coefficient matrix, double, sparse.
%     * b: constraint right-hand side vector, double. 
%     * contypes: constraint types. Char array of '>', '<', '='. 
%     * lb: variable lower bound vector, double. 
%     * ub: variable upper bound vector, double. 
%     * vartypes: variable types. Char array of chars 'C', 'B', 'I', 'S', 'N'. C for continuous; B for binary; I for integer; S for semi-continuous; N for semi-integer. [] (empty array) means all variables are continuous. 
% Output Description
%     * x: primal solution vector; empty if Gurobi encounters errors or stops early (in this case, check output flag).
%     * val: optimal objective value; empty if Gurobi encounters errors or
%     stops early.