﻿#r @"C:\Program Files (x86)\Sho 2.0 for .NET 4\packages\Optimizer\Microsoft.Solver.Foundation.dll"
open Microsoft.SolverFoundation.Common
open Microsoft.SolverFoundation.Solvers
open Microsoft.SolverFoundation.Services


// test QP
module TestQP = 
(* minimize x^2 + y^2 + 3xy + 2x + y *)
(* notice that in the InteriorPointSolver,
   the coefficients for xy & yx should be the same (so only set ONCE!)
   if we set both 3xy and 0yx, the solver takes the later coef. 
*)
    let solver = new InteriorPointSolver()
    let _, goal = solver.AddRow("dual objective value")
    // true = minimize; AddGoal returns a goal handle we don't need,
    // so ignore it explicitly (consistent with SVM.buildSVM below)
    solver.AddGoal(goal, 0, true) |> ignore

    let _, x = solver.AddVariable("x")
    let _, y = solver.AddVariable("y")

    // linear terms: 2x + y
    solver.SetCoefficient(goal, x, Rational.op_Implicit(2))
    solver.SetCoefficient(goal, y, Rational.op_Implicit(1))
    // for terms like x-y (where x != y), set its coef for only one time!
    solver.SetCoefficient(goal, Rational.op_Implicit(3), x, y)
    //solver.SetCoefficient(goal, Rational.Zero, y, x)
    // quadratic terms: x^2 + y^2
    solver.SetCoefficient(goal, Rational.op_Implicit(1), x, x)
    solver.SetCoefficient(goal, Rational.op_Implicit(1), y, y)


    // solve with the default interior-point parameters
    let param = new InteriorPointSolverParams()
    solver.Solve(param) |> ignore

    //solver.Result
    // value index 0 is the goal row; the variables follow at indices 1 and 2
    let objectiveValue = solver.GetValue(0).ToDouble()

    let x0 = solver.GetValue(1).ToDouble()
    let y0 = solver.GetValue(2).ToDouble()

    // re-evaluate the objective at the solution as a sanity check; bound to a
    // name so the module does not end with an implicitly-discarded expression
    let recomputedObjective = x0*x0 + y0*y0 + 3.0 * x0 * y0 + 2. * x0  + y0
    //x0*x0 + y0*y0 + 0.0 * x0 * y0 + 2. * x0  + y0




open System


// A labeled dataset: one feature vector per instance plus a parallel label
// array (labels are used as +1 / -1 by svmClassify below).
type dataset = 
    { features: float array array; // (instance = float array) array
      mutable labels: int array; // parallel to features; mutable so labels can be replaced in place
    }
    with
    // Number of instances (rows) in the dataset.
    member x.NumSamples = x.features.Length


// Extension of the built-in Array module.
module Array = 
    /// Returns the median element of [a]: the array is sorted and the element
    /// at index length/2 is returned (the upper-middle element for
    /// even-length input). Raises ArgumentException on an empty array instead
    /// of an obscure index-out-of-range error.
    let median (a:'a array) = 
        if Array.isEmpty a then invalidArg "a" "median: input array is empty"
        let sorted = Array.sort a
        sorted.[sorted.Length / 2]
        
// Standard SVM kernel functions. Each takes two feature vectors of the same
// length and returns a scalar similarity.
module Kernel = 
    /// Linear kernel: the ordinary dot product <a, b>.
    let linear a b = 
        Array.map2 ( * ) a b |> Array.sum

    /// Polynomial kernel of degree [k]: (1 + <a, b>)^k.
    let polynomial k a b = 
        (1.0 + linear a b) ** float k

    /// Gaussian (RBF) kernel: exp(-beta/2 * ||a - b||^2).
    let gaussian beta a b = 
        let sqDist =
            Array.map2 (fun p q -> let d = p - q in d * d) a b
            |> Array.sum
        exp (-0.5 * beta * sqDist)

module SVM = 
    /// A trained SVM model: the retained support vectors, their Lagrange
    /// multipliers, the kernel used at train time, and the bias term.
    type svmmodel = {
        SVs:dataset; // support vectors (features + labels) kept after training
        alpha:float array; // Lagrange multipliers, parallel to SVs
        kernel: float[] -> float[] -> float; // kernel used for training and prediction
        w0:float; // bias term of the decision function
        }
        with
        // Number of support vectors retained by training.
        member x.NumSupporVectors = x.SVs.features.Length


    /// Trains a soft-margin SVM by solving the dual QP with the
    /// InteriorPointSolver.
    ///   ds     - training data (features with +/-1 labels)
    ///   C      - box constraint on each alpha (soft-margin penalty)
    ///   kernel - kernel function K(x_i, x_j)
    /// Returns an svmmodel containing only the support vectors
    /// (instances whose alpha is numerically non-zero).
    let buildSVM (ds:dataset) (C:float) (kernel:float[] -> float[] -> float) = 

        let n = ds.features.Length
        let C = Rational.op_Implicit(C)
        let zero = Rational.Zero

        // create a interior point solver, which solves the QP problem
        let solver = new InteriorPointSolver()

        // set the objective value / goal
        let _, goal = solver.AddRow("dual objective value")

        // false == maximizing the objective value
        // the value of goal is (1)
        solver.AddGoal(goal, 0, false) |> ignore

        // add the Lagrangian variables \alpha_i and set their bounds (0 <= \alpha_i <= C)
        // alpha.[i] stores the solver's variable id (an int) for \alpha_i
        let alpha = Array.create n 0
        for i=0 to n-1 do 
            let _, out = solver.AddVariable("alpha_"+i.ToString())
            alpha.[i] <- out
            solver.SetBounds(out, zero, C)

        // add constraint: \sum_i \alpha_i * y_i = 0
        // equation (2); equality expressed as lower bound = upper bound = 0
        let _, sumConstraint = solver.AddRow("SumConstraint")
        solver.SetBounds(sumConstraint, zero, zero);

        for i=0 to n-1 do
            // set the coefs for the sum constraint
            // equation (2)
            solver.SetCoefficient(sumConstraint, alpha.[i], Rational.op_Implicit(ds.labels.[i]))

            // add the \alpha_i terms into the objective
            solver.SetCoefficient(goal, alpha.[i], Rational.One)

            // add the quadratic terms
            // (only j <= i: the solver takes the coef of each (i,j) pair once,
            // so off-diagonal terms carry the full -coef and the diagonal -0.5*coef)
            for j=0 to i do 
                // coef = y_i * y_j * K(x_i, x_j)
                let coef = float(ds.labels.[i] * ds.labels.[j]) * (kernel ds.features.[i] ds.features.[j])
                if i=j then
                    solver.SetCoefficient(goal, Rational.op_Implicit(-0.5 * coef), alpha.[i], alpha.[j])
                else
                    solver.SetCoefficient(goal, Rational.op_Implicit(-coef), alpha.[i], alpha.[j])
                
        // use the default parameters 
        let param = new InteriorPointSolverParams()

        
        solver.Solve(param) |> ignore

        // get the alpha values out
        // (index 0 is the goal row; the alpha variables start at index 1)
        let alphaValue = Array.init n (fun i -> solver.GetValue(i+1))

    (* print optimization result
        printfn "goal value = %A" (solver.GetValue(0).ToDouble())
        for i=1 to n do
            printfn "%A" (solver.GetValue(i).ToDouble())
    *)
        let alphaNew = new ResizeArray<Rational>()
        // extract the non-zero alpha values out and their corresponding support vectors

        
        let SVs = 
            let feats = new ResizeArray<float[]>()
            let labs = new ResizeArray<int>()
            // alphas below a small fraction of the largest alpha are treated
            // as numerically zero
            let maxAlpha = Array.max alphaValue
            let threshold = maxAlpha * Rational.op_Implicit(1e-8)
            for i=0 to n-1 do 
                if alphaValue.[i] > threshold then
                    feats.Add(ds.features.[i])
                    labs.Add(ds.labels.[i])
                    alphaNew.Add(alphaValue.[i])

            { features = feats |> Seq.toArray;
              labels = labs |> Seq.toArray;
            }

        // solve w_0 in the primal form
    
        let alphaNZ = alphaNew |> Seq.toArray

        // equation (5)
        // w0 is estimated only from support vectors strictly inside the box
        // (alpha < C); the median of their individual estimates is taken for
        // robustness against outliers
        let w0 = 
            alphaNZ
            |> Array.mapi (fun i a -> 
                if a = C then 
                    None
                else 
                    let mutable tmp = 0.0
                    for j=0 to SVs.NumSamples-1 do
                        tmp <- tmp + alphaNZ.[j].ToDouble() * (SVs.labels.[j] |> float) * (kernel SVs.features.[i] SVs.features.[j])
                    Some ((float SVs.labels.[i]) - tmp)
                )
            |> Array.filter (fun v -> match v with None -> false | _ -> true)
            |> Array.map (fun v -> match v with Some v -> v | _ -> 0.0)
            |> Array.median
        

        // construct an svm record
        {
            SVs = SVs;
            alpha = alphaNZ |> Array.map (fun v -> v.ToDouble());
            kernel = kernel;
            w0 = w0;
        }
    
    /// Decision value for one instance:
    /// f(x) = sum_i alpha_i * y_i * K(sv_i, x) + w0.
    let svmDisFunctionSingle (model:svmmodel) (vec: float array) = 
        let contribution i =
            model.alpha.[i] * (float model.SVs.labels.[i]) * (model.kernel model.SVs.features.[i] vec)
        let kernelSum = Seq.init model.NumSupporVectors contribution |> Seq.sum
        kernelSum + model.w0

    /// Decision values for every instance in [data].
    let svmDisFunction (model:svmmodel) (data:float array array) = 
        [| for row in data -> svmDisFunctionSingle model row |]

    /// Classifies every instance in [ds] and returns
    /// (fraction classified correctly, decision values).
    let svmClassify (model:svmmodel) (ds:dataset) = 
        // decision values, equation (4)
        let vals = svmDisFunction model ds.features

        // an instance is correct when the decision value's sign matches its label
        let isCorrect value label =
            (value > 0.0 && label = 1) || (value < 0.0 && label = -1)

        let nCorrect =
            Array.fold2
                (fun acc value label -> if isCorrect value label then acc + 1 else acc)
                0 vals ds.labels

        (float nCorrect) / (float ds.NumSamples), vals








// Smoke tests for the SVM implementation on small 2-D datasets.
module TestSVM = 
    open SVM
    open Kernel

    // X.txt: one tab-separated 2-feature instance per line;
    // y.txt: one integer label per line.
    let data = 
        {
            features = 
                let lines = IO.File.ReadAllLines @"C:\Users\v-yizhu\Desktop\X.txt"
                lines
                |> Array.map (fun line ->
                    let s = line.Split '\t'
                    [| s.[0] |> float; s.[1] |> float |]
                    );
            labels = 
                let lines = IO.File.ReadAllLines @"C:\Users\v-yizhu\Desktop\y.txt"
                lines
                |> Array.map int
        }
        
    let svm = buildSVM data 10.0 Kernel.linear 
    let classifyResult = svmClassify svm data

    // LogReg.fsx contains code to get iris data; `iris` must be evaluated in
    // the FSI session before these lines. Renamed from `svm`/`classifyResult`
    // to avoid duplicate value definitions within this module.
    let svmIris = buildSVM iris 10.0 Kernel.linear 
    let classifyResultIris = svmClassify svmIris iris

    // evaluate in FSI to inspect the trained model and the data
    svm.NumSupporVectors

    data



// the following code example shows how to visualize the svm
#r "MathFunc.dll"
#r "MatrixInterf.dll"
#r "ShoArray.dll"
#r "ShoViz.dll"
#r "PythonExt.dll"
#r "System.Windows.Forms.DataVisualization.dll"
#r "System.Windows.Forms.dll"
#r "System.Drawing.dll"
#r @"D:\users\v-yizhu\fsharpsho\FSharpExt\bin\Debug\FSharpExt.dll"

//#I @"D:\users\v-yizhu\fsharpsho\FSharpExt\bin\Release"

// NOTE(review): SHODIR presumably tells the Sho runtime where it is installed
// so its libraries can be located — confirm against Sho documentation.
System.Environment.SetEnvironmentVariable("SHODIR", @"C:\Program Files (x86)\Sho 2.0 for .NET 4")


open ShoNS.FSharpExt
open ShoNS.Array
open System
// 2-D dataset for visualization: X.txt holds space-separated feature pairs,
// y.txt one integer label per line.
let data = 
    let featureLines = IO.File.ReadAllLines @"D:\users\v-yizhu\fdataminingBlog\data\2dsvm\X.txt"
    let labelLines = IO.File.ReadAllLines @"D:\users\v-yizhu\fdataminingBlog\data\2dsvm\y.txt"
    // parse one "x1 x2" line into a 2-element feature vector
    let parseRow (line: string) =
        let s = line.Split([|" "|], System.StringSplitOptions.RemoveEmptyEntries)
        [| float s.[0]; float s.[1] |]
    { features = featureLines |> Array.map parseRow
      labels = labelLines |> Array.map int }

// All instances as a Sho matrix, plus the positive/negative subsets for plotting.
let X = DoubleArray.From(data.features)

let XPos, XNeg = 
    let pairs = Array.zip data.features data.labels
    // features of all instances carrying exactly the given label
    let featuresWithLabel lab =
        pairs
        |> Array.filter (fun (_, y) -> y = lab)
        |> Array.map fst
    DoubleArray.From(featuresWithLabel 1), DoubleArray.From(featuresWithLabel (-1))
        

// Scatter-plot the raw data: positives as red dots, negatives as blue crosses.
Plot.figure()
Plot.hold()
Plot.plot(XPos.[0.., 0..0], XPos.[0.., 1..1], ".r")
Plot.plot(XNeg.[0.., 0..0], XNeg.[0.., 1..1], "+b")


// Train the SVM and overlay its support vectors as size-12 circles.
let svm = SVM.buildSVM data 10.0 Kernel.linear 
let SV = DoubleArray.From(svm.SVs.features)
let plt = Plot.plot(SV.[0.., 0..0], SV.[0.., 1..1], "o", "size" --> 12.0)
plt.XTitle <- " X1 value"
plt.YTitle <- " X2 value"
plt.Title <- "SVM demonstration"

// Bounding box of the data along each of the two feature dimensions.
let minX1, maxX1 = FS.min(X.[0..,0..0]) :?> float, FS.max(X.[0.., 0..0]) :?> float
let minX2, maxX2 = FS.min(X.[0..,1..1]) :?> float, FS.max(X.[0.., 1..1]) :?> float

// Evaluate the decision function on an (m+1) x (m+1) grid over the bounding
// box (assumes FS.drange yields m+1 points for this stepSize — TODO confirm).
let m = 50
let mF = m |> float

let x1range = FS.drange(minX1, maxX1, stepSize = (maxX1-minX1)/mF) |> DoubleArray.From
let x2range = FS.drange(minX2, maxX2, stepSize = (maxX2-minX2)/mF) |> DoubleArray.From

let Z = FS.zeros(m+1, m+1)

for i=0 to m do 
    for j=0 to m do 
        Z.[i,j] <- SVM.svmDisFunctionSingle svm [|x1range.[i]; x2range.[j] |]


// Contour plots of the decision function: level 0 is the decision boundary,
// levels +1/-1 presumably mark the margins.
Plot.figure()

open ShoNS.Visualization
let p = ShoPlotHelper.Contour(Z, [1.0; 1.0])
Plot.contour(Z, contour_vals = [|0.0; 0.0|], bounds = [minX1 |> float32; minX2 |> float32 ; maxX1 |> float32; maxX2 |> float32])
ShoPlotHelper.Contour(Z, [0.0; 0.0])
ShoPlotHelper.Contour(Z, [-1.0; -1.0])