﻿// Learn more about F# at http://fsharp.net

open Microsoft.SolverFoundation.Common
open Microsoft.SolverFoundation.Solvers
open Microsoft.SolverFoundation.Services
open Microsoft.SolverFoundation.FSharpDSL.Language
open Microsoft.SolverFoundation.FSharpDSL.Compiler


open System


/// A labeled dataset: one feature vector (a float array) per sample, with a
/// parallel array of integer class labels (+1 / -1 for two-class SVM use).
/// Invariant: features.Length = labels.Length — TODO confirm at load sites.
type dataset = 
    { features: float array array; // (instance = float array) array
      mutable labels: int array; // per-sample class labels, parallel to [features]
    }
    with
    /// Number of samples (rows) in the dataset.
    member x.NumSamples = x.features.Length


/// Extends the standard Array module: names not defined here still resolve to
/// FSharp.Core's Array (F# resolves module members one by one).
module Array = 
    /// Returns the middle element of [a] once sorted (the upper of the two
    /// middle elements when the length is even).  Raises an index-out-of-range
    /// exception on an empty array.
    let median (a:'a array) = 
        let s = Array.sort a
        let mid = s.Length / 2
        s.[mid]
        
/// Standard SVM kernel functions over float feature vectors.
module Kernel = 
    /// Linear kernel: the plain dot product of two equal-length vectors.
    let linear u v = 
        Array.map2 (fun x y -> x * y) u v |> Array.sum

    /// Polynomial kernel of degree [k]: (1 + <u, v>)^k.
    let polynomial k u v = 
        (1.0 + linear u v) ** float k

    /// Gaussian (RBF) kernel: exp(-beta/2 * ||u - v||^2).
    let gaussian beta u v = 
        let sqDist = Array.fold2 (fun s x y -> let d = x - y in s + d * d) 0.0 u v
        exp (-0.5 * beta * sqDist)

/// Two-class SVM training and classification, with the dual QP solved by the
/// Microsoft Solver Foundation F# DSL.
module SVM = 
    /// A trained soft-margin SVM model.
    type svmmodel = {
        SVs:dataset; // support vectors: the training samples with non-negligible alpha
        alpha:float array; // Lagrange multipliers, parallel to [SVs]
        kernel: float[] -> float[] -> float; // kernel used at training time; must be reused at prediction time
        w0:float; // bias term of the decision function
        }
        with
        // NOTE(review): name has a typo ("SupporVectors") but it is part of the
        // public interface and referenced by callers, so it is kept as-is.
        member x.NumSupporVectors = x.SVs.features.Length


    /// Trains a two-class soft-margin SVM by solving the dual QP (Eqs. (1)-(3))
    /// with the Solver Foundation F# DSL.
    ///   ds     - training data with +/-1 integer labels
    ///   C      - box constraint: 0 <= alpha_i <= C
    ///   kernel - kernel function K over two feature vectors
    /// Returns an [svmmodel] containing only the support vectors (alpha above a
    /// relative threshold), their alpha values, the kernel, and the bias w0.
    let buildSVM (ds:dataset) (C:float) (kernel:float[] -> float[] -> float) = 

        let n = ds.features.Length
        // let C = Rational.op_Implicit(C)
        let zero = Rational.Zero

        // y.[i] is the +/-1 label as a float; coef.[i,j] = 0.5 * y_i * y_j * K(x_i, x_j)
        // is the quadratic coefficient of the dual objective in Eq. (1).
        let y = Array.init ds.NumSamples (fun i -> float (ds.labels.[i]))
        let coef = Array2D.init ds.NumSamples ds.NumSamples (fun i j -> 
            0.5 * (y.[i] * y.[j] |> float) * (kernel ds.features.[i] ds.features.[j]))

        // solve the SVM using ODSL: maximize the dual objective subject to the
        // box constraints and the equality constraint sum_i alpha_i y_i = 0
        let dsl_solver = 
            let index = [ 0..n-1]
            Solver 
                <@
                    qp()
                    let alpha = vararray1 (index)
                    maximise (sum index (fun i -> alpha.[i]) - 
                                (sum index (fun i -> sum index (fun j -> 
                                    coef.[i,j] * alpha.[i] * alpha.[j])))) // Eq. (1)
                    where 
                        [
                            foreach index (fun i -> 0.0 <= alpha.[i]) // Eq. (2)
                            foreach index (fun i -> alpha.[i] <= C) // Eq. (2)
                            sum index (fun i -> alpha.[i] * y.[i]) = 0. // Eq. (3)
                        ]
                @>

        dsl_solver.Solve()

(*
        // THE CODE DIRECTLY USING MSF API
        // create a interior point solver, which solves the QP problem
        let solver = new InteriorPointSolver()

        // set the objective value / goal
        let _, goal = solver.AddRow("dual objective value")

        // false == maximizing the objective value
        // the value of goal is (1)
        solver.AddGoal(goal, 0, false) |> ignore

        // add the Lagrangian variables \alpha_i and set their bounds (0 <= \alpha_i <= C)
        let alpha = Array.create n 0
        for i=0 to n-1 do 
            let _, out = solver.AddVariable("alpha_"+i.ToString())
            alpha.[i] <- out
            solver.SetBounds(out, zero, C)

        // add constraint: \sum_i \alpha_i * y_i = 0
        // equation (2)
        let _, sumConstraint = solver.AddRow("SumConstraint")
        solver.SetBounds(sumConstraint, zero, zero);

        for i=0 to n-1 do
            // set the coefs for the sum constraint
            // equation (2)
            solver.SetCoefficient(sumConstraint, alpha.[i], Rational.op_Implicit(ds.labels.[i]))

            // add the \alpha_i terms into the objective
            solver.SetCoefficient(goal, alpha.[i], Rational.One)

            // add the quadratic terms
            for j=0 to i do 
                // coef = y_i * y_j * K(x_i, x_j)
                let coef = float(ds.labels.[i] * ds.labels.[j]) * (kernel ds.features.[i] ds.features.[j])
                if i=j then
                    solver.SetCoefficient(goal, Rational.op_Implicit(-0.5 * coef), alpha.[i], alpha.[j])
                else
                    solver.SetCoefficient(goal, Rational.op_Implicit(-coef), alpha.[i], alpha.[j])
                
        // use the default parameters 
        let param = new InteriorPointSolverParams()

        
        solver.Solve(param) |> ignore

        // get the alpha values out
        let alphaValue = Array.init n (fun i -> solver.GetValue(i+1))
*)

        // get the alphas from the DSL
        let variables = dsl_solver.Variables |> Seq.toArray
        let alphaValue = Array.init n (fun i -> dsl_solver.[variables.[i]])
        // Shadow the float C with its Rational image so that it can be compared
        // directly against the Rational alpha values in the w0 step below.
        let C = Rational.op_Implicit(C)
      
    (* print optimization result
        printfn "goal value = %A" (solver.GetValue(0).ToDouble())
        for i=1 to n do
            printfn "%A" (solver.GetValue(i).ToDouble())
    *)
        let alphaNew = new ResizeArray<Rational>()
        // extract the non-zero alpha values out and their corresponding support vectors

        
        let SVs = 
            let feats = new ResizeArray<float[]>()
            let labs = new ResizeArray<int>()
            // Numerically-zero alphas are dropped: only samples whose alpha exceeds
            // a small fraction (1e-8) of the largest alpha are kept as support vectors.
            let maxAlpha = Array.max alphaValue
            let threshold = maxAlpha * Rational.op_Implicit(1e-8)
            for i=0 to n-1 do 
                if alphaValue.[i] > threshold then
                    feats.Add(ds.features.[i])
                    labs.Add(ds.labels.[i])
                    alphaNew.Add(alphaValue.[i])

            { features = feats |> Seq.toArray;
              labels = labs |> Seq.toArray;
            }

        // solve w_0 in the primal form
    
        let alphaNZ = alphaNew |> Seq.toArray

        // equation (5): for every unbounded support vector (alpha < C) the margin
        // is exactly 1, giving a candidate w0 = y_i - sum_j alpha_j y_j K(x_i, x_j);
        // the median over all candidates is taken for numerical robustness.
        let w0 = 
            alphaNZ
            |> Array.mapi (fun i a -> 
                if a = C then 
                    None
                else 
                    let mutable tmp = 0.0
                    for j=0 to SVs.NumSamples-1 do
                        tmp <- tmp + alphaNZ.[j].ToDouble() * (SVs.labels.[j] |> float) * (kernel SVs.features.[i] SVs.features.[j])
                    Some ((float SVs.labels.[i]) - tmp)
                )
            |> Array.filter (fun v -> match v with None -> false | _ -> true)
            |> Array.map (fun v -> match v with Some v -> v | _ -> 0.0)
            |> Array.median
        

        // construct an svm record
        {
            SVs = SVs;
            alpha = alphaNZ |> Array.map (fun v -> v.ToDouble());
            kernel = kernel;
            w0 = w0;
        }
        
        

    /// Classifies every instance of [ds] with the trained [model].
    /// Decision function (equation (4)):
    ///   f(x) = sum_i alpha_i * y_i * K(sv_i, x) + w0
    /// Returns (accuracy over [ds], array of decision values f(x)); the sign of
    /// f(x) is the predicted +/-1 label.
    let svmClassify (model:svmmodel) (ds:dataset) = 
        // decision value for one instance
        let score x =
            let acc =
                { 0 .. model.NumSupporVectors - 1 }
                |> Seq.sumBy (fun i ->
                    model.alpha.[i] * (float model.SVs.labels.[i]) * (model.kernel model.SVs.features.[i] x))
            acc + model.w0
        let vals = Array.map score ds.features
        // a sample counts as correct when the decision value's sign matches its label
        let hit value label =
            if (value > 0.0) && (label = 1) || (value < 0.0) && (label = -1) then 1 else 0
        let nCorrect = Array.map2 hit vals ds.labels |> Array.sum
        (float nCorrect) / (float ds.NumSamples), vals




/// Demo dataset loaded from disk: X.txt holds one whitespace-separated
/// "x1 x2" feature pair per line, y.txt one integer label per line
/// (presumably +/-1 — verify against the files).
// NOTE(review): absolute, machine-specific Windows paths — adjust before
// running anywhere else.
let data = 
    {
        features = 
            let lines = IO.File.ReadAllLines @"D:\users\v-yizhu\fdataminingBlog\data\2dsvm\X.txt"
            lines
            |> Array.map (fun line ->
                let s = line.Split([| ' ' |], System.StringSplitOptions.RemoveEmptyEntries)
                [| s.[0] |> float; s.[1] |> float |]
                );
        labels = 
            let lines = IO.File.ReadAllLines @"D:\users\v-yizhu\fdataminingBlog\data\2dsvm\y.txt"
            lines
            |> Array.map int
    }
        
        

open SVM
// Train a linear-kernel SVM with C = 10 on the 2-D demo data, then print the
// training accuracy together with the per-sample decision values.
let svm = buildSVM data 10.0 Kernel.linear 
let classifyResult = svmClassify svm data
printfn "%A" classifyResult
