/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */

package com.xsengine;

import static com.baga.util.Print.*;
import com.xsengine.db.DBOperator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Set;

/**
 * Computes term-frequency and TF-IDF weights for query terms and contexts
 * backed by a posting table in the database.
 *
 * @author baga
 */
public class WeightContext {

    // Document entries ("...#tf" strings) produced by the most recent
    // getTFDNoHash() call; exposed via getDocTFD().
    private String[] docTFD;

    // Name of the posting table queried for TF/IDF data.
    private final String table;

    /**
     * Creates a weighting context that reads postings from the given table.
     *
     * @param tableUsed name of the posting table to query
     */
    public WeightContext(String tableUsed) {
        table = tableUsed;
    }

    /**
     * Sums the term frequency of every space-separated term of the query
     * inside the given context string.
     *
     * @param query   space-separated query terms
     * @param context context string in {@code "seg/seg/...#term#tf"} format
     *                (see {@link #getTFQ(String, String)})
     * @return total number of query-term occurrences in the context
     */
    public int getTFQ2(String query, String context) {
        int tfQ = 0;
        for (String term : query.split(" ")) {
            tfQ += getTFQ(context, term);
        }
        return tfQ;
    }

    /**
     * Counts how often {@code term} occurs in a context string.
     *
     * <p>The context is expected to look like {@code "seg1/seg2/...#term#tf"}:
     * the part before the first {@code '#'} is a {@code '/'}-separated path
     * whose segments are matched individually, and the token between the
     * first and the last {@code '#'} is matched as one more candidate.
     * Matching is case-insensitive.
     *
     * @param context context string in the format described above
     * @param term    term to count
     * @return number of case-insensitive matches
     * @throws StringIndexOutOfBoundsException if the context contains fewer
     *         than two {@code '#'} separators
     */
    public int getTFQ(String context, String term) {
        // Strip the trailing "#tf" part, keeping "path#term".
        String ctxSplit = context.substring(0, context.lastIndexOf('#'));
        int sep = ctxSplit.indexOf('#');
        String path = ctxSplit.substring(0, sep);
        String pathTerm = ctxSplit.substring(sep + 1);

        int val = 0;
        for (String segment : path.split("/")) {
            if (term.equalsIgnoreCase(segment)) {
                val++;
            }
        }
        if (term.equalsIgnoreCase(pathTerm)) {
            val++;
        }
        return val;
    }

    /**
     * Reads the term-frequency value of {@code term} for every document in
     * its posting list.
     *
     * <p>The posting string is split on {@code '~'} into entries and the TF
     * is parsed from the token after the FIRST {@code '#'} of each entry.
     * NOTE(review): {@link #getTFDNoHash(String)} parses after the LAST
     * {@code '#'} and passes the table name to {@code getPosting} while this
     * method does not — confirm both differences are intentional.
     *
     * @param term term whose posting list is read
     * @return one TF value per document, in posting order
     */
    private double[] getTFD(String term) {
        DBOperator db = new DBOperator();
        String docs;
        try {
            docs = db.getPosting(term);
        } finally {
            // Always release the connection, even if the query fails.
            db.closeConn();
        }
        String[] allDocs = docs.split("~");
        double[] val = new double[allDocs.length];
        for (int i = 0; i < allDocs.length; i++) {
            val[i] = Double.parseDouble(allDocs[i].split("#")[1]);
        }
        return val;
    }

    /**
     * Like {@link #getTFD(String)} but returns the TF values keyed by their
     * raw posting entry.
     *
     * <p>NOTE(review): the map key is the whole {@code "doc#tf"} entry, not
     * just a document id — confirm callers expect that.
     *
     * @param term term whose posting list is read
     * @return map from posting entry to its TF value
     */
    private HashMap<String, Double> getTFDHash(String term) {
        HashMap<String, Double> hm = new HashMap<String, Double>();
        DBOperator db = new DBOperator();
        String docs;
        try {
            docs = db.getPosting(term);
        } finally {
            db.closeConn();
        }
        for (String entry : docs.split("~")) {
            hm.put(entry, Double.parseDouble(entry.split("#")[1]));
        }
        return hm;
    }

    /**
     * Array-based replacement for {@link #getTFDHash(String)}: reads the
     * posting list of {@code term} from the configured table and returns the
     * TF values; the matching entries are kept in {@link #getDocTFD()}.
     *
     * @param term term whose posting list is read
     * @return one TF value per posting entry, in posting order
     */
    private double[] getTFDNoHash(String term) {
        DBOperator db = new DBOperator();
        String docs;
        try {
            docs = db.getPosting(term, table);
        } finally {
            db.closeConn();
        }
        docTFD = docs.split("~");
        double[] tfdVal = new double[docTFD.length];
        for (int i = 0; i < docTFD.length; i++) {
            // TF is the token after the LAST '#'; entries themselves may
            // contain '#' characters (e.g. "/catalog/category#new#1").
            tfdVal[i] = Double.parseDouble(
                    docTFD[i].substring(docTFD[i].lastIndexOf('#') + 1));
        }
        return tfdVal;
    }

    /**
     * Returns the posting entries fetched by the most recent
     * {@link #getTFDNoHash(String)} call ({@code null} before the first call).
     *
     * @return raw posting entries, aligned with the values returned by
     *         {@link #getTfIdfNoHash(String, int)}
     */
    public String[] getDocTFD() {
        return docTFD;
    }

    /**
     * Computes the inverse document frequency of a context:
     * {@code log(N / df)}, where {@code N} is the total number of documents
     * and {@code df} the number of posting entries for the context.
     *
     * @param context context/term whose IDF is computed
     * @return natural-log IDF value
     */
    private double getIDFD(String context) {
        // Original code opened two connections and closed only the second;
        // both are now closed, each guarded by try/finally.
        DBOperator countDb = new DBOperator();
        double nValue;
        try {
            nValue = countDb.countDoc();
        } finally {
            countDb.closeConn();
        }
        DBOperator postingDb = new DBOperator();
        String docs;
        try {
            docs = postingDb.getPosting(context, table);
        } finally {
            postingDb.closeConn();
        }
        String[] docExpl = docs.split("~");
        return Math.log(nValue / docExpl.length);
    }

    /**
     * Computes TF&middot;IDF per document for the given context.
     *
     * @param context context/term to weigh
     * @return TF values scaled by the context's IDF, in posting order
     */
    public double[] getTfIdfD(String context) {
        double idfValue = getIDFD(context);
        double[] tfValue = getTFD(context);
        for (int i = 0; i < tfValue.length; i++) {
            tfValue[i] *= idfValue;
        }
        return tfValue;
    }

    /**
     * Computes TF&middot;IDF per posting entry for the given context,
     * returned as a map (see {@link #getTFDHash(String)} for the key format).
     *
     * @param context context/term to weigh
     * @return map from posting entry to its TF&middot;IDF value
     */
    public HashMap<String, Double> getTfIdFHash(String context) {
        HashMap<String, Double> hm = getTFDHash(context);
        double idfd = getIDFD(context);
        // Replacing the value of an existing key is not a structural
        // modification, so put() while iterating the key set is safe.
        for (String docId : hm.keySet()) {
            hm.put(docId, hm.get(docId) * idfd);
        }
        return hm;
    }

    /**
     * Array-based replacement for {@link #getTfIdFHash(String)}; the
     * document entries matching the returned values are available through
     * {@link #getDocTFD()}.
     *
     * @param context  context/term to weigh
     * @param testCase 0 forces every TF to 1 (IDF-only weighting), 1 forces
     *                 the IDF to 1 (TF-only weighting); any other value
     *                 computes the full TF&middot;IDF
     * @return weighted values, in posting order
     */
    public double[] getTfIdfNoHash(String context, int testCase) {
        double[] allValTFD = getTFDNoHash(context);
        double idfd = getIDFD(context);
        switch (testCase) {
            case 0:
                // IDF-only variant: neutralize every TF.
                for (int i = 0; i < allValTFD.length; i++) {
                    allValTFD[i] = 1;
                }
                break;
            case 1:
                // TF-only variant: neutralize the IDF factor.
                idfd = 1;
                break;
        }
        for (int i = 0; i < allValTFD.length; i++) {
            allValTFD[i] *= idfd;
        }
        return allValTFD;
    }
}
