package Commons;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Random;

import org.netlib.util.intW;

import Commons.IOUtil;
import weka.classifiers.evaluation.Evaluation;
import weka.classifiers.functions.LibLINEAR;
import weka.core.Instance;
import weka.core.Instances;
import weka.core.converters.ArffLoader;
import weka.core.converters.ArffSaver;
import weka.core.converters.CSVLoader;

public class GenerateCsv {

    /**
     * CSV header for 100-topic feature vectors ("Topic1".."Topic100")
     * followed by the class attribute "Label". Used by
     * {@link #generateCsvForWordnet(String, String, String, String)}.
     */
    public String csvHeader = "Topic1,Topic2,Topic3,Topic4,Topic5,Topic6,Topic7,Topic8,Topic9,Topic10,Topic11,Topic12,Topic13,Topic14,Topic15,Topic16,Topic17,Topic18,Topic19,Topic20,Topic21,Topic22,Topic23,Topic24,Topic25,Topic26,Topic27,Topic28,Topic29,Topic30,Topic31,Topic32,Topic33,Topic34,Topic35,Topic36,Topic37,Topic38,Topic39,Topic40,Topic41,Topic42,Topic43,Topic44,Topic45,Topic46,Topic47,Topic48,Topic49,Topic50,Topic51,Topic52,Topic53,Topic54,Topic55,Topic56,Topic57,Topic58,Topic59,Topic60,Topic61,Topic62,Topic63,Topic64,Topic65,Topic66,Topic67,Topic68,Topic69,Topic70,Topic71,Topic72,Topic73,Topic74,Topic75,Topic76,Topic77,Topic78,Topic79,Topic80,Topic81,Topic82,Topic83,Topic84,Topic85,Topic86,Topic87,Topic88,Topic89,Topic90,Topic91,Topic92,Topic93,Topic94,Topic95,Topic96,Topic97,Topic98,Topic99,Topic100,Label";

    /**
     * Evaluates a LibLINEAR classifier on an ARFF dataset via 10-fold
     * cross-validation and writes the textual report (class details, summary,
     * confusion matrix) to {@code resFile}, echoing it to stdout as well.
     * Instances whose class label is "employment", "travel" or "school" are
     * removed before evaluation.
     *
     * @param arffFile path of the ARFF dataset; must contain a nominal
     *                 attribute named "Label"
     * @param resFile  path of the text file the report is written to
     * @throws IOException if the result writer cannot be opened; evaluation
     *                     errors are caught and logged (best-effort, as before)
     */
    public void classification(String arffFile, String resFile) throws IOException {
        BufferedWriter writer = IOUtil.getWriter(resFile);
        try {
            ArffLoader loader = new ArffLoader();
            loader.setFile(new File(arffFile));
            Instances dataset = loader.getDataSet();
            dataset.setClass(dataset.attribute("Label"));

            // Collect unwanted instances first, then remove them, so the
            // enumeration is never modified while it is being traversed.
            List<Instance> removeList = new ArrayList<Instance>();
            Enumeration<Instance> en = dataset.enumerateInstances();
            while (en.hasMoreElements()) {
                Instance ins = en.nextElement();
                String label = ins.classAttribute().value((int) ins.classValue());
                if (label.matches("(employment)|(travel)|(school)")) {
                    removeList.add(ins);
                }
            }
            for (Instance ins : removeList) {
                dataset.remove(ins);
            }

            LibLINEAR svm = new LibLINEAR();
            Evaluation eval = new Evaluation(dataset);
            System.out.println(new Date());
            // Fixed seed keeps fold assignment reproducible across runs.
            eval.crossValidateModel(svm, dataset, 10, new Random(1));
            System.out.println(
                    "----------------eval start-------------------");
            System.out.println(eval.toClassDetailsString());
            System.out.println(eval.toSummaryString());
            System.out.println(eval.toMatrixString());
            System.out.println(new Date());
            writer.append(eval.toClassDetailsString() + "\n");
            writer.append(eval.toSummaryString() + "\n");
            writer.append(eval.toMatrixString() + "\n");
            writer.flush();
        } catch (Exception e) {
            // Preserve original best-effort behaviour: log and fall through.
            e.printStackTrace();
        } finally {
            // BUGFIX: the writer previously leaked when evaluation threw.
            writer.close();
        }
    }

    /**
     * Converts a CSV file into ARFF format, marking the attribute "Label"
     * as the class attribute.
     * <p>
     * NOTE(review): the parameter roles look swapped — {@code csvFileName} is
     * read as the CSV source while {@code path} is used as the ARFF target
     * file. Kept as-is because existing callers rely on this ordering.
     *
     * @param path        file the generated ARFF output is written to
     * @param csvFileName CSV file to convert
     * @throws IOException declared for API compatibility; I/O errors are
     *                     currently caught and logged instead of propagated
     */
    public void csvToArff(String path, String csvFileName) throws IOException {
        CSVLoader in = new CSVLoader();
        ArffSaver out = new ArffSaver();
        try {
            System.out.println(new Date());
            in.setFile(new File(csvFileName));
            out.setFile(new File(path));
            Instances instances = in.getDataSet();
            instances.setClassIndex(instances.attribute("Label").index());
            out.setInstances(instances);
            out.writeBatch();
            System.out.println(new Date());
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Builds a single CSV file ({@code theta + ".csv"}) from an LDA theta
     * matrix and its parallel label file. Rows are shuffled, the first
     * {@code Train_rates} fraction forms the training part (capped at
     * {@code filternum} rows per class), and the remaining rows are appended
     * to the SAME file, uncapped.
     * <p>
     * NOTE(review): the shuffle is unseeded, so the train/test split differs
     * on every run — confirm this is intended.
     *
     * @param theta       per-document topic-distribution file, one row of
     *                    space-separated numbers per document
     * @param txt         label file, one raw label line per theta row
     * @param Train_rates fraction of rows assigned to the training part
     * @param filternum   maximum number of training rows emitted per class
     * @throws IOException if any of the files cannot be read or written
     */
    public void generateCsvForLDA(String theta, String txt, double Train_rates, int filternum) throws IOException {
        BufferedWriter trainWriter = IOUtil.getWriter(theta + ".csv");
        BufferedReader thetaReader = IOUtil.getReader(theta);
        BufferedReader newsReader = IOUtil.getReader(txt);
        try {
            ArrayList<String[]> rows = new ArrayList<String[]>();
            HashMap<String, String> tagMap = new HashMap<String, String>();
            boolean headerWritten = false;
            String dataLine;
            while ((dataLine = thetaReader.readLine()) != null) {
                String tagLine = newsReader.readLine();
                if (!headerWritten) {
                    // Derive the header width ("t0,t1,...,Label") from the
                    // first theta row.
                    StringBuilder header = new StringBuilder();
                    int topicCount = dataLine.split("\\s+").length;
                    for (int t = 0; t < topicCount; t++) {
                        header.append("t").append(t).append(",");
                    }
                    header.append("Label");
                    trainWriter.append(header + "\n");
                    headerWritten = true;
                }
                // Map each distinct raw label to a synthetic class name
                // c_0, c_1, ... in order of first appearance.
                if (!tagMap.containsKey(tagLine)) {
                    tagMap.put(tagLine, "c_" + tagMap.size());
                }
                String[] row = new String[2];
                row[0] = dataLine.trim().replaceAll(" ", ",");
                row[1] = tagMap.get(tagLine);
                rows.add(row);
            }

            Collections.shuffle(rows);
            int splitNum = (int) (rows.size() * Train_rates);

            // Group the training portion by class so the per-class cap can
            // be applied below.
            HashMap<String, ArrayList<String>> trainByTag = new HashMap<String, ArrayList<String>>();
            for (int i = 0; i < splitNum; i++) {
                String tag = rows.get(i)[1];
                if (!trainByTag.containsKey(tag)) {
                    trainByTag.put(tag, new ArrayList<String>());
                }
                trainByTag.get(tag).add(rows.get(i)[0]);
            }

            for (String key : trainByTag.keySet()) {
                int written = 0;
                for (String oneLine : trainByTag.get(key)) {
                    if (written++ >= filternum) {
                        break;
                    }
                    trainWriter.append(oneLine).append(",").append(key).append("\n");
                }
            }

            // Remaining ("test") rows go to the same file, without a cap.
            for (int i = splitNum; i < rows.size(); i++) {
                trainWriter.append(rows.get(i)[0]).append(",").append(rows.get(i)[1]).append("\n");
            }
        } finally {
            // BUGFIX: readers/writer previously leaked on exception paths.
            thetaReader.close();
            newsReader.close();
            trainWriter.close();
        }
    }

    /**
     * Builds a CSV file ({@code theta + ".csv"}) from an LDA theta matrix and
     * its parallel label file, one row per document, without any train/test
     * split. Labels are mapped to synthetic class names "c_1", "c_2", ... in
     * order of first appearance.
     *
     * @param theta per-document topic-distribution file
     * @param txt   label file, one raw label line per theta row
     * @throws IOException if any of the files cannot be read or written
     */
    public void generateCsvForLDA(String theta, String txt)
            throws IOException {
        BufferedWriter writer = IOUtil.getWriter(theta + ".csv");
        BufferedReader thetaReader = IOUtil.getReader(theta);
        BufferedReader newsReader = IOUtil.getReader(txt);
        try {
            // First pass: read every label and assign 1-based class ids.
            ArrayList<Integer> taglist = new ArrayList<Integer>();
            HashMap<String, Integer> tagMap = new HashMap<String, Integer>();
            String tagLine;
            while ((tagLine = newsReader.readLine()) != null) {
                String category = tagLine.trim();
                if (!tagMap.containsKey(category)) {
                    tagMap.put(category, tagMap.size() + 1);
                }
                taglist.add(tagMap.get(category));
            }

            // Second pass: emit one CSV row per theta row.
            boolean headerWritten = false;
            int tagIndex = 0;
            String tLine;
            while ((tLine = thetaReader.readLine()) != null) {
                tLine = tLine.trim();
                if (!headerWritten) {
                    StringBuilder header = new StringBuilder();
                    int topicCount = tLine.split("\\s+").length;
                    for (int t = 0; t < topicCount; t++) {
                        header.append("t").append(t).append(",");
                    }
                    header.append("Label");
                    writer.append(header + "\n");
                    headerWritten = true;
                }
                writer.append(tLine.replaceAll("\\s+", ",") + ",c_" + taglist.get(tagIndex++) + "\n");
            }
            writer.flush();
        } finally {
            // BUGFIX: neither reader was previously closed.
            thetaReader.close();
            newsReader.close();
            writer.close();
        }
    }

    /**
     * Builds a CSV feature file from per-word topic distributions: each
     * document's feature vector is the component-wise sum of its tokens'
     * distributions, each component then divided by the square root of the
     * vector's component sum, followed by the document's category and using
     * {@link #csvHeader} as header.
     * <p>
     * Each line of {@code txtFile} is expected to be
     * {@code "<category>\t<space-separated tokens>"}.
     *
     * @param thetaFile per-word topic-distribution file, one row per word
     * @param wordsFile word file parallel to {@code thetaFile}, one word per line
     * @param txtFile   labelled, tokenized documents
     * @param csvFile   output CSV path
     * @throws IOException if any of the files cannot be read or written
     */
    public void generateCsvForWordnet(String thetaFile, String wordsFile, String txtFile, String csvFile) throws IOException {
        // Load the word -> topic-distribution table from the parallel files.
        HashMap<String, Double[]> word2theta = new HashMap<String, Double[]>();
        BufferedReader thetaReader = IOUtil.getReader(thetaFile);
        BufferedReader wordsReader = IOUtil.getReader(wordsFile);
        try {
            String theta = thetaReader.readLine();
            String word = wordsReader.readLine();
            while (theta != null && word != null) {
                String[] ss = theta.trim().split(" ");
                Double[] dd = new Double[ss.length];
                for (int i = 0; i != ss.length; i++) {
                    dd[i] = Double.valueOf(ss[i]);
                }
                word2theta.put(word, dd);
                theta = thetaReader.readLine();
                word = wordsReader.readLine();
            }
        } finally {
            thetaReader.close();
            wordsReader.close();
        }

        BufferedWriter writer = IOUtil.getWriter(csvFile);
        BufferedReader newsReader = IOUtil.getReader(txtFile);
        try {
            writer.append(csvHeader + "\n");
            String nLine;
            while ((nLine = newsReader.readLine()) != null) {
                int tab = nLine.indexOf("\t");
                String category = nLine.substring(0, tab);
                String news = nLine.substring(tab + 1);
                Double[] commus = null;
                for (String token : news.trim().split(" ")) {
                    Double[] dd = word2theta.get(token);
                    if (dd == null) {
                        // BUGFIX: a token absent from the words file used to
                        // cause an NPE; skip it instead.
                        continue;
                    }
                    if (commus == null) {
                        commus = new Double[dd.length];
                        for (int i = 0; i != commus.length; i++) {
                            commus[i] = 0.0;
                        }
                    }
                    for (int i = 0; i != commus.length; i++) {
                        commus[i] += dd[i];
                    }
                }
                if (commus == null) {
                    // No known token in this document: emit no feature row
                    // (previously this also crashed with an NPE).
                    nLine = null;
                    continue;
                }
                Double norm = 0.0;
                for (int i = 0; i != commus.length; i++) {
                    norm += commus[i];
                }
                norm = Math.sqrt(norm);
                for (int i = 0; i != commus.length; i++) {
                    writer.append(commus[i] / norm + ",");
                }
                writer.append(category + "\n");
            }
            writer.flush();
        } finally {
            // BUGFIX: newsReader previously leaked; writer leaked on error.
            newsReader.close();
            writer.close();
        }
    }

    /**
     * Convenience wrapper for one (carnum, sampnum) experiment: derives the
     * theta/words/txt file names, generates the wordnet CSV under
     * {@code D:\sogounews_classify\wordnet} and converts it to ARFF.
     * <p>
     * TODO(review): output locations are hard-coded Windows paths —
     * parameterize. Also note the csvToArff call passes a relative CSV name;
     * verify it resolves against the intended working directory.
     *
     * @param thetaParentDir directory holding the per-experiment theta folders
     * @param txtParentDir   directory holding the labelled text files
     * @param carnum         experiment "car" size
     * @param sampnum        experiment sample index
     * @throws IOException if any of the involved files cannot be processed
     */
    public void generateCsvForWordnet(String thetaParentDir, String txtParentDir, int carnum, int sampnum) throws IOException {
        String thetaPath = thetaParentDir + "\\car" + carnum + "_sample" + sampnum;
        String thetaFile = thetaPath + "\\model-final.theta";
        String wordsFile = thetaPath + "\\SogouCA_unbalanced_car" + carnum + "_sample" + sampnum + ".words";
        String txtFile = txtParentDir + "\\SogouCA_unbalanced_car" + carnum + "_sample" + sampnum + ".txt";
        String csvFile = "D:\\sogounews_classify\\wordnet" + "\\car" + carnum + "_sample" + sampnum + "_wn.csv";
        this.generateCsvForWordnet(thetaFile, wordsFile, txtFile, csvFile);
        this.csvToArff("D:\\sogounews_classify\\wordnet", "car" + carnum + "_sample" + sampnum + "_wn.csv");
    }

    /**
     * Entry point: regenerates the wordnet CSV for one fixed dataset
     * (paths are machine-specific and hard-coded).
     */
    public static void main(String[] args) throws IOException {
        String dir = "L:\\zuoyuan\\wntm-实验数据备份\\sogounews\\content\\reweighted\\";
        GenerateCsv geneCsv = new GenerateCsv();
        geneCsv.generateCsvForWordnet(dir + "model-final.theta", dir + "SogouCAReducedContent.words", "L:\\zuoyuan\\wntm-实验数据备份\\sogounews\\content\\SogouCAReducedContent_new.txt", dir + "model-final.csv");
    }
}
