import ccnl.demo.algo.DT;
import ccnl.demo.algo.GBT;
import ccnl.demo.algo.LogisticReg;
import ccnl.demo.algo.LogisticRegSGD;
import ccnl.demo.util.Once;
import ccnl.demo.util.Preprocess;
import org.apache.spark.Accumulator;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.ml.feature.StringIndexer;
import org.apache.spark.sql.DataFrame;
import org.apache.spark.sql.SQLContext;
import org.apache.spark.sql.SaveMode;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
//import scala.collection.immutable.HashMap;

/**
 * Created by wong on 16/3/15.
 */
/**
 * Driver for the Spark preprocessing / model-training experiments.
 *
 * <p>Each run of {@link #main} performs one pipeline step; earlier experiment
 * invocations (one-hot encoding, LR/GBT/DT training, csv&lt;-&gt;parquet
 * conversions) lived here as commented-out code and were removed — recover
 * them from version control if needed.
 */
public class entry implements Serializable {

    /**
     * Current experiment: read the cleaned parquet dataset and export it to CSV.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        // NOTE(review): hard-coded cluster master; prefer supplying it via
        // spark-submit so the same jar runs locally and on the cluster.
        SparkConf conf = new SparkConf().setAppName("entry").setMaster("spark://222.201.145.253:7077");
        JavaSparkContext sc = new JavaSparkContext(conf);
        try {
            SQLContext sqlContext = new SQLContext(sc);

            Preprocess preprocess = new Preprocess(sqlContext);
            DataFrame df = sqlContext.read().parquet("/user/hg/now5/top107/whole_final_removeCountryEmpty.parquet");
            preprocess.dataframe2csv(df, "/user/hg/now5/top107/whole_final_removeCountryEmpty.csv");
        } finally {
            // Release cluster resources even when the job fails.
            sc.stop();
        }
    }

    /**
     * Toy filter job: counts lines containing 'a' and lines containing 'b'
     * in the README file and prints both counts.
     */
    public static void wordCount() {
        String logFile = "/user/hg/README.txt";
        SparkConf conf = new SparkConf().setAppName("work1");
        JavaSparkContext sc = new JavaSparkContext(conf);
        try {
            // cache(): the RDD is traversed twice (one count per filter).
            JavaRDD<String> logData = sc.textFile(logFile).cache();

            long numAs = logData.filter(new Function<String, Boolean>() {
                public Boolean call(String s) { return s.contains("a"); }
            }).count();

            long numBs = logData.filter(new Function<String, Boolean>() {
                public Boolean call(String s) { return s.contains("b"); }
            }).count();

            // Fixed previously garbled message ("Lines with a is' : ...").
            System.out.println("Lines with a: " + numAs + ", lines with b: " + numBs);
        } finally {
            sc.stop();
        }
    }

    /**
     * Loads a CSV sample through the spark-csv data source and shows the
     * first rows, as a quick visual check for missing values.
     */
    public static void checkMissingValue() {
        SparkConf conf = new SparkConf().setAppName("cMV");
        JavaSparkContext sc = new JavaSparkContext(conf);
        try {
            SQLContext sqlContext = new SQLContext(sc);

            HashMap<String, String> options = new HashMap<String, String>();
            options.put("header", "true");
            options.put("path", "/user/hg/test.csv");

            // sqlContext.load(source, options) is deprecated since Spark 1.4;
            // the read() builder (already used in main) is the equivalent API.
            DataFrame df = sqlContext.read().format("com.databricks.spark.csv").options(options).load();
            df.show();
        } finally {
            sc.stop();
        }
    }
}
