package ccnl.demo.util;

import org.apache.commons.lang.ArrayUtils;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.Function2;
import org.apache.spark.ml.feature.*;
import org.apache.spark.mllib.linalg.SparseVector;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.VectorUDT;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.rdd.RDD;
import org.apache.spark.sql.*;
import org.apache.spark.sql.catalyst.ScalaReflection;
import org.apache.spark.sql.catalyst.expressions.In;
import org.apache.spark.sql.types.StringType;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.codehaus.janino.Java;
import scala.Function1;
import scala.Tuple2;
import scala.util.parsing.combinator.testing.Str;

import javax.xml.crypto.Data;
import java.io.*;
import java.text.DecimalFormat;
import java.util.*;

/**
 * Created by wong on 16/3/20.
 */
/**
 * Spark SQL preprocessing utilities: file format conversion (CSV/JSON/parquet),
 * column and row filtering, min-max normalization, one-hot encoding, and
 * construction of sparse 0/1 tag vectors from the '|'-separated "C36" column.
 *
 * Not thread-safe; each instance wraps a single {@link SQLContext}.
 *
 * Created by wong on 16/3/20.
 */
public class Preprocess implements Serializable {
    public SQLContext sqlContext;

    public Preprocess(SQLContext sc){
        sqlContext = sc;
    }

    /**
     * Reads a parquet file from HDFS.
     *
     * @param parquetPath HDFS path of the parquet file
     * @return the loaded DataFrame
     */
    public DataFrame readParquetFile(String parquetPath){
        return this.sqlContext.read().parquet(parquetPath);
    }

    /**
     * Reads a JSON file.
     *
     * @param jsonPath path of the JSON file
     * @return the loaded DataFrame
     */
    public DataFrame readJsonFile(String jsonPath){
        return this.sqlContext.read().json(jsonPath);
    }

    /**
     * Converts a CSV file (with header, schema inferred) to JSON.
     *
     * @param overwriteOrNot true = SaveMode.Overwrite, false = SaveMode.Append
     */
    public void csv2json (String csvPath, String targetPath, Boolean overwriteOrNot) {
        DataFrame df = this.sqlContext.read().format("com.databricks.spark.csv")
                .option("inferSchema", "true").option("header", "true").option("char", "")
                .load(csvPath);
        SaveMode sm = overwriteOrNot ? SaveMode.Overwrite : SaveMode.Append;
        df.write().mode(sm).json(targetPath);
    }

    /**
     * Converts a CSV file (with header, schema inferred) to parquet.
     *
     * @param overwriteOrNot true = SaveMode.Overwrite, false = SaveMode.Append
     */
    public void csv2parquet(String csvPath, String targetPath, Boolean overwriteOrNot) {
        DataFrame df = this.sqlContext.read().format("com.databricks.spark.csv")
                .option("inferSchema", "true").option("header", "true").option("char", "").load(csvPath);
        SaveMode sm = overwriteOrNot ? SaveMode.Overwrite : SaveMode.Append;
        df.write().mode(sm).parquet(targetPath);
    }

    /**
     * Drops every column of {@code df} that is not listed in {@code usefulList}.
     *
     * @param usefulList names of the columns to keep
     * @return the DataFrame restricted to the useful columns
     */
    public DataFrame filtFields(DataFrame df, String[] usefulList) {
        List<String> keep = Arrays.asList(usefulList);
        for (String columnName : df.columns()) {
            if (!keep.contains(columnName)) {
                df = df.drop(columnName);
            }
        }
        return df;
    }

    /**
     * Keeps only rows with a known-good ID in C0 and a 0/1 flag in C1.
     *
     * @return the filtered DataFrame
     */
    public DataFrame filtValues(DataFrame df) {
        // C0: 正常ID值 (valid ID whitelist)
        df.registerTempTable("dftable");
        df = sqlContext.sql("select * from dftable where C0='108927' or C0='108456'" +
                " or C0='108457' or C0='109125' or C0='108967' or C0='107853'");

        // C1: 1或0 (must be the binary flag 1 or 0)
        df.registerTempTable("dftable");
        df = sqlContext.sql("select * from dftable where C1=1 or C1=0");
        return df;
    }

    /**
     * Adds a "timeseg" column bucketing the hour part of the C2 timestamp
     * (characters 12-13, i.e. the hour) into 3-hour segments '0'..'7';
     * '8' marks values outside 00-23.
     */
    public DataFrame convertDrt2Seg(DataFrame df) {
        df.registerTempTable("dftable");
        return sqlContext.sql("select *, case " +
                "when substring(C2,12,2)>='00' and substring(C2,12,2)<='02' then '0' " +
                "when substring(C2,12,2)>='03' and substring(C2,12,2)<='05' then '1' " +
                "when substring(C2,12,2)>='06' and substring(C2,12,2)<='08' then '2' " +
                "when substring(C2,12,2)>='09' and substring(C2,12,2)<='11' then '3' " +
                "when substring(C2,12,2)>='12' and substring(C2,12,2)<='14' then '4' " +
                "when substring(C2,12,2)>='15' and substring(C2,12,2)<='17' then '5' " +
                "when substring(C2,12,2)>='18' and substring(C2,12,2)<='20' then '6' " +
                "when substring(C2,12,2)>='21' and substring(C2,12,2)<='23' then '7' " +
                "else '8' end as timeseg from dftable");
    }

    /**
     * One-hot encodes a single column: string-indexes it, encodes the index,
     * and replaces the original column with "&lt;fieldName&gt;ohe".
     */
    public DataFrame oneHotEncoder_field(DataFrame df, String fieldName) {
        StringIndexerModel indexerModel = new StringIndexer().setInputCol(fieldName).setOutputCol(fieldName + "si").fit(df);
        DataFrame indexed = indexerModel.transform(df);
        OneHotEncoder encoder = new OneHotEncoder().setInputCol(fieldName + "si").setOutputCol(fieldName + "ohe");
        DataFrame encoded = encoder.transform(indexed);
        // Drop both the raw column and the intermediate string-index column.
        return encoded.drop(fieldName).drop(fieldName + "si");
    }

    /** One-hot encodes every column named in {@code list}. */
    public DataFrame oneHotEncoder(DataFrame df, String[] list) {
        for (String fieldName : list)
            df = oneHotEncoder_field(df, fieldName);
        return df;
    }

    /** Placeholder: intended to load a word list from {@code path}. TODO: implement. */
    public void getWordList(String path) {
        // Not implemented yet.
    }

    /**
     * Appends every (word, count) pair of the RDD to a local file, one pair per line.
     *
     * NOTE(fix): the previous implementation called {@code pRDD.map(...)} with a
     * driver-side FileWriter. {@code map} is lazy (no action ever ran) and the
     * writer is not serializable, so nothing was ever written. The pairs are now
     * collected to the driver and written there; the writer is closed even on error.
     *
     * @throws Exception if collecting or writing fails
     */
    public void writeFile(String path, JavaPairRDD<String, Integer> pRDD) throws Exception{
        try (FileWriter writer = new FileWriter(path, true)) {
            for (Tuple2<String, Integer> pair : pRDD.collect()) {
                writer.write(pair._1() + " " + pair._2() + "\n");
            }
        }
    }

    /**
     * Min-max normalizes one numeric column to [0, 1] in place, keeping the
     * original schema. Assumes the column is stored as double — TODO confirm.
     * NOTE(review): if max == min the result is NaN (division by zero), same as
     * the original behavior.
     */
    public DataFrame normalizeField(DataFrame df, String fieldName){
        df.registerTempTable("dftable");
        final int columnNums = df.columns().length;
        final double maxValue = sqlContext.sql("select max(" + fieldName + ") from dftable").first().getDouble(0);
        final double minValue = sqlContext.sql("select min(" + fieldName + ") from dftable").first().getDouble(0);
        System.out.println(maxValue);
        System.out.println(minValue);
        JavaRDD<Row> jrdd = df.toJavaRDD().map(row -> {
            int index = row.fieldIndex(fieldName);
            double newValue = (row.getDouble(index) - minValue) / (maxValue - minValue);
            ArrayList<Object> values = new ArrayList<>(columnNums);
            for (int i = 0; i < columnNums; i++) {
                values.add(i == index ? newValue : row.get(i));
            }
            return RowFactory.create(values.toArray());
        });
        return sqlContext.createDataFrame(jrdd, df.schema());
    }

    /** Min-max normalizes every column named in {@code list}. */
    public DataFrame normalizeDF(DataFrame df, String[] list) {
        for (String fieldName : list)
            df = normalizeField(df, fieldName);
        return df;
    }

    /**
     * Assembles the listed columns into a single "features" vector column and
     * drops the source columns.
     */
    public DataFrame vectorAssembler(DataFrame df, String[] list) {
        VectorAssembler assembler = new VectorAssembler().setInputCols(list).setOutputCol("features");
        DataFrame output = assembler.transform(df);
        for (String s : list)
            output = output.drop(s);
        return output;
    }

    /**
     * Counts the frequency of each '|'-separated tag in column C36 of
     * /user/hg/iqiyi3/col36.parquet and keeps tags occurring at least 1050 times.
     *
     * @return (tag, count) pairs sorted by descending count
     */
    public JavaPairRDD<String,Integer> getTagFrequence() {
        DataFrame df = sqlContext.read().parquet("/user/hg/iqiyi3/col36.parquet");
        JavaRDD<Row> jrdd = df.toJavaRDD();
        System.out.println("col36 count: " + jrdd.count());
        JavaRDD<String> words = jrdd.flatMap(v1 -> {
            int index = v1.fieldIndex("C36");
            return Arrays.asList(v1.getString(index).split("\\|"));
        });
        System.out.println("flatMap count " + words.count());
        JavaPairRDD<String, Integer> pairs = words.mapToPair(s -> new Tuple2<>(s, 1));
        JavaPairRDD<String, Integer> counts = pairs.reduceByKey((v1, v2) -> v1 + v2);
        // Sort by count (descending) by swapping to (count, tag) and back.
        JavaPairRDD<String,Integer> wordFreRDD = counts.mapToPair(Tuple2::swap).sortByKey(false).mapToPair(Tuple2::swap);
        return wordFreRDD.filter(x -> x._2() >= 1050);
    }

    /**
     * Like {@link #getTagFrequence()} but returns the K most frequent tags.
     *
     * @param K number of top tags to take
     * @return the top-K (tag, count) pairs, most frequent first
     */
    public List<Tuple2<String, Integer>> getTagFrequenceGivenK(int K) {
        DataFrame df = sqlContext.read().parquet("/user/hg/iqiyi3/col36.parquet");
        JavaRDD<Row> jrdd = df.toJavaRDD();
        System.out.println("col36 count: " + jrdd.count());
        JavaRDD<String> words = jrdd.flatMap(v1 -> {
            int index = v1.fieldIndex("C36");
            return Arrays.asList(v1.getString(index).split("\\|"));
        });
        System.out.println("flatMap count " + words.count());
        JavaPairRDD<String, Integer> pairs = words.mapToPair(s -> new Tuple2<>(s, 1));
        JavaPairRDD<String, Integer> counts = pairs.reduceByKey((v1, v2) -> v1 + v2);
        JavaPairRDD<String,Integer> wordFreRDD = counts.mapToPair(Tuple2::swap).sortByKey(false).mapToPair(Tuple2::swap);
        return wordFreRDD.take(K);
    }

    /**
     * Builds a sparse 0/1 vector of dimension {@code dim} from a '|'-separated
     * tag string: index i is 1.0 iff the tag mapped to i by {@code tagIndex}
     * appears in {@code raw}. A null/empty string yields an all-zero vector.
     * Since every value is 1.0, sorting the indices alone preserves the
     * index/value pairing (same semantics as the original inline code).
     */
    private static Vector buildTagVector(String raw, Map<String, Integer> tagIndex, int dim) {
        if (raw == null || raw.isEmpty()) {
            return Vectors.sparse(dim, new int[0], new double[0]);
        }
        ArrayList<Integer> hits = new ArrayList<>();
        for (String tag : raw.split("\\|")) {
            Integer idx = tagIndex.get(tag);
            if (idx != null) {
                hits.add(idx);
            }
        }
        int[] indices = ArrayUtils.toPrimitive(hits.toArray(new Integer[hits.size()]));
        Arrays.sort(indices);
        double[] values = new double[indices.length];
        Arrays.fill(values, 1.0);
        return Vectors.sparse(dim, indices, values);
    }

    /** Appends a row made of all original columns plus the given vector. */
    private static Row appendVector(Row x, int columnNum, Vector v) {
        ArrayList<Object> rowl = new ArrayList<>(columnNum + 1);
        for (int i = 0; i < columnNum; i++) {
            rowl.add(x.get(i));
        }
        rowl.add(v);
        return RowFactory.create(rowl.toArray());
    }

    /**
     * Appends a "tagvec" sparse-vector column built from the top-K tags of C36
     * and writes the result to /user/hg/iqiyi3/tag.parquet.
     *
     * NOTE(fix): the original tested {@code x.getString(index) != ""} — a string
     * identity comparison that is always true — so the empty-string branch never
     * ran; an empty/null C36 now correctly produces an all-zero vector.
     */
    public void getDenseVectorGivenK(int K) {
        DataFrame dataFrame = sqlContext.read().parquet("/user/hg/iqiyi3/3_segDrt.parquet");
        List<Tuple2<String, Integer>> wordlist = getTagFrequenceGivenK(K);
        // Map each top-K tag to its rank position, which becomes its vector index.
        HashMap<String, Integer> hm = new HashMap<>();
        for (int i = 0; i < wordlist.size(); i++) {
            hm.put(wordlist.get(i)._1(), i);
        }

        final int length = hm.size();
        final int columnNum = dataFrame.columns().length;
        StructType newSchema = dataFrame.schema().add("tagvec", new VectorUDT(), true);

        JavaRDD<Row> jrdd2 = dataFrame.toJavaRDD().map(x -> {
            String raw = x.getString(x.fieldIndex("C36"));
            return appendVector(x, columnNum, buildTagVector(raw, hm, length));
        });
        DataFrame pro = sqlContext.createDataFrame(jrdd2, newSchema);
        pro.write().mode(SaveMode.Overwrite).parquet("/user/hg/iqiyi3/tag.parquet");
    }

    /**
     * Test variant of {@link #getDenseVector()}: keeps only the first column
     * plus the tag vector and writes to /user/hg/iqiyi3/tagTest.parquet.
     *
     * NOTE(fix): the original passed a schema containing ALL input columns plus
     * "tagvec" while each output row carried only two fields — a schema/row
     * arity mismatch that cannot write successfully. The schema is now the
     * first input column plus "tagvec", matching the rows.
     */
    public void getDenseVectorTest() {
        DataFrame dataFrame = sqlContext.read().parquet("/user/hg/iqiyi3/col36.parquet");
        JavaPairRDD<String,Integer> wl = getTagFrequence();
        Map<String, Integer> m = wl.collectAsMap();
        HashMap<String, Integer> hm = new HashMap<>();
        int hmindex = 0;
        for (String s : m.keySet()) {
            hm.put(s, hmindex);
            hmindex++;
        }

        // Distinct keys of wl == entries of hm, so hm.size() equals wl.count().
        final int length = hm.size();
        StructType oldSchema = dataFrame.schema();
        StructType newSchema = new StructType()
                .add(oldSchema.fields()[0])
                .add("tagvec", new VectorUDT(), true);

        JavaRDD<Row> jrdd2 = dataFrame.toJavaRDD().map(x -> {
            String raw = x.getString(x.fieldIndex("C36"));
            Vector v = buildTagVector(raw, hm, length);
            return RowFactory.create(x.get(0), v);
        });
        DataFrame pro = sqlContext.createDataFrame(jrdd2, newSchema);
        pro.write().mode(SaveMode.Overwrite).parquet("/user/hg/iqiyi3/tagTest.parquet");
    }

    /**
     * Appends a "tagvec" sparse-vector column built from all tags occurring at
     * least 1050 times (see {@link #getTagFrequence()}) and writes the result
     * to /user/hg/iqiyi3/tag.parquet.
     */
    public void getDenseVector() {
        DataFrame dataFrame = sqlContext.read().parquet("/user/hg/iqiyi3/3_segDrt.parquet");
        JavaPairRDD<String,Integer> wl = getTagFrequence();
        Map<String, Integer> m = wl.collectAsMap();
        // Assign each frequent tag a stable vector index.
        HashMap<String, Integer> hm = new HashMap<>();
        int hmindex = 0;
        for (String s : m.keySet()) {
            hm.put(s, hmindex);
            hmindex++;
        }

        final int length = hm.size();
        final int columnNum = dataFrame.columns().length;
        StructType newSchema = dataFrame.schema().add("tagvec", new VectorUDT(), true);

        JavaRDD<Row> jrdd2 = dataFrame.toJavaRDD().map(x -> {
            String raw = x.getString(x.fieldIndex("C36"));
            return appendVector(x, columnNum, buildTagVector(raw, hm, length));
        });
        DataFrame pro = sqlContext.createDataFrame(jrdd2, newSchema);
        pro.write().mode(SaveMode.Overwrite).parquet("/user/hg/iqiyi3/tag.parquet");
    }

    /**
     * Discretizes the "usefulctr" column into buckets of width {@code step}:
     * each value becomes floor(value / step) stored back as a double, keeping
     * the original schema.
     */
    public DataFrame discreteCtr(DataFrame df, double step) {
        final int len = df.columns().length;
        JavaRDD<Row> jrdd2 = df.toJavaRDD().map(v -> {
            int index = v.fieldIndex("usefulctr");
            double bucket = (int) (v.getDouble(index) / step);
            ArrayList<Object> values = new ArrayList<>(len);
            for (int i = 0; i < len; i++) {
                values.add(i == index ? bucket : v.get(i));
            }
            return RowFactory.create(values.toArray());
        });
        return sqlContext.createDataFrame(jrdd2, df.schema());
    }

    /**
     * 统计空值比例: for every column Ci of the JSON file, counts null/empty
     * values and writes "Ci  empty: n  total: N  ratio: r" lines to a local
     * result file (overwriting it). Errors are logged, not rethrown, matching
     * the original best-effort behavior.
     */
    public void checkSparse(String filePath, String resultLoc) {
        DataFrame df = this.sqlContext.read().json(filePath);
        df.registerTempTable("tmpTable");
        long total_num = df.count();
        DecimalFormat fm = new DecimalFormat("0.00000");
        int schemaLength = df.columns().length;
        try (FileWriter fw = new FileWriter(resultLoc, false)) {
            for (int i = 0; i < schemaLength; i++) {
                // Column names are generated here ("C" + i), not user input.
                String queryStr = "select C" + i + " from tmpTable where C" + i + " is null or C" + i + "=''";
                long empty_num = this.sqlContext.sql(queryStr).count();
                double ratio_num = (double) empty_num / total_num;
                String result_Str = "C" + i + "  empty: " + empty_num + "  total: " + total_num
                        + "  ratio: " + fm.format(ratio_num) + "\n";
                fw.write(result_Str);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /** Loads the 2015-06-01..07 CSV files and stores them as one parquet table. */
    public void create01to08_Par() {
        DataFrame df = this.sqlContext.read().format("com.databricks.spark.csv")
                .option("inferSchema", "true")
                .load("/user/hg/2015-06-0[1-7].csv");
        df.registerTempTable("01to08_table");
        df.write().parquet("/usr/hg/json/01to08_table");
    }

    /** Fits a CountVectorizer on {@code inputCol} and adds {@code outputCol}. */
    public DataFrame cntVec(DataFrame df, String inputCol, String outputCol) {
        CountVectorizerModel cvmodel = new CountVectorizer()
                .setInputCol(inputCol)
                .setOutputCol(outputCol).fit(df);
        return cvmodel.transform(df);
    }

    /** Writes the DataFrame as CSV with a header row. */
    public void dataframe2csv(DataFrame df, String path) {
        df.write().format("com.databricks.spark.csv").option("header", "true")
                .save(path);
    }
}
