# -*- coding: utf-8 -*-
import sys
import logging
import datetime
import os

import re
from pyspark import Row

sys.path.append("../")
from pyspark.ml.feature import Tokenizer, StopWordsRemover
from pyspark.sql.types import *
import pymongo
import jieba


class Clustering(object):
    """Text-preprocessing pipeline for clustering jobs on Spark.

    Turns a DataFrame with an ``html`` string column into a DataFrame with a
    ``words`` array column: special-character stripping, tokenization (jieba
    for Chinese, Spark ML Tokenizer otherwise), stop-word removal, optional
    pure-number filtering and minimum-length filtering.  Also provides
    helpers to format topic-model results and persist them to console, text
    file or MongoDB.
    """

    # Defaults; both are overridden from ``params`` in __init__.
    LIMIT_COUNT = 2   # minimum token length kept by filterLength
    STOPWORDS = []    # default (Chinese) stop-word list

    def __init__(self, ctx, df, params, milling=True):
        """Configure the pipeline and (optionally) run it.

        :param ctx: SparkSession used to create DataFrames.
        :param df: input DataFrame with an ``html`` string column.
        :param params: job configuration dict; keys read here:
            ``outPutConfig``, ``limitCount``, ``defaultStopWords``,
            ``enableZH``, ``stopWords``, ``allowNumber``, ``clustering``.
        :param milling: when True run the full cleaning pipeline; when
            False keep ``df`` as-is (assumed already tokenized).
        """
        self.params = params
        self.outPutConfig = params["outPutConfig"]
        self.LIMIT_COUNT = self.params["limitCount"]
        self.STOPWORDS = self.params["defaultStopWords"]
        self.jiebaObj = jieba
        # Pass the path instead of an open file object so jieba opens and
        # closes the dictionary itself -- the original open() leaked the
        # file handle.
        self.jiebaObj.load_userdict(os.path.join(os.getcwd(), "data", "dic.txt"))
        self.jiebaObj.setLogLevel("ERROR")
        self.spark = ctx
        if milling:
            # Step 0: strip punctuation / special characters.
            df = self.filterSpecailChar(df)
            # Step 1: tokenize (jieba for Chinese, Spark Tokenizer otherwise).
            if self.params["enableZH"]:
                df = self.jiebaCut(df)
            else:
                df = self.tokenizeRDD(df)
            # Step 2: remove Spark ML's built-in (English) stop words.
            df = self.stopWords(df)
            if self.params["enableZH"]:
                # Chinese needs its own stop-word list on top.
                df = self.stopDefaultWords(df)
            # Step 3: remove user-supplied stop words, if any.
            if self.params["stopWords"]:
                df = self.stopUserWords(df)
            # Step 4: optionally drop purely numeric tokens.
            if not self.params["allowNumber"]:
                df = self.filterNumber(df)
            # Step 5: drop tokens shorter than the configured minimum.
            self.df = self.filterLength(df)
        else:
            self.df = df

    def filterSpecailChar(self, df):
        """Strip punctuation / special characters from the ``html`` column.

        NOTE(review): the original defined two inner helpers (filterZHChar /
        filterENChar) whose character classes were set-identical and only
        ever called the EN one; they are merged into a single helper here.
        """
        pattern = u'[’!"#$%&\'()*+,-./:;<=>?@，：。\\?★、…【】《》？“”‘’！[\\]^_`{|}~]+'

        def stripSpecial(text):
            try:
                return re.sub(pattern, '', text)
            except TypeError:
                # Non-string rows pass through unchanged (best-effort,
                # matching the original behaviour).
                return text

        return df.rdd.map(lambda x: x["html"]) \
                     .map(lambda y: Row(html=stripSpecial(y))).toDF()

    def filterLength(self, df):
        """Drop tokens shorter than LIMIT_COUNT characters."""
        limitCount = self.LIMIT_COUNT  # local copy: Spark closure must not capture self
        schema = StructType([
            StructField("words", ArrayType(StringType()), True)
        ])
        dfL = df.rdd.map(lambda x: x["words"]) \
                    .map(lambda ws: Row(words=[w for w in ws if len(w) >= limitCount]))
        return self.spark.createDataFrame(dfL, schema=schema)

    def filterNumber(self, df):
        """Drop tokens that are purely digits (mixed alphanumerics are kept)."""
        def keepNonNumeric(tokens):
            kept = []
            for w in tokens:
                try:
                    if not w.isdigit():
                        kept.append(w)
                except AttributeError:
                    # Non-string token: silently dropped, matching the
                    # original best-effort behaviour.
                    pass
            return kept

        return df.rdd.map(lambda x: x["words"]) \
                     .map(lambda ws: Row(words=keepNonNumeric(ws))).toDF()

    def tokenizeRDD(self, df):
        """Tokenize the ``html`` column with Spark ML's Tokenizer (non-Chinese)."""
        tokenizer = Tokenizer(inputCol="html", outputCol="words")
        return tokenizer.transform(df).select("words").toDF("words")

    def stopWords(self, df):
        """Remove Spark ML's default (English) stop words from ``words``."""
        remover = StopWordsRemover(inputCol="words", outputCol="filterWords")
        return remover.transform(df).select("filterWords").toDF("words")

    def stopDefaultWords(self, df):
        """Remove the configured default (Chinese) stop words."""
        stopSet = set(self.STOPWORDS)  # set: O(1) membership vs O(n) list scan

        def removeDefaultWords(tokens):
            return [w for w in tokens if w not in stopSet]

        return df.rdd.map(lambda x: x["words"]) \
                     .map(lambda ws: Row(words=removeDefaultWords(ws))).toDF()

    def stopUserWords(self, df):
        """Remove user-supplied stop words (space-separated in params["stopWords"])."""
        remover = StopWordsRemover(inputCol="words", outputCol="userFilterWords",
                                   stopWords=self.params["stopWords"].split(" "))
        return remover.transform(df).select("userFilterWords").toDF("words")

    def jiebaCut(self, df):
        """Tokenize the ``html`` column with jieba (Chinese word segmentation)."""
        jiebaObj = self.jiebaObj  # local ref: Spark closure must not capture self
        return df.rdd.map(lambda x: Row(words=list(jiebaObj.cut(x["html"])))).toDF()

    def getWordsFromIndex(self, index, bdMapList):
        """Resolve a vocabulary index to its word via the broadcast map.

        :param index: term index produced by the topic model.
        :param bdMapList: Spark broadcast of (index, word) pairs.
        :return: the matching word, or the string "null" when not found.
        """
        for entry in bdMapList.value:
            if entry[0] == index:
                return entry[1]
        return "null"

    def storeResultsToDataBase(self, results):
        """Persist one topic's result dict to the configured output store."""
        if self.outPutConfig["outDataType"] == "mongodb":
            results["create_time"] = datetime.datetime.now()
            client = pymongo.MongoClient(self.outPutConfig['outUri'])
            try:
                collection = client.get_database(self.outPutConfig['outbase']) \
                    .get_collection(self.params["clustering"] + "_" +
                                    self.outPutConfig['outCollection'])
                # save() was deprecated and removed in newer PyMongo; the
                # result dict never carries an _id, so insert_one is the
                # equivalent operation.
                collection.insert_one(results)
            finally:
                # The original leaked the client connection.
                client.close()

        if self.outPutConfig["outDataType"] == "mysql":
            pass  # MySQL output not implemented

    def outPut(self, results, bdMapList):
        """Format topic-model results and emit them to console/file/database.

        :param results: DataFrame with ``topic``, ``termIndices`` and
            ``termWeights`` columns (one row per topic).
        :param bdMapList: broadcast (index, word) vocabulary map.
        """
        import time
        try:
            # Python 2 only: force utf-8 default encoding for the
            # str/unicode concatenation below; NameError (no reload
            # builtin) makes this a no-op on Python 3.
            reload(sys)
            sys.setdefaultencoding('utf-8')
        except NameError:
            pass
        indices = results.select("topic").collect()
        termIndices = results.select("termIndices").collect()
        termWeights = results.select("termWeights").collect()
        results_name = type(self).__name__ + "_" + str(time.time())
        outPutStr = "#####\n"
        outPutArr = []
        for i in range(len(indices)):
            outPutDic = {
                'words': [],
                'weight': [],
                'topic': i,
                'results_name': results_name,
            }
            outPutStr += "Topic_" + str(i) + ":\n"
            for j in range(len(termIndices[i]['termIndices'])):
                # Resolve once and reuse -- the original did a second
                # broadcast lookup just to build the display string.
                word = self.getWordsFromIndex(termIndices[i]['termIndices'][j], bdMapList)
                weight = str(termWeights[i]['termWeights'][j])
                outPutDic['words'].append(word)
                outPutDic['weight'].append(weight)
                outPutStr += "" + word + "   " + weight + "\n"
            outPutArr.append(outPutDic)
            if self.outPutConfig["outWay"] == "database":
                self.storeResultsToDataBase(outPutDic)
        outPutStr += "#####\n"

        if self.outPutConfig["outWay"] == "console":
            print(outPutStr)

        if self.outPutConfig["outWay"] == "textFile":
            # os.system never raises, so the original try/except around it
            # was dead; create the directory explicitly instead.
            if not os.path.isdir("../resultsData"):
                try:
                    os.makedirs("../resultsData")
                except OSError as e:
                    print(e)
            # Reuse results_name so the file name matches the name stored
            # with each topic dict (the original computed a second, slightly
            # different timestamp here).
            fileName = self.outPutConfig["outFilePath"] + results_name + ".txt"
            # The original decode("utf-8").encode("utf-8") round-trip was a
            # no-op; write the string directly, and close the file via with.
            with open(fileName, "w") as f:
                f.write(outPutStr)

        if self.outPutConfig["outWay"] == "database":
            pass  # already persisted per-topic inside the loop above











