# -*- coding: UTF-8 -*-
import getopt
import json
import sys
import os
import base64

# Bootstrap third-party dependencies: try the import and, if it is missing,
# shell out to pip and retry once.
# NOTE(review): installing at import time is fragile (needs network access and
# write permission to site-packages); a requirements file would be safer.
try:
    import chardet
except ImportError:
    # print() form works under both Python 2 and Python 3.
    print("未安装chardet，pip install")
    os.system("pip install chardet")
    import chardet

# Install jieba (Chinese word segmentation) up front, before models use it.
try:
    import jieba
except ImportError:
    print("结巴分词没安装，pip install")
    os.system("pip install jieba")
    import jieba

from util.SparkConnectionBase import SparkDBConnection

# Default data-source configuration consumed by SparkDBConnection.
DEFAULT_DATA_CONF = {
    # Spark cluster settings
    "sparkMaster": "local[*]",
    "appName": "KD_XApi",
    "isLocal": True,
    "sparkWarehousePath": sys.path[0] + "/spark-warehouse",
    # Which backend the loader should read from
    "inputType": "mysql",
    # Database (mysql/mongodb) connection details
    "inputUri": "mysql://175.102.18.112:3306/",
    "inputBase": "kd_xapi",
    "user": "root",
    "password": "new.123",
    "inputCollection": "std_info",
    "inputSqlScheme": {
        "html": "str",
        "title": "str",
    },
    # Plain text-file input (used when inputType is a text file)
    "inputPath": "/Users/xiaojun/Desktop/wechat_topic.csv",
}

# Default clustering configuration: algorithm choice, text preprocessing
# switches, and where to write the results.
DEFAULT_CLUSTER_CONF = {
    "clustering": "KNN",
    "clusteringParams": {
        "k": 10,
        "maxIter": 20,
        "optimizer": "em",
        "describeTopics": 10,
    },
    # Text preprocessing settings
    "enableZH": True,
    "allowNumber": False,
    "limitCount": 2,
    "stopWords": "成人教育 教育 我国 发展 分析 建设 学习",
    "defaultStopWords": [],
    # Result output settings
    "outPutConfig": {
        "outWay": "console",  # textFile, console, database
        "outFilePath": "../resultsData/",
        "outDataType": "mongodb",
        "outUri": "mongodb://127.0.0.1:27017/",
        "outbase": "openCourseOut_online",
        "outCollection": "course_html_out",
        "outSqlSchema": {
            "results": "str",
        },
    },
}


def main(argv):
    """Program entry point: load the data through Spark, dynamically import
    the configured clustering class and run it.

    :param argv: command-line arguments (currently unused).
    """
    # BUG FIX: the original rebound DEFAULT_CLUSTER_CONF to a local dict that
    # contained only "clustering", discarding clusteringParams / stop-word /
    # output settings and making loadStopWords() fail with
    # KeyError('defaultStopWords').  Use the module-level config directly.

    # Build the Spark loader from the module-level data-source config.
    loader = SparkDBConnection(DEFAULT_DATA_CONF).loader
    # Fetch the cached data as a spark DataFrame.
    dataFrame = loader.dbCache().DataFrame
    # Obtain the SparkSession instance and quiet the logs.
    spark = loader.getSparkSession()
    spark.sparkContext.setLogLevel("ERROR")

    # Dynamically import the clustering class, e.g. models.knn.KNNClustering.
    # importlib.import_module is safer and clearer than exec-ing a built-up
    # import statement, and works on both Python 2 and 3.
    import importlib
    modelName = "models"
    classFile = DEFAULT_CLUSTER_CONF["clustering"].lower()
    className = DEFAULT_CLUSTER_CONF["clustering"].upper() + "Clustering"
    try:
        module = importlib.import_module(modelName + "." + classFile)
        classObj = getattr(module, className)
    except (ImportError, AttributeError) as e:
        print(u"模块初始化失败，请检查传入的算法是否存在:")
        print(e)
        sys.exit()

    # Extend the config with the default stop-word list, then cluster.
    conf = loadStopWords(DEFAULT_CLUSTER_CONF)
    instanceObj = classObj(spark, dataFrame, conf)
    instanceObj.clustering()


def loadConf(conf, confBase, type="cover"):
    """Merge the entries of ``conf`` into ``confBase`` and return it.

    ``confBase`` is modified in place.

    :param conf: dict carrying the overriding / new entries.
    :param confBase: base dict to update (mutated and returned).
    :param type: merge strategy (name kept for backward compatibility with
        keyword callers even though it shadows the builtin ``type``):
        - "cover":  overwrite values only for keys already in confBase;
          dict values are merged one level deep, ignoring nested keys the
          base dict does not know about.
        - "append": copy every entry of conf into confBase.
        - "short":  delete from confBase every key that appears in conf.
    :return: the mutated confBase.
    """
    for key in conf:
        if type == "cover":
            if key not in confBase:
                # Unknown top-level keys are deliberately ignored in cover mode.
                continue
            if isinstance(confBase[key], dict) and isinstance(conf[key], dict):
                # One-level deep merge: only nested keys already present in
                # the base dict are overwritten.  (Checking both sides avoids
                # an AttributeError when the override is not a dict.)
                for subKey in conf[key]:
                    if subKey in confBase[key]:
                        confBase[key][subKey] = conf[key][subKey]
            else:
                confBase[key] = conf[key]
        elif type == "append":
            confBase[key] = conf[key]
        elif type == "short":
            if key in confBase:
                del confBase[key]
    return confBase


def loadStopWords(conf):
    """Read data/stopWords_zh.txt and append its words to
    conf['defaultStopWords'].

    Each line's encoding is sniffed with chardet and decoded to unicode.

    :param conf: clustering config dict; its 'defaultStopWords' list is
        extended (a new list is assigned, the original list is not mutated).
    :return: the updated conf dict.
    """
    # Open in binary mode so each line is a byte string that chardet can
    # inspect and .decode() works on Python 3 as well as Python 2.
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open("data/stopWords_zh.txt", "rb") as f:
        lines = f.readlines()
    stopWords = []
    for line in lines:
        raw = line.strip()
        if not raw:
            # Skip blank lines: chardet returns encoding None for empty
            # input, which would make decode() raise.
            continue
        encoding = chardet.detect(raw)["encoding"] or "utf-8"
        stopWords.append(raw.decode(encoding))
    conf['defaultStopWords'] = conf['defaultStopWords'] + stopWords
    return conf


# Run only when executed as a script; drop the program name from argv.
if __name__ == "__main__":
    main(sys.argv[1:])
