# -*- coding: utf-8 -*-

import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
import jieba
from jieba import posseg
import collections
import copy
import codecs
from Recommender.General import *
import Core.Global as Global


# Pre-defined vocabulary file
def Build_UserDefinedVocabularyFile(database, filepath):
    """Export every tag-keyword name from the database into a user-dictionary file.

    BUG FIX: the original ended with ``f.close`` (attribute access without the
    call parentheses), so the file was never explicitly closed; a ``with``
    block now guarantees the handle is flushed and closed.

    :param database: project database wrapper exposing Find(db, collection).
    :param filepath: destination path for the UTF-8 vocabulary file.
    :return: list of vocabulary words written (one per line, CRLF-terminated).
    """
    print("Build User Defined Vocabulary File")
    tag_keyword = database.Find('rec_article', 'tag_keyword')
    vocabulary = []
    with codecs.open(filepath, 'w+', 'utf-8') as f:
        for tag in tag_keyword:
            word = tag['name']
            vocabulary.append(word)
            f.write(word + '\r\n')

    return vocabulary

# ---Text to Segments and Save to Database---
def Text_to_WordSegments_Batch(database):
    """Segment every article's content and upsert the segments back onto
    the article document (Recommender.Article, keyed by "Key").

    NOTE(review): the original assigned ``filter = {"limit": 1000}`` and then
    immediately overwrote it with ``{}`` — the limit was dead code; all
    articles are processed. Also renamed locals that shadowed the builtins
    ``filter`` and ``id``.

    :param database: project database wrapper (Find / Upsert).
    """
    articleDocs = database.Find("Recommender", "Article", {})
    for index, articleDoc in enumerate(articleDocs, start=1):
        text = articleDoc["content"]
        articleKey = articleDoc["Key"]
        # progress trace: one line per article with a timestamp
        print(index, datetime.datetime.now())
        segments = Text_to_WordSegment(text)
        database.Upsert("Recommender", "Article",
                        target={"Key": articleKey},
                        document={"segments": segments})


def WordSegment_Jieba(text, userdict_pathfilename=""):
    """Segment text with jieba, keeping only informative parts of speech.

    :param text: raw text to segment.
    :param userdict_pathfilename: optional user dictionary loaded before cutting.
    :return: list of words of length >= 2 whose POS flag is in the whitelist.
    """
    if userdict_pathfilename != "":
        jieba.load_userdict(userdict_pathfilename)

    # POS whitelist: nouns (plain / proper / org / person / place), verbs,
    # verbal nouns, abbreviations, and unclassified tokens
    keep_flags = ['n', 'nz', 'nt', 'nr', 'ns', 'v', 'vn', 'j', 'x']
    return [
        token.word
        for token in posseg.cut(text)
        if len(token.word) >= 2 and token.flag in keep_flags
    ]


def Text_to_WordSegment(text):
    """Segment text with jieba using the project user dictionary file."""
    userdict = "UserDefinedVocabulary.txt"
    return WordSegment_Jieba(text, userdict_pathfilename=userdict)


def Calc_IDF_Batch(database, filter=None):
    """Compute per-tag IDF values over all published articles and store
    one row per tag in rec_article.tag_idf.

    BUG FIX: replaced the mutable default argument ``filter={}`` with
    ``None`` — a shared default dict could leak mutations between calls.

    :param database: project database wrapper (Find / Insert_Many / Batch_Loop).
    :param filter: optional query filter for article_publish; default: no filter.
    """
    if filter is None:
        filter = {}

    print("Calc_IDF_Batch ", datetime.datetime.now())

    # ---load tag vocabulary; one appearance counter per tag name---
    vocabularyDoc = database.Find("rec_article", "tag_keyword")
    vocabulary = {}
    for word in vocabularyDoc:
        vocabulary[word["name"]] = {
            "Appearance": 0,
            "IdfValue": 0,
            "TagId": word["id"],
        }

    # ---parameters handed to the batch callback---
    params = {
        "DataBase": database,
        "StartIndex": 0,  # resume offset (e.g. 12840) after an aborted run
        "EndIndex": 0,
        "Vocabulary": vocabulary,
    }

    # ---loop articles in batches, counting document frequency per word---
    count = Batch_Loop(database, "rec_article", "article_publish",
                       callback=Calc_IDF_Batch_Callback,
                       params=params,
                       batch_size=1000,
                       printField="title",
                       filter=filter
                       )
    total_text = count

    # ---smooth raw appearance counts with Good-Turing---
    vocabulary = Good_Turing(vocabulary, adjustThreshod=10)

    # ---idf = log(N / (1 + document frequency))---
    for value in vocabulary.values():
        value["IdfValue"] = np.log(total_text / (1 + value["Appearance"]))

    # ---persist all tags in one bulk insert---
    dataList = [
        {"tag": word, "tag_id": value["TagId"], "idf_value": value["IdfValue"]}
        for word, value in vocabulary.items()
    ]
    database.Insert_Many("rec_article", "tag_idf", dataList)


def Calc_IDF_Batch_Callback(count, documents, params):
    """Batch_Loop callback: for each article, bump the document-frequency
    counter of every vocabulary word that appears in it.

    BUG FIX / cleanup: the original loop variable shadowed the ``wordCount``
    Counter it was iterating (``for word, wordCount in wordCount.items()``);
    a word is counted at most once per document, so a plain set of the
    article's segments is sufficient. Also ``text == None`` -> ``is None``
    and dropped unused locals.

    :param count: running article index carried across batches.
    :param documents: iterable of article documents for this batch.
    :param params: dict with "StartIndex" (skip until this index) and
        "Vocabulary" (word -> {"Appearance": int, ...}, mutated in place).
    :return: the updated running count.
    """
    startIndex = params["StartIndex"]
    vocabulary = params["Vocabulary"]

    for articleDoc in documents:
        count += 1
        if count < startIndex:
            continue  # resume support: skip already-processed articles

        if "title" in articleDoc:
            print(count, articleDoc["title"], datetime.datetime.now())

        text = articleDoc["content"]
        if text is None:
            continue

        segments = Text_to_WordSegment(text)
        # document frequency: each unique word counts once per article
        for word in set(segments):
            if word in vocabulary:
                vocabulary[word]["Appearance"] += 1

    return count


def Good_Turing(vocabulary, adjustThreshod):
    """Smooth the raw appearance counts in *vocabulary* with a Good-Turing
    style adjustment, re-estimating low frequencies while keeping the total
    frequency mass constant.

    :param vocabulary: dict word -> {"Appearance": int, ...}; mutated in place.
    :param adjustThreshod: frequencies up to (roughly) this value are
        re-estimated; bumped upward until it matches an observed frequency.
    :return: the same vocabulary dict with adjusted "Appearance" values.
    """
    # ---collect (word, appearance) pairs---
    appearance = []
    for word, value in vocabulary.items():
        appearance.append([word, vocabulary[word]["Appearance"]])
    appearance_df = pd.DataFrame(appearance, columns=['term', 'appearance'])

    # ---frequency-of-frequencies table, sorted by appearance ascending,
    # re-indexed 0..n-1 so .loc[i] is positional---
    count = appearance_df['appearance'].value_counts()
    count_df = pd.DataFrame(count).sort_index(ascending=True)
    count_df.columns = ['count']
    count_df['appearance'] = count_df.index
    count_df.index = range(len(count_df))
    denom = (count_df['appearance'] * count_df['count']).sum()  # total frequency mass, kept constant

    # ---locate the row index of the threshold frequency: if the threshold
    # exceeds every observed frequency take the whole table; if it is in
    # range but unobserved, bump it by 1 until it matches a frequency---
    limit_index = 0
    while adjustThreshod not in count_df['appearance'].tolist():
        if max(count_df['appearance'].tolist()) < adjustThreshod:
            limit_index = len(count_df)-1
            break
        else:
            adjustThreshod += 1

    if limit_index == 0:
        limit_index = count_df['appearance'].tolist().index(adjustThreshod)

    # ---re-estimate rows 1..limit_index-1 with the classic Good-Turing
    # formula r* = (r + 1) * N_{r+1} / N_r (before: count_df, after:
    # count_df_after); row 0 is handled separately below---
    count_df_after = copy.deepcopy(count_df)
    for i in range(1, limit_index):
        r = count_df.loc[i, 'appearance']
        dr = (r + 1) * count_df.loc[i + 1, 'count'] / count_df.loc[i, 'count']
        count_df_after.loc[i, 'appearance'] = dr

    # row 0 is re-derived from the mass difference (denom minus the adjusted
    # table's total — which at this point still includes row 0's original
    # mass) divided by row 0's count. NOTE(review): verify this is the
    # intended normalization; the subtracted sum includes row 0 itself.
    count_df_after.loc[0, 'appearance'] = (denom - (count_df_after['appearance'] * count_df_after['count']).sum()) / \
                                          count_df_after.loc[0, 'count']

    # ---map each word's original appearance to its adjusted value via the
    # row position that frequency occupied in the "before" table---
    for word, value in vocabulary.items():
        appearance_before = vocabulary[word]["Appearance"]
        appearance_before_index = count_df['appearance'].tolist().index(appearance_before)
        appearance_after = count_df_after.loc[appearance_before_index, 'appearance']
        vocabulary[word]["Appearance"] = appearance_after

    return vocabulary


#---Calc IDF Vector---
def Calc_IDFVector(texts, vocabulary):
    """Compute an IDF vector for *vocabulary* over the given raw texts.

    idf(word) = log(N / (1 + df(word))) with N = len(texts).

    Improvement: each text's segments are stored as a ``set`` so the per-word
    membership test is O(1) instead of an O(len(segments)) list scan, and the
    segment lists are built with a comprehension.

    :param texts: indexable collection of raw text strings.
    :param vocabulary: iterable of words; iteration order defines vector order.
    :return: list of idf values aligned with vocabulary's iteration order.
    """
    total_text = len(texts)

    # segment each text once up front; sets make `word in segments` O(1)
    segment_sets = [set(Text_to_WordSegment(texts[i])) for i in range(total_text)]

    idf_vector = []
    for i, word in enumerate(vocabulary, start=1):
        if i % 100 == 0:
            print("Processed ", i, " words")

        # document frequency: number of texts containing the word
        appears = sum(1 for segments in segment_sets if word in segments)
        idf_vector.append(np.log(total_text / (1 + appears)))

    return idf_vector


# ---Accepts raw text or pre-computed word segments---
def Text_to_Vector_TFIDF(text, vocabulary, idf_vector, isSegments=False):
    """Convert text (or pre-computed segments) into a TF-IDF vector.

    :param text: raw text, or a list of segments when isSegments is True.
    :param vocabulary: iterable of words; iteration order defines vector order.
    :param idf_vector: idf values aligned with vocabulary's order.
    :param isSegments: when True, *text* is already a list of word segments.
    :return: list of tf*idf values, one per vocabulary word.
    """
    segments = text if isSegments else Text_to_WordSegment(text)

    occurrences = collections.Counter(segments)
    total = len(segments)

    # term frequency per vocabulary word; 0 for absent words or empty input
    tf_vector = [
        (occurrences[word] / total) if (total != 0 and word in occurrences) else 0
        for word in vocabulary
    ]

    # element-wise TF x IDF
    return np.multiply(tf_vector, idf_vector).tolist()


def SaveTags(database, articleId, tagDocs, methdoName, realtime=None):
    """Upsert one article_tag row per tag document, keyed on (article_id, method).

    :param database: project database wrapper exposing Upsert.
    :param articleId: id of the tagged article.
    :param tagDocs: list of {"Tag", "Id", "Weight"} dicts.
    :param methdoName: tagging method name stored alongside each tag.
    :param realtime: unused placeholder kept for interface compatibility.
    """
    for tagDoc in tagDocs:
        record = {
            "article_id": articleId,
            "tag": tagDoc["Tag"],
            "tag_id": tagDoc["Id"],
            "weight": tagDoc["Weight"],
            "method": methdoName,
        }
        database.Upsert("rec_article", "article_tag",
                        target={"article_id": articleId, "method": methdoName},
                        document=record)
    # TODO: cache the tags to Redis (not implemented)


# ---Batch-Add tag Interface---
def AddTag_on_Article_Batch(database, filter, method):
    """Tag all matching published articles in batches via Batch_Loop.

    :param database: project database wrapper.
    :param filter: query filter for rec_article.article_publish.
    :param method: tagging method name passed through to AddTag_on_Article.
    """
    print("AddTag_on_Article_Batch")

    # parameters consumed by the batch callback
    params = {
        "Method": method,
        "DataBase": database,
        # NOTE(review): hard-coded resume offset — presumably left over from
        # restarting an aborted run; confirm before a fresh full pass
        "StartIndex": 39401,
        "EndIndex": 0,
    }

    Batch_Loop(database, "rec_article", "article_publish",
               callback=AddTag_on_Article_Batch_Callback,
               params=params,
               batch_size=1000,
               printField="title",
               filter=filter)


def AddTag_on_Article_Batch_Callback(count, documents, params):
    """Batch_Loop callback: run AddTag_on_Article on each article document.

    Fixes: ``text == None`` -> ``is None``; guarded the average-time print
    against ZeroDivisionError when the incoming count is 0 and the batch is
    empty; dropped the unused "EndIndex" local.

    :param count: running article index carried across batches.
    :param documents: iterable of article documents for this batch.
    :param params: dict with "DataBase", "Method" and "StartIndex".
    :return: the updated running count.
    """
    database = params["DataBase"]
    method = params["Method"]
    startIndex = params["StartIndex"]

    # tag_id cache shared by the tagging methods across this batch
    methodParams = {"IDbyWord": {}}

    for articleDoc in documents:
        count += 1

        if count < startIndex:
            continue  # resume support: skip already-processed articles

        print("Process", count, articleDoc["title"], datetime.datetime.now())
        articleId = articleDoc["id"]
        text = articleDoc["content"]

        if text is None:
            continue

        # Call Interface to Process
        tags = AddTag_on_Article(database, articleId, text, method, methodParams)

        print("Tags", count, tags)

    # avoid ZeroDivisionError when nothing was processed
    if count:
        print("AddTag_on_Article_Batch End, Avg Time ", Global.TOTAL_TIME / count)
    return count


# ---Add tag Interface---
def AddTag_on_Article(database, articleId, text, method="TopTFIDF", methodParams={}):
    """Tag a single article with the chosen method and persist the tags.

    BUG FIX: the default ``methodParams={}`` is mutated below (Database, Top,
    IDbyWord, IDFValues keys are added), so the shared default dict leaked
    state between calls. The default is kept for interface compatibility but
    is replaced with a fresh dict per call.

    :param database: project database wrapper.
    :param articleId: id of the article being tagged.
    :param text: raw article content.
    :param method: "PreDefined" or "TopTFIDF".
    :param methodParams: optional per-batch cache dict (mutated as a cache).
    :return: list of {"Tag", "Id", "Weight"} dicts that were saved.
    """
    if methodParams is None or methodParams is AddTag_on_Article.__defaults__[1]:
        methodParams = {}  # never mutate the shared default

    # ---dispatch to the configured tagging method---
    methodCallback = None
    if method == "PreDefined":
        methodCallback = TagMethod_PreDefined
    elif method == "TopTFIDF":
        methodCallback = TagMethod_TopTFIDF
        methodParams["Database"] = database  # needed to load idf values
        methodParams["Top"] = 5  # keep the top X tags
    else:
        print("No AddTag Method Defined or Wrong Name: ", method)

    # ---preload tag -> tag_id mapping as a cache---
    if "IDbyWord" not in methodParams or len(methodParams["IDbyWord"]) == 0:
        wordDocs = database.Find("rec_article", "tag_keyword")
        methodParams["IDbyWord"] = {doc["name"]: doc["id"] for doc in wordDocs}

    # ---run the tagging callback, timing it for the global average---
    tags = []
    if methodCallback:
        # NOTE(review): HTML stripping via BeautifulSoup was disabled in the
        # original; re-enable if content still contains markup.
        datetime1 = datetime.datetime.now()
        tags = methodCallback(text, methodParams)
        datetime2 = datetime.datetime.now()
        Global.TOTAL_TIME += (datetime2 - datetime1).total_seconds()

    # ---attach tag ids---
    idbyWord = methodParams["IDbyWord"]
    tagsWithID = [
        {"Tag": tag["Word"], "Id": idbyWord[tag["Word"]], "Weight": tag["Weight"]}
        for tag in tags
    ]

    # ---persist---
    SaveTags(database, articleId, tagsWithID, method)
    return tagsWithID


# Any word that exists in the tag vocabulary counts as a tag for this article
def TagMethod_PreDefined(text, params):
    """Tag an article with every vocabulary word that occurs in it.

    BUG FIX: the caller (AddTag_on_Article) reads ``tag["Word"]`` and
    ``tag["Weight"]`` from each returned tag, but this method returned bare
    strings, so the method="PreDefined" path raised TypeError. It now returns
    the same {"Word", "Weight"} dicts as TagMethod_TopTFIDF; a predefined
    match gets a neutral weight of 1.0.

    :param text: raw article text.
    :param params: must contain "IDbyWord" (word -> tag id mapping).
    :return: de-duplicated list of {"Word": word, "Weight": 1.0} dicts.
    """
    idbyWord = params["IDbyWord"]

    # segment the article
    segments = Text_to_WordSegment(text)

    tags = []
    seen = set()
    for word in segments:
        if word in idbyWord and word not in seen:
            seen.add(word)
            tags.append({"Word": word, "Weight": 1.0})

    return tags


# The X words with the highest TF-IDF weights become the article's tags
def TagMethod_TopTFIDF(text, params):
    """Tag an article with the X vocabulary words of highest TF-IDF weight.

    BUG FIX: the original loop appended and only then checked ``i >= top``,
    so with Top=5 it returned 6 tags; a slice now yields exactly ``top``
    (or fewer when less are available).

    :param text: raw article text.
    :param params: must contain "IDbyWord", "Database" and "Top"; on first
        call "IDFValues" (tag -> idf) is loaded from the DB and cached here.
    :return: list of {"Word", "Weight"} dicts, highest weight first.
    """
    idbyWord = params["IDbyWord"]
    database = params["Database"]

    # cache idf values in params on the first run
    if "IDFValues" not in params:
        idfValues = {}
        # equivalent of: SELECT tag_id, tag, max(modified_time)
        #                FROM rec_article.tag_idf GROUP BY tag_id
        documents = database.GroupBySort("rec_article", "tag_idf",
                                         fields=["tag_id", "tag", "idf_value"],
                                         groupby=["tag_id"],
                                         sortby={"modified_time": 1})
        for document in documents:
            idfValues[document["tag"]] = document["idf_value"]
        params["IDFValues"] = idfValues
    idfValues = params["IDFValues"]

    # segment and count terms
    segments = Text_to_WordSegment(text)
    totalCount = len(segments)
    wordCount = collections.Counter(segments)

    tfidfList = []
    for word, count in wordCount.items():
        # only words inside the tag vocabulary qualify
        if word not in idbyWord:
            continue
        tf = count / totalCount

        # words with no stored idf get an assumed 1/10000 occurrence probability
        if word not in idfValues:
            idf = np.log(10000)
        else:
            idf = idfValues[word]
        tfidfList.append({"Word": word, "Value": tf * idf})

    # keep exactly the `top` highest-weighted tags
    sortedTFidfList = sorted(tfidfList, key=lambda element: element["Value"], reverse=True)
    top = params["Top"]
    return [
        {"Word": doc["Word"], "Weight": doc["Value"]}
        for doc in sortedTFidfList[:top]
    ]


def TagMethod_TopAssociated():
    """Placeholder for an association-based tagging method (not implemented)."""
    return []

