import datetime
import os
import random

import pandas as pd

import Core.Global as Global
import Core.IO as IO
import Recommender.FeatureRepresentation as Feature
import Recommender.RecommenderTagBased as RecommenderTagBased

# Commented-out alternative wiring via Core.Config. NOTE(review): it is the
# only place a `realtime` handle (config.RealTime(db=1)) is created, and
# Check_MissingUsers_WhoReadArticles below depends on `realtime` existing —
# re-enable (or port) that line before calling it.
# from Core.Config import *
# config = Config()
# database = config.DataBase()
# realtime = config.RealTime(db=1)

# Active configuration: `database` is the shared handle used by every
# function and top-level call in this file.
from Recommender.Config import *
config = Config()
database = config.DataBase()


def Add_CorpusTag():
    """Flag every article belonging to corpus "Corpus5" with is_corpus=True.

    Loads the corpus ID list from the Corpus collection and upserts the
    flag onto each matching Article document, printing a running count.
    """
    # Index the flag so later queries that filter on it are fast.
    database.creatIndex("Recommender", "Article", "is_corpus")
    # NOTE(review): lowercase find() with a `query=` kwarg differs from the
    # capitalized Find() used elsewhere in this file — confirm both exist on
    # the DataBase API.
    corpus_ids = database.find("Recommender", 'Corpus', query={"Name": "Corpus5"})[0]['IDList']
    for position, article_id in enumerate(corpus_ids, start=1):
        print(position)
        database.Upsert("Recommender", "Article", target={"Key": article_id}, document={"is_corpus": True})

# ---Calc IDF Value based on certain Corpus---
def Calc_IDF():
    """Compute an IDF vector over corpus "Corpus5" and store it on the
    "Corpus5_TF" vocabulary document.

    Steps: load the vocabulary, load the corpus article-ID list, fetch all
    articles, keep only those inside the corpus, delegate the IDF math to
    Feature.Calc_IDFVector, and upsert the result under the "IDF" field.
    """
    # load vocabulary
    vocabularyDoc = database.Find("Recommender", "Vocabulary", {"Key":"Corpus5_TF"})
    vocabulary = vocabularyDoc[0]["Vocabulary"]

    # load corpus
    corpusDoc = database.Find("Recommender", "Corpus", {"Name":"Corpus5"})
    idList = corpusDoc[0]["IDList"]

    # load articles
    # BUG FIX: the original line here was the bare expression `filter`, a
    # no-op that left the name bound to the *builtin* filter function, which
    # was then passed to Find() as the query. An empty dict means "fetch all",
    # matching the usage elsewhere in this file; use e.g. {"limit": 10} while
    # debugging.
    query = {}
    articleDocs = database.Find("Recommender", "Article", query)

    # keep only articles that belong to the corpus; set membership is O(1)
    # versus O(n) per lookup on the raw ID list
    corpus_ids = set(idList)
    corpus = []
    for articleDoc in articleDocs:
        article_id = articleDoc["id"]
        if article_id in corpus_ids:
            print(article_id)
            corpus.append(articleDoc["content"])

    # calculate idf
    idf_vector = Feature.Calc_IDFVector(corpus, vocabulary)

    # save to database
    database.Upsert("Recommender", "Vocabulary", {"Key":"Corpus5_TF"}, {"IDF":idf_vector})


# ---Vectorize articles and dump per-word TF-IDF weights to CSV---
def Text_to_Vector_Batch(database, callback):
    """Vectorize a batch of articles with TF-IDF and write every non-zero
    (word, weight) pair to D:/data/tag/tagIDFValue.csv.

    :param database: DataBase handle providing Find().
    :param callback: accepted for interface compatibility; currently unused.
    """
    # load vocabulary and its precomputed IDF weights
    vocabularyDoc = database.Find("Recommender", "Vocabulary", {"Key": "Corpus5_TF"})
    vocabulary = vocabularyDoc[0]["Vocabulary"]
    idf_vector = vocabularyDoc[0]["IDF"]

    # fetch a batch of articles
    # (the original assigned {} and immediately overwrote it — dead store removed)
    query = {"limit": 1000}
    articleDocs = database.Find("Recommender", "Article", query)

    # collect every non-zero (word, tfidf) pair across the whole batch
    data = []
    for articleDoc in articleDocs:
        vector_tfidf = Feature.Text_to_Vector_TFIDF(
            articleDoc["segments"], vocabulary, idf_vector, isSegments=True)
        for i, weight in enumerate(vector_tfidf):
            if weight > 0:
                data.append([vocabulary[i], weight])

    dfData = pd.DataFrame(data, columns=['word', 'num'])
    dfData.to_csv("D:/data/tag/tagIDFValue.csv")


def Print_Vocabulary(pathFileName, vocabularyName):
    """Dump the word list of the named vocabulary document to a text file."""
    doc = database.Find("Recommender", "Vocabulary", {"Name": vocabularyName})[0]
    IO.WriteListToFile(pathFileName, doc["Vocabulary"])


def Check_MissingUsers_WhoReadArticles():
    """Print the key of every user present in the User_Article_Batch1 hash
    that has no corresponding tag profile in User_Tags_Batch1.

    NOTE(review): `realtime` is not defined anywhere in this file — the setup
    that creates it (config.RealTime(db=1)) is commented out at the top.
    Re-enable it before calling this function, or it raises NameError.
    """
    users = realtime.GetHashObjects("User_Article_Batch1")
    for raw_key in users:  # hash keys come back as bytes
        user_key = raw_key.decode(encoding='utf-8')
        # the tag store is keyed by the decoded string, not the raw bytes
        if realtime.GetHashDocument("User_Tags_Batch1", user_key) is None:
            print(user_key)

# --- Script entry: the active pipeline steps run below; toggled-off steps
# are kept as commented one-liners for reference. ---
# (import os moved to the top-of-file import block)

# Rebuild the user-defined vocabulary file in the current working directory.
Feature.Build_UserDefinedVocabularyFile(database, os.getcwd() + "/UserDefinedVocabulary.txt")

# Text -> Segments
# Add_CorpusTag()
# Feature.Text_to_WordSegments_Batch()

# ---Compute IDF based on the most recent 10000 articles---
# Feature.Calc_IDF_Batch(database, {"limit": 10000, "orderby": {"publish_time": -1}})
# Print_Vocabulary("D:/Data/Vocabulary/Corpus5_TF.csv", "Corpus5_TF")


# ---Vectorizer---
# Text_to_Vector_Batch()


# ---Add Tags to Item/Article---
# database.creatIndex("Recommender", "TagsOnArticle", "id")
# 0.02 about 18% word as Tag based on 1000 Text Corpus
# RecommenderTagBased.AddTag_on_Article(database, vocabularyName="Corpus5_TF", minimal_tfidf_value=0.02)
# RecommenderTagBased.AddTag_on_Article(database, vocabularyName="Corpus4_LDA_90", minimal_tfidf_value=0.02)


# ---Add Tags to Item/Article---
# method: PreDefined, TopTFIDF
# query = {"limit": 10000, "orderby": {"publish_time": -1}}
# renamed from `filter` so the builtin is not shadowed at module level
query = {}
Feature.AddTag_on_Article_Batch(database, query, "TopTFIDF")

# t = database.Find("rec_article", "article_publish", filter={"id":"15366439"})
# t = t[0]
# Feature.AddTag_on_Article(database, "15366439", t["content"])