# -*- coding: utf-8 -*-

import pandas as pd
import numpy as np
from bs4 import BeautifulSoup   #将html转文档
import jieba   #分词
from jieba import posseg
from collections import Counter   #分词
import collections
from sklearn.utils import shuffle
from sklearn import feature_extraction
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer, TfidfVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
import copy
import random
import datetime
import matplotlib.pyplot as plt
import Core.MySQLDB as MySQLDB
import codecs
import Recommender.FeatureRepresentation as Feature
import Core.MongoDB as db

# ---将文章分词储存---
def Process(data):
    segments_list = []

    #
    for n in range(len(data)):
        html = data.loc[n, 'content']

        # 去除Html标记
        soup = BeautifulSoup(html)
        text = soup.get_text()

        # 切词本体
        segments = WordSegment_Jieba(text)

        # 字符串化
        segments_list.append(" ".join(segments))

    #
    return segments_list


# ---Text to Segments and Save to Database---
def Text_to_WordSegments_Batch(database):
    filter = {"limit": 1000}
    filter = {}
    articleDocs = database.Find("Recommender", "Article", filter)
    #
    i = 0
    for articleDoc in articleDocs:
        i += 1
        #if i < 85600:
        #    continue
        text = articleDoc["content"]
        id = articleDoc["Key"]
        print(i, datetime.datetime.now())
        segments = WordSegment(text)
        database.Upsert("Recommender", "Article", target={"Key": id}, document={"segments": segments})
        pass


def WordSegment_Jieba(text):
    """Segment *text* with jieba and keep content-bearing tokens only.

    A token is kept when it is at least two characters long and its POS
    tag belongs to the noun/verb-like classes listed below.
    """
    # POS tags worth keeping: nouns (n/nz/nt/nr/ns), verbs (v/vn),
    # abbreviations (j)
    kept_flags = {'n', 'nz', 'nt', 'nr', 'ns', 'v', 'vn', 'j'}
    return [pair.word
            for pair in posseg.cut(text)
            if len(pair.word) >= 2 and pair.flag in kept_flags]


def WordSegment(text):
    """Project-wide segmentation entry point; delegates to jieba."""
    return WordSegment_Jieba(text)


#---Calc IDF Vector---
def Calc_IDFVector(texts, vocabulary):
    #idf = []
    idf_vector = []

    #
    total_text = len(texts)

    #
    segmentsList = []
    for texti in range(total_text):
        text = texts[texti]
        segments = WordSegment(text)
        segmentsList.append(segments)

    #
    i = 0
    for word in vocabulary:
        i += 1
        if i % 100 == 0:
            print("Processed ", i, " words")

        appears = 0
        for texti in range(total_text):
            # content = data.loc[s,'content']
            segments = segmentsList[texti]
            if word in segments:
                appears += 1
            #else:
            #    print(texti, segments)
        idf_value = np.log(total_text / (1 + appears))
        #idf.append([word, cal_idf])
        idf_vector.append(idf_value)

    #idf_table = pd.DataFrame(idf, columns=['word', 'num']).set_index('word')

    #
    return idf_vector


# ---支持Raw Text 或 分词Segments---
def Text_to_Vector_TFIDF(text, vocabulary, idf_vector, isSegments=False):
    #

    if isSegments:
        segments = text
    else:
        segments = WordSegment(text)

    term_count_byword = collections.Counter(segments)

    tf_vector = []
    length = len(segments)
    for word in vocabulary:
        # print(word)
        if word not in term_count_byword or length == 0:
            tf = 0
        else:
            tf = term_count_byword[word] / length
        #
        # tf_vector.append([word, tf])
        tf_vector.append(tf)

    # TF X IDF
    # tf_vector = pd.DataFrame(tf_vector, columns=['word', 'num']).set_index('word')
    # tf_idf = tf_vector * idf_vector
    tf_idf = np.multiply(tf_vector, idf_vector).tolist()
    return tf_idf


# 预置词库
def Get_Vocabulary(databaseSimulation, filepath):
    #databaseSimulation = MySQLDB.MySQLDB("172.25.28.10", "3306", username="stock", password="stock123@PWD")
    tag_keyword = databaseSimulation.Find('rec_article', 'tag_keyword')
    vocabulary = []
    f = codecs.open(filepath, 'w+', 'utf-8')
    for tag in tag_keyword:
        word = tag['name']
        vocabulary.append(word)
        f.write(word + '\r\n')
    f.close

    return vocabulary


def WordSegment_vocabulary(filepath, text):
    """Segment *text* after loading a custom user dictionary into jieba.

    filepath points at the user-dictionary file (e.g. "D:/vocabulary.txt").
    NOTE: the dictionary is (re)loaded on every single call.
    """
    jieba.load_userdict(filepath)
    # Keep only noun/verb-like tokens of length >= 2
    wanted_flags = {'n', 'nz', 'nt', 'nr', 'ns', 'v', 'vn', 'j'}
    return [token.word
            for token in posseg.cut(text)
            if len(token.word) >= 2 and token.flag in wanted_flags]

def Text_to_WordSegments_Batch(database, userdict_pathfilename="D:/vocabulary.txt"):
    """Segment every Recommender.Article document and persist the tokens.

    Each document's 'content' is tokenized with the project segmenter
    (jieba plus a custom user dictionary) and the token list is upserted
    back into the same document under 'segments', matched on 'Key'.

    Parameters
    ----------
    database : MongoDB wrapper exposing Find() and Upsert().
    userdict_pathfilename : str
        Path of the jieba user dictionary. Generalized from the
        previously hard-coded constant; the default preserves the old
        behavior.
    """
    # Empty filter: process the whole collection
    articleDocs = database.Find("Recommender", "Article", {})

    count = 0
    for articleDoc in articleDocs:
        count += 1
        text = articleDoc["content"]
        key = articleDoc["Key"]
        # Progress trace: running counter plus wall-clock timestamp
        print(count, datetime.datetime.now())
        segments = Feature.WordSegment_Jieba(text, userdict_pathfilename=userdict_pathfilename)
        database.Upsert("Recommender", "Article",
                        target={"Key": key}, document={"segments": segments})


# Entry point: run the whole-collection segmentation batch.
# FIX: previously these calls ran at module top level, so merely
# importing this file kicked off a full database batch job; the
# __main__ guard restricts that to direct script execution.
# NOTE(review): database host and credentials are hard-coded in source --
# move them to configuration / environment variables.
if __name__ == "__main__":
    database = db.MongoDB("10.13.144.179", "27017", "root", "kirk2019")
    Text_to_WordSegments_Batch(database)
