#!/usr/bin/python
# -*- coding: utf-8 -*-
# author:pxz
import os
import sys

from bson import ObjectId

base_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(base_path)
import scipy.stats
import numpy as np
from pymongo import MongoClient


def KLD(row1, row2):
    '''
    Compute the Kullback-Leibler divergence between two distributions;
    the smaller the value, the closer the two distributions are.

    :param row1: comma-separated topic-distribution string for document 1
    :param row2: comma-separated topic-distribution string for document 2
    :return: KL divergence value (float)
    '''
    # np.float was removed in NumPy 1.20 (hard error since 1.24); the builtin
    # float is equivalent for parsing these strings.
    r1 = [float(i) for i in row1.split(',')]
    r2 = [float(j) for j in row2.split(',')]
    # scipy.stats.entropy(pk, qk) normalizes both inputs and returns
    # sum(pk * log(pk / qk)).
    return scipy.stats.entropy(r1, r2)


def distOJLD(row1, row2):
    '''
    Euclidean-distance based similarity between two topic distributions.

    Despite the name, the return value is a *similarity* score
    1 / (1 + d), where d is the Euclidean distance — the larger the
    value (max 1.0), the more similar the two documents are.

    :param row1: comma-separated topic-distribution string for document 1
    :param row2: comma-separated topic-distribution string for document 2
    :return: similarity in (0, 1], 1.0 for identical vectors
    '''
    r1 = row1.split(',')
    r2 = row2.split(',')
    # Bug fix: the original only computed the distance when
    # len(r1) <= len(r2) and silently returned 1.0 (maximal similarity)
    # otherwise. zip() compares over the common prefix regardless of
    # which vector is longer, matching the original behavior when
    # len(r1) <= len(r2).
    s = sum((float(a) - float(b)) ** 2 for a, b in zip(r1, r2))
    return 1.0 / (1 + s ** .5)


def sortId(res):
    '''
    Sort score-tagged records in place, ascending by their numeric score.

    Each record has the form "<score>@<id1>&<id2>"; the sort key is the
    float value of the text before the first '@'.

    :param res: list of score-tagged record strings
    :return: the same list, sorted ascending by score
    '''
    def _score(record):
        head, _, _ = record.partition("@")
        return float(head)

    res.sort(key=_score)
    return res


def getDocSimilarity(host, port, db_name, WEB1_VEC, WEB2_VEC,
                     WEB1_DATA='lda_sum_data', WEB2_DATA='info_web'):
    '''
    For each of the first 5 documents in collection WEB2_VEC, rank every
    document in collection WEB1_VEC by topic-distribution similarity,
    keep the top 5 entries, and print the matching articles via getDoc().

    :param host: MongoDB host IP
    :param port: MongoDB port
    :param db_name: database name
    :param WEB1_VEC: collection 1, document topic-distribution vectors
    :param WEB2_VEC: collection 2, document topic-distribution vectors
    :param WEB1_DATA: collection holding article bodies for WEB1_VEC ids
    :param WEB2_DATA: collection holding article bodies for WEB2_VEC ids
    :return: None
    '''
    # Bug fix: WEB1_DATA/WEB2_DATA were previously read as undefined module
    # globals (NameError when this module is imported); they are now
    # parameters defaulting to the original script's configuration.
    client = MongoClient(host=host, port=port)
    db = client[db_name]
    candidate_coll = db[WEB1_VEC]  # ranked candidates (inner loop)
    query_coll = db[WEB2_VEC]      # query documents (outer loop, first 5)
    for query_doc in query_coll.find().limit(5):
        # Hoist the per-query values out of the inner loop; they do not
        # change while scanning the candidate collection.
        query_row = query_doc.get("topicDistribution").strip("\"[]")
        query_id = str(query_doc.get("id"))
        res = list()
        for cand_doc in candidate_coll.find():
            cand_row = cand_doc.get("topicDistribution").strip("\"[]")
            cand_id = str(cand_doc.get("id"))
            # Euclidean-distance based similarity; KLD(query_row, cand_row)
            # can be substituted here to rank by KL divergence instead.
            s = distOJLD(query_row, cand_row)
            res.append(str(s) + "@" + query_id + "&" + cand_id)
        # NOTE(review): sortId sorts ascending, so this keeps the 5 LOWEST
        # similarity scores; ascending is correct for KL divergence (lower
        # = closer) but looks inverted for distOJLD — confirm intent.
        dis = sortId(res)[:5]
        getDoc(host, port, db_name, WEB1_DATA, WEB2_DATA, dis)


def getDoc(host, port, db_name, WEB1_DATA, WEB2_DATA, dis):
    '''
    Print the top-5 matched articles and the reference article itself.

    :param host: MongoDB host IP
    :param port: MongoDB port
    :param db_name: database name
    :param WEB1_DATA: collection holding the matched article bodies
    :param WEB2_DATA: collection holding the reference article body
    :param dis: list of "<score>@<id1>&<id2>" strings; id1 is the reference
                article id (identical in every entry), id2 a matched id
    :return: None
    '''
    # Reference article id (same across all entries) and matched ids.
    # Renamed loop variable to avoid shadowing the builtin `id`.
    id1 = dis[0].split("@")[1].split("&")[0]
    id2_l = [entry.split("&")[1] for entry in dis]

    client = MongoClient(host=host, port=port)
    db = client[db_name]
    collection1 = db[WEB1_DATA]  # matched article bodies
    collection2 = db[WEB2_DATA]  # reference article body
    print("*********前5篇文章************")
    for doc_id in id2_l:
        doc = collection1.find_one({"_id": ObjectId(doc_id)})
        # Robustness: find_one() returns None for an unknown id; the
        # original crashed with AttributeError on None.get/None.replace.
        html = doc.get('html') if doc else None
        if html is None:
            print("文章id：%s,文章内容：%s" % (doc_id, ''))
            continue
        # Flatten the article body onto one line for printing.
        html = html.replace('\r', '').replace('\n', '').replace('\t', '')
        print("文章id：%s,文章内容：%s" % (doc_id, html))
    print("*********文章1************")
    ref = collection2.find_one({"_id": ObjectId(id1)})
    print(ref.get('html') if ref else None)
    print('*********分割线************')


if __name__ == "__main__":
    # MongoDB connection settings.  (Removed a stray dead `pass`.)
    host = '175.102.18.112'
    port = 27018
    db_name = 'tongji_zjj'
    # Topic-distribution vector collections
    # (alternatives: web1_vec30, test_web1).
    WEB1_VEC = 'web1_vec15'
    WEB2_VEC = 'web2_vec15'
    # Article-body collections, read by getDoc via getDocSimilarity.
    WEB1_DATA = 'lda_sum_data'
    WEB2_DATA = 'info_web'
    getDocSimilarity(host, port, db_name, WEB1_VEC, WEB2_VEC)