import os
import fileinput
from functools import reduce
from itertools import chain

import jieba
import numpy as np
import pandas as pd
from neo4j.v1 import GraphDatabase

from settings import NEO4J_CONFIG
from model_train.multithread_predict import request_model_server

# Dictionary files live next to this module: a custom vocabulary for jieba
# and a stop-word list used to filter the tokenizer output.
userdict_path = os.path.join(os.path.dirname(__file__), "userdict.txt")
stopdict_path = os.path.join(os.path.dirname(__file__), "stopdict.txt")

# Register the domain vocabulary before any tokenization happens.
jieba.load_userdict(userdict_path)
# Only the first MAX_LIMIT characters of the input text are tokenized.
MAX_LIMIT = 200

def get_index_map_label(word_list):
    """Look up, for every word, its related labels and weights in Neo4j.

    :param word_list: list of tokenized words.
    :return: flat list alternating ``[index_str, [[label, weight], ...], ...]``;
             words with no related label contribute nothing. Empty input
             yields ``[]``.
    """
    if not word_list:
        return []
    _driver = GraphDatabase.driver(**NEO4J_CONFIG)
    try:
        with _driver.session() as session:
            def _query(index, word):
                # Skip empty tokens; they cannot match any Vocabulary node.
                if not word:
                    return []
                # Parameterized query: avoids Cypher injection via user text
                # (the previous version interpolated the word with %r).
                cypher = ("MATCH(a:Vocabulary{name:$word})-[r:Related]-(b:Label) "
                          "RETURN b.title, r.weight")
                record = session.run(cypher, word=word)
                result = [[item[0], item[1]] for item in record]
                if not result:
                    return []
                return [str(index), result]

            index_map_label = list(
                chain(*(_query(i, w) for i, w in enumerate(word_list))))
    finally:
        # The driver holds connection-pool resources; always release it.
        _driver.close()
    return index_map_label


def handle_cn_text(text: str):
    """Tokenize Chinese text with jieba and drop stop words.

    :param text: raw input text; only the first MAX_LIMIT characters are used.
    :return: list of tokens not present in the stop-word dictionary;
             ``[]`` for falsy input.
    """
    if not text:
        return []
    word_list = jieba.cut(text[:MAX_LIMIT])

    def _load_stop_dict():
        # Use a context manager with an explicit encoding: the previous
        # fileinput.FileInput handle was never closed and relied on the
        # platform default encoding.
        with open(stopdict_path, encoding="utf-8") as f:
            return {line.strip() for line in f}

    stop_word_set = _load_stop_dict()
    return [word for word in word_list if word not in stop_word_set]


def label(text):
    """Return the category label for *text*.

    Placeholder implementation: every input maps to the fixed label
    below. (Avoids shadowing the function name with a local variable.)
    """
    return "稀罕音乐"

def weight_update(word_list, index_map_label):
    """Re-score each word index's candidate labels via the model server.

    :param word_list: tokenized words of the original text.
    :param index_map_label: flat list alternating index strings and
           ``[[label, weight], ...]`` lists.
    :return: flat list of the same shape, with each label list replaced by
             the model server's response.
    """
    # Pair up even positions (indices) with odd positions (label lists).
    pairs = dict(zip(index_map_label[::2], index_map_label[1::2]))

    updated = {}
    for index, labels in pairs.items():
        candidate_names = [entry[0] for entry in labels]
        # Ask the model server to re-weight the candidate labels
        # against the full word list.
        updated[index] = request_model_server(word_list, candidate_names)

    # Flatten back to the alternating [index, labels, ...] layout.
    flat = []
    for index, labels in updated.items():
        flat.append(index)
        flat.append(labels)
    return flat


def control_increase(index_map_label_):
    """Sum the scores of duplicate labels across all word indices.

    Input example:
        ["2", [["情感故事", 0.765]], "3", [["情感故事", 0.876], ["明星", 0.765]]]
    Output: a pandas Series named 'score' indexed by label, where repeated
    labels have their scores added together. Empty input yields ``[]``.
    """
    if not index_map_label_:
        return []

    # The odd positions hold the [label, score] pair lists; concatenate them.
    all_pairs = []
    for pair_list in index_map_label_[1::2]:
        all_pairs.extend(pair_list)

    records = [{"label": pair[0], "score": pair[1]} for pair in all_pairs]
    frame = pd.DataFrame(records)
    # Collapse duplicate labels by summing their scores.
    return frame.groupby("label")["score"].sum()

def father_label_and_normalized(df_):
    """Attach ancestor labels from the graph and normalize scores.

    Takes the score-adjusted aggregation as input and produces the system's
    final result.
    :param df_: pandas Series indexed by label, e.g.
                label
                情感故事    1.641
                明星      0.765
                Name: score, dtype: float64
    :return: list of dicts like
             ``[{"label": "LOL", "score": 0.811, "related": ["游戏"]}]``
             (the previous docstring documented a non-existent "relate" key).
    """
    def _sigmoid(x):
        # Squash the summed score into (0, 1) and round for presentation.
        y = 1.0 / (1.0 + np.exp(-x))
        return round(y, 3)

    def _expand(pair):
        # Fetch every ancestor label up to 3 Contain-hops away,
        # excluding the root label '泛娱乐'.
        _driver = GraphDatabase.driver(**NEO4J_CONFIG)
        try:
            with _driver.session() as session:
                # Parameterized query instead of %r interpolation.
                cypher = ("MATCH(a:Label{title:$title})<-[r:Contain*1..3]-(b:Label) "
                          "WHERE b.title <> '泛娱乐' RETURN b.title")
                record = session.run(cypher, title=pair[0])
                result = [item[0] for item in record]
        finally:
            # Release the connection pool; the original leaked the driver.
            _driver.close()
        return {"label": pair[0], "score": _sigmoid(pair[1]), "related": result}

    return list(map(_expand, df_.to_dict().items()))

if __name__ == '__main__':
    # Demo pipeline starting from a precomputed index->labels mapping:
    # even positions are word-index strings, odd positions are
    # [label, score] candidate lists for that word.
    # (Dead commented-out code for the earlier stages was removed.)
    index_map_label_ = ["2", [["情感故事", 0.765]],
                        "3", [["情感故事", 0.876], ["明星", 0.765]]]

    # Collapse duplicate labels into one summed score per label,
    # e.g. 情感故事 -> 1.641, 明星 -> 0.765.
    df_ = control_increase(index_map_label_)

    # Final result: sigmoid-normalized scores plus ancestor labels
    # pulled from the Neo4j label graph.
    result = father_label_and_normalized(df_)