# from input import input_in
from input import input_in
import warnings

warnings.filterwarnings('ignore')  # suppress noisy third-party warnings
from gensim.models import Doc2Vec
import gensim

from keras.models import load_model
import numpy as np


class perAndPos:
    """Pairs a user name with the stance label predicted for that user.

    Attributes:
        username: the user's display name.
        personality: optional personality label, set only via the setter.
        pos_per: the predicted stance label for this user.
    """

    def __init__(self, username, pos_per):
        self.username = username
        # Initialize so the getter never raises AttributeError before the
        # setter is called (the original left this attribute unset).
        self.personality = None
        self.pos_per = pos_per

    def getpersonalitty(self):
        return self.personality

    def setpersonalitty(self, personality):
        # Bug fix: the original assigned to a misspelled attribute
        # (``self.personalitty``), so ``getpersonalitty`` could never
        # observe the value written here.
        self.personality = personality

    def getUserName(self):
        return self.username

    def setUserName(self, name):
        self.username = name

    def getPos_per(self):
        return self.pos_per

    def setPos_per(self, pos_per):
        self.pos_per = pos_per


class result:
    """Bundle of a rumor-veracity verdict and the per-user stance mapping."""

    def __init__(self, isRumor, per_pos_ALL):
        self.isRumor = isRumor
        self.per_pos_ALL = per_pos_ALL

    def getresult_rumor(self):
        """Return the rumor-veracity verdict."""
        return self.isRumor

    def setresult_rumor(self, isRumor):
        """Replace the rumor-veracity verdict."""
        self.isRumor = isRumor

    def getPerAndPos(self):
        """Return the per-user stance mapping."""
        return self.per_pos_ALL

    def setPerAndPos(self, per_pos_ALL):
        """Replace the per-user stance mapping."""
        self.per_pos_ALL = per_pos_ALL
# Helpers that turn raw texts into document vectors
def cut_sentence(text):
    """Join the characters/tokens of each item in *text* with single spaces."""
    return [' '.join(tokens) for tokens in text]


def X_train(cut_sentence, TaggededDocument):
    """Wrap each space-joined sentence as a tagged document for Doc2Vec.

    Each sentence is split on single spaces, the trailing token is stripped
    of whitespace (e.g. a newline), and the document is tagged with its index.
    """
    tagged_docs = []
    for idx, sentence in enumerate(cut_sentence):
        tokens = sentence.split(' ')
        tokens[-1] = tokens[-1].strip()
        tagged_docs.append(TaggededDocument(tokens, tags=[idx]))
    return tagged_docs


def train(x_train, size=300):
    """Train a Doc2Vec model of dimensionality *size* on the tagged corpus.

    Args:
        x_train: iterable of TaggedDocument instances (see ``X_train``).
        size: embedding dimensionality (default 300, matching ``doc2vec``).

    Returns:
        The trained gensim Doc2Vec model.
    """
    # Build the model without a corpus, then train exactly once.  The
    # original passed ``x_train`` to the constructor (which already trains)
    # AND called ``train()`` afterwards, training the model twice.
    model = Doc2Vec(min_count=1, window=3, vector_size=size, sample=1e-3,
                    negative=5, workers=4)
    model.build_vocab(x_train)
    model.train(x_train, total_examples=model.corpus_count, epochs=10)
    return model


def doc2vec(usertext):
    """Embed every text in *usertext* with a freshly trained Doc2Vec model.

    Args:
        usertext: list of strings (one post per user).

    Returns:
        numpy array of shape (len(usertext), 300) with one row per text.
    """
    # The original built an unused 0..299 list here (shadowing the builtin
    # ``list``); that dead code is removed.
    joined = cut_sentence(usertext)
    TaggededDocument = gensim.models.doc2vec.TaggedDocument
    tagged = X_train(joined, TaggededDocument)
    model_dm = train(tagged)
    vectors = []
    for text in usertext:
        words = text.split(" ")
        # NOTE(review): ``steps`` was renamed to ``epochs`` in gensim 4.x;
        # this call assumes a gensim version that still accepts ``steps``.
        inferred_vector = model_dm.infer_vector(doc_words=words, alpha=0.025,
                                                steps=100000)
        vectors.append(inferred_vector)
    # Stack once instead of growing the array with vstack per iteration.
    return np.vstack(vectors)


# Model inference
def built_model(x_test):
    """Run the saved multi-task Keras model on *x_test*.

    Args:
        x_test: input array fed to both the 'inputs_ab' and 'person_input'
            heads of the model.

    Returns:
        Tuple of (stance labels as a squeezed int array from argmax over the
        last axis, list of rumor-veracity probabilities formatted as
        two-decimal percentage strings).
    """
    model = load_model('perMTL/model.h5')
    # Third output (personality head) is unused by the callers here.
    pred_probabilities_a, pred_probabilities_b, _ = model.predict(
        {'inputs_ab': x_test, 'person_input': x_test},
        verbose=0)
    Y_pred_a = np.argmax(pred_probabilities_a, axis=2)
    pred_probabilities_b = np.squeeze(pred_probabilities_b)
    # Format each class probability as e.g. '12.34%'.
    Y_pred_b = [format(p, '.2%') for p in pred_probabilities_b]
    return np.squeeze(Y_pred_a), Y_pred_b


def rumor_model(user):          # input: {username: text} dict; rumor output: list; stance output: dict
    """Full pipeline: wrap inputs, embed texts, predict rumor veracity and
    per-user stance, and package everything into a plain dict.

    Returns a dict with keys ``'tagPercent'`` (formatted probability strings
    from the veracity head) and ``'stance'`` ({username: stance label}).
    """
    # Front end: wrap each (username, text) pair in the project input type.
    ins = list()
    for k, v in user.items():
        ins.append(input_in(k, v))
    # Back end: collect the raw texts in the same order as ``ins``.
    texts = list()
    for i in range(len(ins)):
        user = ins[i]  # NOTE: rebinds the ``user`` parameter; the input dict is no longer reachable
        text = user.getUserText()
        texts.append(text)

    result_doc = doc2vec(texts)
    result_doc = result_doc[np.newaxis]  # add a batch axis: (1, n_users, 300)
    Y_pred_a, Y_pred_b = built_model(result_doc)

    per_pos_list = dict()
    # NOTE(review): the loop starts at 1, so the stance of user 0 is never
    # reported — presumably index 0 is the source post; confirm intent.
    for i in range(1,Y_pred_a.shape[0]):
        # perandpos = perAndPos(ins[i].getUserName(), Y_pred_a[i])
        per_pos_list[ins[i].getUserName()]=int(Y_pred_a[i])
    res = result(Y_pred_b, per_pos_list)  # Y_pred_b labels: 0 = true; 1 = false; 2 = unverified
    empty_dict={}
    empty_dict['tagPercent']=res.getresult_rumor()
    empty_dict['stance']=per_pos_list
    return  empty_dict          # stance labels: 0 = comment; 1 = support; 2 = deny; 3 = query

if __name__ == '__main__':
    # Smoke test: run the pipeline on two hand-written posts.
    samples = [
        ("wang", "China's Xinjiang region violates human rights by oppressing working people and forcing them to pick cotton. Let's unite and boycott Xinjiang cotton together."),
        ("wang2", " We should boycott Xinjiang cotton. "),
    ]
    res_dict = {name: text for name, text in samples}
    print(rumor_model(res_dict))
