# coding:utf-8

import sys
import gensim
import sklearn
import numpy as np
import jieba
import csv
import json
import requests
from gensim.models.doc2vec import Doc2Vec, LabeledSentence

TaggededDocument = gensim.models.doc2vec.TaggedDocument

def similarity(a_vect, b_vect):
    """Return the cosine similarity of two numeric vectors.

    Only the paired prefix of the two vectors is considered (zip
    truncates to the shorter one). Returns -1 when either paired
    component has zero magnitude, since the cosine is undefined there.
    """
    paired = list(zip(a_vect, b_vect))
    dot_val = sum(a * b for a, b in paired)
    a_norm = sum(a * a for a, _ in paired)
    b_norm = sum(b * b for _, b in paired)
    if a_norm == 0.0 or b_norm == 0.0:
        return -1
    return dot_val / (a_norm * b_norm) ** 0.5

class HandleCsv:
    """In-memory editor for a small CSV file.

    The whole file is loaded into ``self.csv_list`` (a list of row lists)
    on construction; all row/column indices in the methods are 1-based.
    Call :meth:`list2csv` to write the edited table back to disk.

    Note: the original version kept a mutable class-level ``csv_list = []``;
    it was always shadowed by the instance attribute set in ``__init__`` and
    has been removed (shared mutable class attributes are a bug magnet).
    """

    def __init__(self, filename):
        # Read the entire CSV into memory as a list of row lists.
        self.filename = filename
        with open(self.filename, encoding='utf-8') as fp:
            self.csv_list = list(csv.reader(fp))

    # Overwrite the cell at row n, column m (1-based). The cell must exist.
    def modify(self, n, m, value):
        self.csv_list[n - 1][m - 1] = value

    # Insert an empty row before row n (1-based).
    def insert_row(self, n):
        self.csv_list.insert(n - 1, [])

    # Set the cell at row n, column m (1-based), padding the row with
    # empty strings when it is shorter than m columns.
    def insert_col(self, n, m, value):
        row = self.csv_list[n - 1]
        if len(row) < m:
            # Pad any missing cells to the left, then place the value.
            row.extend([''] * (m - 1 - len(row)))
            row.append(value)
        else:
            self.modify(n, m, value)

    # Delete row n (1-based).
    def del_row(self, n):
        del self.csv_list[n - 1]

    # Return the contents of the cell at row n, column m (1-based).
    def get_value(self, n, m):
        return self.csv_list[n - 1][m - 1]

    def list2csv(self, file_path):
        """Write ``csv_list`` back to ``file_path`` as CSV.

        Fixes vs. the original: the file handle was opened without ever
        being closed (resource leak), no encoding was specified (the
        platform default can mangle non-ASCII text this script writes),
        and the hand-rolled quoting only handled commas, never embedded
        quote characters. ``csv.writer`` handles all of that;
        ``lineterminator='\\n'`` keeps the original one-``\\n``-per-row
        output.
        """
        try:
            with open(file_path, 'w', encoding='utf-8', newline='') as fp:
                csv.writer(fp, lineterminator='\n').writerows(self.csv_list)
        except Exception as e:
            # Best-effort write, matching the original's behavior of
            # printing (not raising) on failure.
            print(e)

def get_datasest():
    """Load the pre-tokenized title corpus and wrap each line as a
    TaggedDocument whose tag is its 0-based line index.

    Each line of ./data/title_fenci.txt is expected to hold
    space-separated tokens; trailing whitespace on the last token
    (the newline) is stripped.
    """
    with open(u"./data/title_fenci.txt", 'r', encoding='utf-8', errors='ignore') as cf:
        lines = cf.readlines()
        print(len(lines))

    corpus = []
    for idx, line in enumerate(lines):
        tokens = line.split(' ')
        # Drop the trailing newline carried by the final token.
        tokens[-1] = tokens[-1].strip()
        corpus.append(TaggededDocument(tokens, tags=[idx]))

    return corpus

def getVecs(model, corpus, size):
    """Collect the learned document vector for every item in ``corpus``
    (looked up by its first tag) and stack them into an (N, size) array.
    """
    rows = []
    for doc in corpus:
        vec = model.docvecs[doc.tags[0]]
        rows.append(np.array(vec.reshape(1, size)))
    return np.concatenate(rows)

def train(x_train, size=200, epoch_num=70):
    """Train a Doc2Vec model on ``x_train`` and save it to disk.

    BUG FIX: ``epoch_num`` was previously accepted but ignored — the
    epoch count was hard-coded to 70. It is now honored, and its default
    is 70 so callers that never passed it train exactly as before.

    Args:
        x_train: list of TaggedDocument training samples.
        size: dimensionality of the learned document vectors.
        epoch_num: number of training epochs.

    Returns:
        The trained gensim Doc2Vec model (also saved to
        ./modle/doc2vec3.model — 'modle' spelling kept to match the
        loaders elsewhere in this file).
    """
    # NOTE: pre-gensim-4.0 API (`size=`, `model.docvecs`), consistent with
    # the rest of this file; gensim 4+ renamed `size` to `vector_size`.
    model_dm = Doc2Vec(x_train, min_count=1, window=5, size=size,
                       sample=1e-3, negative=5, workers=4)
    model_dm.train(x_train, total_examples=model_dm.corpus_count,
                   epochs=epoch_num)
    model_dm.save('./modle/doc2vec3.model')

    return model_dm

def get_sentiment_result(text):
    """Classify ``text`` with the Baidu AIP sentiment API.

    Returns '' for empty input, otherwise the decoded JSON response dict
    from the service.

    Fixes vs. the original: the OAuth token URL was assigned to ``url``
    and then immediately overwritten (dead code, removed), and the POST
    had no timeout, so a hung connection could block the caller forever.
    """
    if text == '':
        return ''
    # SECURITY NOTE(review): hard-coded, expiring access token — this
    # should be obtained from the OAuth endpoint / configuration instead.
    access_token = '24.a7da587a6c20ca031ddc3ba45301ac68.2592000.1591604153.282335-19795418'
    # General-purpose sentiment classification endpoint.
    url = 'https://aip.baidubce.com/rpc/2.0/nlp/v1/sentiment_classify'
    params = {
        'access_token': access_token
    }
    payload = json.dumps({
        'text': text
    })
    headers = {'Content-Type': 'application/json; charset=UTF-8'}
    # timeout avoids an indefinite hang on network trouble.
    response = requests.post(url=url, params=params, data=payload,
                             headers=headers, timeout=10).json()
    return response

def test():
    """Annotate the first 300 rows of ./data/testcsv.csv.

    For each row's CONTENT cell this writes back, into columns 5-8:
    the similarity score of the most similar training document, the raw
    text length, the Baidu positive-sentiment probability, and a
    polarity label ("积极"/"消极").

    Fixes vs. the original:
      * the CSV is read once into ``rows`` — the second DictReader
        comprehension for 'SIM' always iterated an exhausted reader and
        produced an empty list;
      * ``str.replace`` returns a new string; the original discarded the
        result, so tabs/newlines/spaces were never actually stripped
        before segmentation — the result is now assigned back;
      * the dead ``words``/``sentence`` inner loop (it iterated the
        single first character of the text) is removed;
      * the four output columns are written through one HandleCsv
        instance per record instead of re-reading and re-writing the
        whole file four times; the final file contents are the same.
    """
    model_dm = Doc2Vec.load("./modle/doc2vec3.model")

    filename = "./data/testcsv.csv"
    with open(filename, 'r+', encoding='UTF-8') as file:
        rows = list(csv.DictReader(file))
    column = [row['CONTENT'] for row in rows]

    # Processes the first 300 records; raise the bound (up to 2861) to
    # handle more data.
    for i in range(0, 300):
        print(column[i])
        sen = column[i]        # raw text, sent to the sentiment API
        lenth = len(sen)       # raw text length, stored in column 6
        # Strip whitespace before word segmentation.
        column[i] = column[i].replace('\t', '').replace('\n', '').replace(' ', '')
        seg_list = jieba.cut(column[i], cut_all=False)
        test_text = " ".join(seg_list)
        # Keep the tokenized text on disk (side effect preserved from the
        # original, which round-tripped through this file).
        with open("./data/text_fenci.txt", 'w', encoding='utf-8', errors='ignore') as f2:
            f2.write(test_text)
        inferred_vector_dm = model_dm.infer_vector(test_text.split())
        sims = model_dm.docvecs.most_similar([inferred_vector_dm], topn=1)
        for sim in sims:
            # sim is a (doc_tag, similarity) pair; keep everything after
            # the first space, i.e. the similarity score as text.
            str1 = " ".join('%s' % id for id in sim)
            cut1 = str1.index(' ') + 1
            print(str1[cut1:])
            intput = str1[cut1:]
            length = str(lenth)
            # FRAGILE(review): scrapes positive_prob out of the dict's
            # str() representation — indexing the JSON dict directly
            # would be safer; kept to preserve the stored format.
            out1 = str(get_sentiment_result(sen)).split("'positive_prob': ")
            out2 = str(out1[1]).split(", 'confidence':")
            print(out2[0])
            jiji = out2[0]
            h_csv = HandleCsv(filename)
            h_csv.insert_col(i + 2, 5, intput)   # similarity score
            h_csv.insert_col(i + 2, 6, length)   # raw text length
            h_csv.insert_col(i + 2, 7, jiji)     # positive probability
            # Polarity label: positive above 0.5, negative otherwise.
            h_csv.insert_col(i + 2, 8, "积极" if float(jiji) > 0.5 else "消极")
            h_csv.list2csv(filename)



if __name__ == '__main__':
    # Load the training data.
    # NOTE(review): x_train is never used below — train() is not called
    # here, so this line only prints the corpus size as a side effect.
    x_train = get_datasest()
    # Load the trained model.
    # (the author's note: a Windows absolute path like
    # E:\...\modle\doc2vec3.model can be replaced with ./modle/doc2vec3.model)
    # NOTE(review): model_dm is also unused — test() loads its own copy.
    model_dm = Doc2Vec.load("./modle/doc2vec3.model")
    # Process the test data and write results back to the CSV.
    test()
