# coding:utf-8

import sys
import gensim
import sklearn
import numpy as np
import jieba
import time
import re
import random
import csv
import json
import requests
from gensim.models.doc2vec import Doc2Vec, LabeledSentence


# Module-level alias for gensim's TaggedDocument.  The misspelled name
# "TaggededDocument" is kept because the rest of the file refers to it.
TaggededDocument = gensim.models.doc2vec.TaggedDocument

def similarity(a_vect, b_vect):
    """Return the cosine similarity of two equal-length numeric vectors.

    Returns -1 when either vector has zero magnitude, since the cosine is
    undefined in that case.
    """
    dot = sum(x * y for x, y in zip(a_vect, b_vect))
    norm_a = sum(x * x for x in a_vect)
    norm_b = sum(y * y for y in b_vect)
    if norm_a == 0.0 or norm_b == 0.0:
        return -1
    return dot / ((norm_a * norm_b) ** 0.5)

class HandleCsv:
    """In-memory CSV editor.

    Loads a CSV file into a list of rows (``self.csv_list``) and supports
    cell-level edits before writing the table back out with ``list2csv``.
    Rows and columns are addressed 1-based (n = row, m = column).
    """

    def __init__(self, filename):
        # Each instance gets its own row list assigned here; the original
        # class-level `csv_list = []` was a shared-mutable-state hazard.
        self.filename = filename
        with open(self.filename) as fp:
            self.csv_list = list(csv.reader(fp))

    def modify(self, n, m, value):
        """Overwrite the existing cell at row n, column m (1-based)."""
        self.csv_list[n - 1][m - 1] = value

    def insert_row(self, n):
        """Insert an empty row before row n (1-based)."""
        self.csv_list.insert(n - 1, [])

    def insert_col(self, n, m, value):
        """Set the cell at row n, column m, padding the row with empty
        strings first if it is currently shorter than m columns."""
        row = self.csv_list[n - 1]
        if len(row) < m:
            # Pad the missing cells to the left of column m, then append.
            row.extend([''] * (m - 1 - len(row)))
            row.append(value)
        else:
            self.modify(n, m, value)

    def del_row(self, n):
        """Delete row n (1-based)."""
        del self.csv_list[n - 1]

    def get_value(self, n, m):
        """Return the contents of the cell at row n, column m (1-based)."""
        return self.csv_list[n - 1][m - 1]

    def list2csv(self, file_path):
        """Write the in-memory rows to file_path as CSV.

        Values containing a comma are wrapped in double quotes.  Errors are
        printed rather than raised (best-effort, as in the original), but
        the file handle is now always closed -- the original leaked it.
        """
        try:
            with open(file_path, 'w') as fp:
                for items in self.csv_list:
                    cells = ['"%s"' % item if ',' in item else item
                             for item in items]
                    fp.write(','.join(cells))
                    fp.write('\n')
        except Exception as e:
            print(e)



class taobaoSpider_content():
    """Tmall review scraper.

    get_page: builds the rate-list URL and returns the parsed JSON payload
    get_content: extracts the review fields and yields them as dicts

        itemId: product id
        currentPage: review page number
        sellerId: shop id
    """

    def __init__(self, itemId, sellerId, currentPage):
        self.currentPage = currentPage
        self.url = "https://rate.tmall.com/list_detail_rate.htm?"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36",
            "referer": "https://detail.tmall.com/item.htm?spm=a220m.1000858.1000725.8.12357de7uI1uOu&id=602977570356&skuId=4304210164437&areaId=510100&user_id=3351172141&cat_id=2&is_b=1&rn=d2044f57561b87fd83ad00ce338f0cd1&on_comment=1",
            "Cookie": "lid=%E8%B6%85%E5%A8%81%E8%93%9D%E7%8C%AB%E6%88%91%E6%9C%89%E7%9F%A5%E8%AF%86%E6%88%91%E8%87%AA%E8%B1%AA; enc=XJwcK4DybRwOk4R5bu%2FJtcXN2m4Kb5LngN9e7EDyoQu6UugNceB3rwsTxLri3HUWF6mWjfbvovS8HVZhYUcAHA%3D%3D; cna=IakcF4CQm3gCAXW7+dxgQxGp; sgcookie=E2lPAmrso4SdnP1qLE8a6; t=1f89b53da080fcee11386787b0604be1; tracknick=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; lgc=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; _tb_token_=318b35fe300eb; cookie2=1e4c25b69b94c9fe58758553009376f5; _m_h5_tk=ebb423f10f34ab8a0296615c7bb2662e_1587917103060; _m_h5_tk_enc=f7ee88a93e9cc1354e99a82f68a8055e; x5sec=7b22726174656d616e616765723b32223a223837333430666631343037653137653938396533306232363963666561346464434c61356c765546454d614f6d624f726d39486f3167453d227d; dnk=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; uc1=existShop=false&cookie14=UoTUPcXJy2NZ6g%3D%3D&cookie15=UIHiLt3xD8xYTw%3D%3D&cookie21=VT5L2FSpccLuJBreK%2BBd&pas=0&cookie16=UIHiLt3xCS3yM2h4eKHS9lpEOw%3D%3D; uc3=nk2=0Jw4ZWKL5ncb%2BZbc%2BzeuWaIvSDzGqg%3D%3D&lg2=U%2BGCWk%2F75gdr5Q%3D%3D&id2=UU6kVzP5MUeA7w%3D%3D&vt3=F8dBxGRyNbP9gah03fE%3D; _l_g_=Ug%3D%3D; uc4=nk4=0%400hYwK0dP9pa%2BHtt%2BZ%2BnIIJJ3vuOLV1en5URtcwfplD3h&id4=0%40U2xpVauIQpog3DB9k9NdS5VHJyyf; unb=2654376244; cookie1=VW7p1uG6ehuQnGiMx3e1bs9aTbIAwbuKJpX8eqzeCcI%3D; login=true; cookie17=UU6kVzP5MUeA7w%3D%3D; _nk_=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; sg=%E8%B1%AA42; csg=17e13678; l=eBSn_cgIQpsWV4I3BOfaFurza77OSIRYYuPzaNbMiT5P_WCW5_kOWZjSZW8XC31Vh6lyR3yAaP04BeYBqIq0x6aNa6Fy_Ckmn; isg=BD09weAUP_lJfZulrCdScNF0TJk32nEsVtiNIf-CeRTBNl1oxyqB_Avg4OpwrYnk"
        # The author's own Cookie, used to bypass the login dialog; it may
        # expire eventually (reportedly has not so far).
        }
        self.itemId = itemId
        self.sellerId = sellerId

    def get_page(self):
        """Fetch one page of reviews.

        Forges the three time-derived query parameters the endpoint expects
        (callback / _ksTS / currentPage) and returns the parsed JSON dict,
        or None on any HTTP or parse failure.
        """
        t_param = time.time()
        t_list = str(t_param).split(".")
        params = {"sellerId": self.sellerId,
                  "itemId": self.itemId,
                  "callback": str(int(t_list[1][3:]) + 1),
                  "_ksTS": t_list[0] + t_list[1][:3] + "_" + t_list[1][3:],
                  "currentPage": self.currentPage
                  }
        try:
            # Single request (the original fetched the same URL twice: once
            # for the status check and once more for the body).
            res = requests.get(self.url, params=params, headers=self.headers)
            if res.status_code == 200:
                # Strip the JSONP wrapper "<callback>(" ... ")" before
                # parsing; the extra dumps/loads round trip was removed.
                body = res.text[len(t_list[1][3:]) + 3:-1]
                return json.loads(body)
        except Exception:
            return None
        return None

    def get_content(self, json_data):
        """Yield one dict per review found in json_data.

        Yields nothing (after printing a notice) when json_data is None.
        """
        if json_data != None:
            for item in json_data.get("rateDetail").get("rateList"):
                yield {
                    "content_time": item.get("rateDate"),
                    "content_type": item.get("auctionSku"),
                    "content_name": item.get("displayUserNick"),
                    "content_data": item.get("rateContent"),
                }
        else:
            print("当前页面爬取失败")

    def write_csv(self, data):
        """Append one review dict as a row of the output CSV.

        NOTE(review): the output path is hard-coded; the column header is
        written elsewhere (see the module-level header() function)."""
        with open("E:\\信息安全竞赛\\test - 副本\\data\\testcsv.csv", "a", encoding="utf-8-sig", newline='') as file:
            fieldnames = ["content_time", "content_type", "content_name", "content_data"]
            writer = csv.DictWriter(file, fieldnames=fieldnames)
            writer.writerow(data)

    def main(self):
        """Fetch the configured page and return a generator of review dicts.

        The original called get_content twice and discarded the first
        generator; one call is sufficient."""
        json_data = self.get_page()
        return self.get_content(json_data)



def get_datasest():
    """Load the pre-tokenised corpus as a list of TaggedDocuments.

    Each line of title_fenci.txt is a space-separated token list; the line
    index becomes the document tag.
    """
    with open(u"E:\\信息安全竞赛\\test - 副本\\data\\title_fenci.txt", 'r', encoding='utf-8', errors='ignore') as cf:
        lines = cf.readlines()

    corpus = []
    for idx, line in enumerate(lines):
        tokens = line.split(' ')
        # Only the final token carries the trailing newline; strip it there.
        tokens[-1] = tokens[-1].strip()
        corpus.append(TaggededDocument(tokens, tags=[idx]))
    return corpus

def getVecs(model, corpus, size):
    """Stack the model's learned document vectors into an (n_docs, size) array.

    Looks up each document by its first tag in model.docvecs; `size` must
    match the model's vector dimensionality.
    """
    rows = []
    for doc in corpus:
        vec = model.docvecs[doc.tags[0]]
        rows.append(np.array(vec.reshape(1, size)))
    return np.concatenate(rows)

def train(x_train, size=200, epoch_num=70):
    """Train a Doc2Vec model on the tagged corpus and save it to disk.

    Args:
        x_train: list of TaggedDocument produced by get_datasest().
        size: embedding dimensionality.
        epoch_num: number of training epochs.  The original accepted this
            parameter but ignored it (epochs was hard-coded to 70); the
            default of 70 preserves the old effective behaviour.

    Returns:
        The trained Doc2Vec model (also saved to doc2vec3.model).
    """
    # NOTE(review): `size=` is the pre-gensim-4 keyword; newer gensim
    # versions call it `vector_size` -- confirm the installed version.
    model_dm = Doc2Vec(x_train, min_count=1, window=5, size=size,
                       sample=1e-3, negative=5, workers=4)
    model_dm.train(x_train, total_examples=model_dm.corpus_count,
                   epochs=epoch_num)
    model_dm.save('E:\\信息安全竞赛\\test - 副本\\modle\\doc2vec3.model')

    return model_dm

def get_sentiment_result(text):
    """Classify `text` with Baidu's NLP sentiment API.

    Returns the parsed JSON response dict, or '' when text is empty
    (the API is not called in that case).
    """
    if text == '':
        return ''
    # NOTE(review): hard-coded access token will expire; it should be
    # refreshed via the OAuth token endpoint
    # (https://aip.baidubce.com/oauth/2.0/token).  The original assigned
    # that endpoint to `url` and immediately overwrote it -- dead code,
    # removed here.
    access_token = '24.a7da587a6c20ca031ddc3ba45301ac68.2592000.1591604153.282335-19795418'
    # General-purpose sentiment classification endpoint.
    url = 'https://aip.baidubce.com/rpc/2.0/nlp/v1/sentiment_classify'
    params = {
        'access_token': access_token
    }
    payload = json.dumps({
        'text': text
    })
    headers = {'Content-Type': 'application/json; charset=UTF-8'}
    response = requests.post(url=url, params=params, data=payload, headers=headers).json()
    return response

def header():
    """Append the column-header row to the output CSV.

    NOTE(review): opened in append mode, so calling this more than once
    (it runs once per URL in __main__) writes duplicate header rows.
    """
    with open("E:\\信息安全竞赛\\test - 副本\\data\\testcsv.csv", "a", encoding="utf-8-sig", newline='') as file:
        # Renamed from `list`, which shadowed the builtin.
        fields = ['Timer', 'KIND', 'ID', 'CONTENT', 'SIM', 'length', '积极性', '判定']
        writer = csv.writer(file, delimiter=',')
        writer.writerow(fields)

def test():
    """Score each scraped review and write the results back into testcsv.csv.

    For every review row: computes the most-similar training document via
    the saved Doc2Vec model (column 5), the review length (column 6), the
    Baidu positive_prob (column 7), and a positive/negative label
    (column 8).
    """
    model_dm = Doc2Vec.load("E:\\信息安全竞赛\\test - 副本\\modle\\doc2vec3.model")

    filename = "E:\\信息安全竞赛\\test - 副本\\data\\testcsv.csv"
    list1 = []
    with open(filename, 'r+',encoding='utf-8-sig') as file:
        reader = csv.DictReader(file)
        column = [row['CONTENT'] for row in reader]
        # NOTE(review): the reader is already exhausted by the comprehension
        # above, so `sim` is always an empty list here (and is later
        # shadowed by the loop variable below anyway).
        sim = [row['SIM'] for row in reader]
        #print(column)
    # NOTE(review): the row count 2336 is hard-coded; presumably it should
    # be derived from len(column) -- confirm against the data file.
    for i in range(1,2336):#2861
        print(column[i])
        lenth=len(column[i])
        sen=column[i]
        #print(lenth)
        # NOTE(review): strings are immutable -- the result of this
        # replace() chain is discarded, so the text is NOT actually cleaned.
        column[i].replace('\t', '').replace('\n', '').replace(' ','')
        # Tokenise the review with jieba and round-trip the tokens through
        # a temp file before inferring a vector.
        seg_list = jieba.cut(column[i], cut_all=False)
        f2 =open("E:\\信息安全竞赛\\test - 副本\\data\\text_fenci.txt", 'w',encoding='utf-8',errors='ignore')
        f2.write(" ".join(seg_list))
        f2.close()
        f3 =open("E:\\信息安全竞赛\\test - 副本\\data\\text_fenci.txt", 'r',encoding='utf-8',errors='ignore')
        test_text = f3.read()
        f3.close()
        #print(test_text)
        # Infer a vector for this review and find its closest training doc.
        inferred_vector_dm = model_dm.infer_vector(test_text.split())
        #print (inferred_vector_dm)
        sims = model_dm.docvecs.most_similar([inferred_vector_dm], topn=1)
        for sim in sims:
            sentence = test_text
            words = ''
            # NOTE(review): `sentence` is a string, so sentence[0] is its
            # first character -- this inner loop runs exactly once per sim,
            # and `words` never accumulates the full sentence.
            for word in sentence[0]:
                words = words + word + ' '
                #print (words, sim, len(sentence[0]))
                # Format the (tag, score) pair and keep only the score part
                # (everything after the first space).
                str1=" ".join('%s' % id for id in sim)
                cut1 = str1.index(' ')+1
                print (str1[cut1:])
                intput=str1[cut1:]
                length=str(lenth)
                # Write similarity (col 5) and length (col 6) back to the
                # CSV.  NOTE(review): the whole file is re-read and fully
                # re-written for every single cell update -- very slow.
                h_csv = HandleCsv(u'E:\\信息安全竞赛\\test - 副本\\data\\testcsv.csv')
                h_csv.insert_col(i+2, 5, intput)
                h_csv.list2csv(u'E:\\信息安全竞赛\\test - 副本\\data\\testcsv.csv')
                h_csv2 = HandleCsv(u'E:\\信息安全竞赛\\test - 副本\\data\\testcsv.csv')
                h_csv2.insert_col(i+2, 6, length)
                h_csv2.list2csv(u'E:\\信息安全竞赛\\test - 副本\\data\\testcsv.csv')
                #print(get_sentiment_result(sen))
                # Extract positive_prob by slicing the repr of the Baidu
                # response dict -- fragile; breaks if the key order changes.
                out1 = str(get_sentiment_result(sen)).split("'positive_prob': ")
                out2 = str(out1[1]).split(", 'confidence':")
                print(out2[0])
                jiji=out2[0]
                h_csv3 = HandleCsv(u'E:\\信息安全竞赛\\test - 副本\\data\\testcsv.csv')
                h_csv3.insert_col(i+2, 7, jiji)
                h_csv3.list2csv(u'E:\\信息安全竞赛\\test - 副本\\data\\testcsv.csv')   
                # Label the review positive ("积极") or negative ("消极")
                # at the 0.5 probability threshold (column 8).
                if float(jiji)>0.5:
                    h_csv4 = HandleCsv(u'E:\\信息安全竞赛\\test - 副本\\data\\testcsv.csv')
                    h_csv4.insert_col(i+2, 8, "积极")
                    h_csv4.list2csv(u'E:\\信息安全竞赛\\test - 副本\\data\\testcsv.csv')                
                else:
                    h_csv4 = HandleCsv(u'E:\\信息安全竞赛\\test - 副本\\data\\testcsv.csv')
                    h_csv4.insert_col(i+2, 8, "消极")
                    h_csv4.list2csv(u'E:\\信息安全竞赛\\test - 副本\\data\\testcsv.csv')



if __name__ == '__main__':

    # URL.txt holds one Tmall product URL per line; every listed product is
    # scraped in turn.  (Create the file and point the path at it.)
    with open("E:\\信息安全竞赛\\test - 副本\\data\\URL.txt", "r") as url_file:
        urllist = url_file.readlines()

    for fields in urllist:
        # Pull the product id and seller id out of the URL's query string.
        itemID = (re.search(re.compile(".*&id=?(.*?)&.*"), fields)).group(1)
        sellerID = re.search(re.compile(".*&user_id=?(.*?)&.*"), fields).group(1)
        print("当前爬取商品号：" + itemID + " 店铺号：" + sellerID)
        header()
        for i in range(1, 5):  # review pages 1-4; adjust as needed
            new_data = taobaoSpider_content(itemId=itemID, sellerId=sellerID, currentPage=i)
            print('爬取第%d页中...' % (i))
            # Call main() exactly once.  The original called it twice: the
            # `!= None` check (always true -- get_content is a generator
            # function, so main() never returns None) triggered a second,
            # redundant HTTP fetch of the same page.
            for items in new_data.main():
                new_data.write_csv(items)
            time.sleep(random.randint(3, 6))  # random delay to avoid bans

    # Build the training corpus from the tokenised title file.
    x_train = get_datasest()
    # Load the previously trained Doc2Vec model
    # (the absolute Windows path can be swapped for ./modle/doc2vec3.model).
    model_dm = Doc2Vec.load("E:\\信息安全竞赛\\test - 副本\\modle\\doc2vec3.model")
    # Score and annotate the scraped reviews.
    test()
