import time
import requests
import json
import csv
import re
import random
import os

def header():
    """Append the CSV header row to ./data/testcsv.csv.

    Call once, before any data rows are written. ``utf-8-sig`` adds a BOM so
    Excel opens the file as UTF-8; ``newline=''`` stops the csv module from
    emitting blank lines on Windows.
    """
    with open("./data/testcsv.csv", "a", encoding="utf-8-sig", newline='') as file:
        # Renamed from `list` to avoid shadowing the builtin.
        # NOTE(review): these 8 columns do not match the 4 fieldnames used by
        # taobaoSpider_content.write_csv — presumably a leftover from another
        # pipeline stage; confirm which schema is intended.
        columns = ['Timer', 'KIND', 'ID', 'CONTENT', 'SIM', 'length', '积极性', '判定']
        writer = csv.writer(file, delimiter=',')
        writer.writerow(columns)

class taobaoSpider_content():
    """Scrape product reviews from Tmall's ``list_detail_rate`` JSONP endpoint.

    get_page: builds the request parameters, fetches one page and returns the
        parsed JSON dict (or None on any failure).
    get_content: extracts the interesting fields and yields one dict per review.

    Attributes:
        itemId: product ID (``id=`` query parameter of the product URL)
        sellerId: shop/seller ID (``user_id=`` query parameter)
        currentPage: 1-based review page number to fetch
    """

    def __init__(self, itemId, sellerId, currentPage):
        self.currentPage = currentPage
        self.url = "https://rate.tmall.com/list_detail_rate.htm?"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36",
            "referer": "https://detail.tmall.com/item.htm?spm=a220m.1000858.1000725.8.12357de7uI1uOu&id=602977570356&skuId=4304210164437&areaId=510100&user_id=3351172141&cat_id=2&is_b=1&rn=d2044f57561b87fd83ad00ce338f0cd1&on_comment=1",
            "Cookie": "lid=%E8%B6%85%E5%A8%81%E8%93%9D%E7%8C%AB%E6%88%91%E6%9C%89%E7%9F%A5%E8%AF%86%E6%88%91%E8%87%AA%E8%B1%AA; enc=XJwcK4DybRwOk4R5bu%2FJtcXN2m4Kb5LngN9e7EDyoQu6UugNceB3rwsTxLri3HUWF6mWjfbvovS8HVZhYUcAHA%3D%3D; cna=IakcF4CQm3gCAXW7+dxgQxGp; sgcookie=E2lPAmrso4SdnP1qLE8a6; t=1f89b53da080fcee11386787b0604be1; tracknick=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; lgc=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; _tb_token_=318b35fe300eb; cookie2=1e4c25b69b94c9fe58758553009376f5; _m_h5_tk=ebb423f10f34ab8a0296615c7bb2662e_1587917103060; _m_h5_tk_enc=f7ee88a93e9cc1354e99a82f68a8055e; x5sec=7b22726174656d616e616765723b32223a223837333430666631343037653137653938396533306232363963666561346464434c61356c765546454d614f6d624f726d39486f3167453d227d; dnk=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; uc1=existShop=false&cookie14=UoTUPcXJy2NZ6g%3D%3D&cookie15=UIHiLt3xD8xYTw%3D%3D&cookie21=VT5L2FSpccLuJBreK%2BBd&pas=0&cookie16=UIHiLt3xCS3yM2h4eKHS9lpEOw%3D%3D; uc3=nk2=0Jw4ZWKL5ncb%2BZbc%2BzeuWaIvSDzGqg%3D%3D&lg2=U%2BGCWk%2F75gdr5Q%3D%3D&id2=UU6kVzP5MUeA7w%3D%3D&vt3=F8dBxGRyNbP9gah03fE%3D; _l_g_=Ug%3D%3D; uc4=nk4=0%400hYwK0dP9pa%2BHtt%2BZ%2BnIIJJ3vuOLV1en5URtcwfplD3h&id4=0%40U2xpVauIQpog3DB9k9NdS5VHJyyf; unb=2654376244; cookie1=VW7p1uG6ehuQnGiMx3e1bs9aTbIAwbuKJpX8eqzeCcI%3D; login=true; cookie17=UU6kVzP5MUeA7w%3D%3D; _nk_=%5Cu8D85%5Cu5A01%5Cu84DD%5Cu732B%5Cu6211%5Cu6709%5Cu77E5%5Cu8BC6%5Cu6211%5Cu81EA%5Cu8C6A; sg=%E8%B1%AA42; csg=17e13678; l=eBSn_cgIQpsWV4I3BOfaFurza77OSIRYYuPzaNbMiT5P_WCW5_kOWZjSZW8XC31Vh6lyR3yAaP04BeYBqIq0x6aNa6Fy_Ckmn; isg=BD09weAUP_lJfZulrCdScNF0TJk32nEsVtiNIf-CeRTBNl1oxyqB_Avg4OpwrYnk"
        # Author's own Cookie, used to bypass the login dialog; may expire.
        }
        self.itemId = itemId
        self.sellerId = sellerId

    def get_page(self):
        """Fetch one review page and return the parsed JSON dict, or None.

        ``callback``/``_ksTS`` are derived from the current timestamp to mimic
        the browser's JSONP request. The response body is
        ``<callback>({...json...})``, so the outermost ``{...}`` is extracted
        before parsing.

        Fixes vs. original: the request is issued once (it was sent twice —
        once for the status check and again for the body); the JSONP wrapper
        is stripped with a regex instead of slicing by callback length, which
        broke whenever ``int(...) + 1`` gained a digit; and the bare
        ``except:`` is narrowed to network/parse errors.
        """
        t_param = time.time()
        t_list = str(t_param).split(".")
        params = {"sellerId": self.sellerId,
                  "itemId": self.itemId,
                  "callback": str(int(t_list[1][3:]) + 1),
                  "_ksTS": t_list[0] + t_list[1][:3] + "_" + t_list[1][3:],
                  "currentPage": self.currentPage
                  }
        try:
            res = requests.get(self.url, params=params, headers=self.headers)
            if res.status_code != 200:
                return None
            # Extract the JSON payload from the JSONP wrapper.
            match = re.search(r"\{.*\}", res.text, re.S)
            if match is None:
                return None
            return json.loads(match.group(0))
        except (requests.RequestException, ValueError):
            # ValueError covers json.JSONDecodeError.
            return None

    def get_content(self, json_data):
        """Yield one dict of review fields per entry in *json_data*.

        Yields nothing (after printing a notice) when the fetch failed or the
        payload lacks the expected ``rateDetail.rateList`` structure.
        """
        if json_data is None:
            print("当前页面爬取失败")
            return
        rate_detail = json_data.get("rateDetail") or {}
        for item in rate_detail.get("rateList") or []:
            yield {
                "content_time": item.get("rateDate"),
                "content_type": item.get("auctionSku"),
                "content_name": item.get("displayUserNick"),
                "content_data": item.get("rateContent"),
            }

    def write_txt(self, data):
        """Append one review dict to ./data/testcsv.txt as pretty-printed JSON.

        ``ensure_ascii=False`` keeps Chinese text readable, so the file must be
        written as UTF-8; ``indent=2`` is purely cosmetic.
        """
        with open("./data/testcsv.txt", "a", encoding="utf-8") as file:
            file.write(json.dumps(data, indent=2, ensure_ascii=False))
            file.write("\n")

    def write_csv(self, data):
        """Append one review dict as a row of ./data/testcsv.csv."""
        with open("./data/testcsv.csv", "a", encoding="utf-8-sig", newline='') as file:
            fieldnames = ["content_time", "content_type", "content_name", "content_data"]
            writer = csv.DictWriter(file, fieldnames=fieldnames)
            writer.writerow(data)

    def main(self):
        """Fetch the configured page and return a generator of review dicts."""
        # Original built a first generator and discarded it; one is enough.
        return self.get_content(self.get_page())


if __name__ == "__main__":
    # Start from a clean output file so header() writes the header exactly once.
    if os.path.exists("./data/testcsv.csv"):
        os.remove("./data/testcsv.csv")
        print('已删除')
    else:
        print('无')
    # URL.txt is the URL collection: drop one or more Tmall product URLs into
    # it, one per line. (Original leaked the file handle; `with` closes it.)
    with open("./data/URL.txt", "r") as file:
        urllist = file.readlines()
    header()
    # Compile once, outside the loop, instead of per URL.
    id_pattern = re.compile(".*&id=?(.*?)&.*")
    seller_pattern = re.compile(".*&user_id=?(.*?)&.*")
    for fields in urllist:
        itemID = id_pattern.search(fields).group(1)
        sellerID = seller_pattern.search(fields).group(1)
        print("当前爬取商品号："+itemID+" 店铺号："+sellerID)
        for i in range(1, 88):  # review pages 1..87 (original comment said "first 3 pages" — it wasn't)
            spider = taobaoSpider_content(itemId=itemID, sellerId=sellerID, currentPage=i)
            print('爬取第%d页中...'%(i))
            # Original called spider.main() twice, fetching every page over the
            # network twice; fetch once and reuse the generator.
            rows = spider.main()
            if rows is not None:
                for items in rows:
                    # spider.write_txt(items)
                    spider.write_csv(items)
            # Random delay so the crawler is less likely to get blocked.
            time.sleep(random.randint(3, 6))


