import sys
sys.path.append("public")
from demo import bot
from urllib.parse import quote
from urllib.parse import urljoin
from pyquery import PyQuery as jq
import json,re
import math
import random
import time
import logging
from openpyxl import *

import jieba
from os import path
from scipy.misc import imread
import matplotlib.pyplot as plt
from wordcloud import WordCloud, ImageColorGenerator
from snownlp import SnowNLP
keys = "空调" # search keyword ("air conditioner")
mainurl = "https://jd.com"  # site base URL
xls_file = f"{keys}.xlsx"  # Excel output file, named after the keyword
# Excel header row: product name, shop name, sales, price, URL,
# bottom labels, comment category, comment.
datacol = ["商品名", "店铺名", "销量", "价格","网址","底部标签","评论分类","评论"]
class spider(bot):
    """JD.com product crawler.

    Crawls search-result pages for the module-level keyword ``keys``,
    fetches each product's hot-comment tags and the comments under every
    tag, then exports the collected data as JSON, an Excel workbook,
    per-product word-cloud images and sentiment-analysis text files.
    """

    def __init__(self, mainurl=None):
        """Set up endpoints and in-memory result containers.

        mainurl: base URL forwarded to the `bot` base class.
        """
        super(spider, self).__init__(mainurl)
        # Search endpoint (pagination happens via its `page` parameter).
        self.search_url = "https://search.jd.com/Search"
        # JSONP endpoint that serves product comments.
        self.comments = "https://sclub.jd.com/comment/productPageComments.action?"
        self.mylist = []            # one dict per scraped product
        self.all_dict = {}          # product record currently being built
        self.comment_list = []      # comments of the tag being processed
        self.wb_content_type = ""   # last category header written to Excel

    def get_xls_cursor(self):
        """Create the output workbook (or reopen an existing one) and
        select the active sheet as ``self.sheet``."""
        self.wb = Workbook()
        # Fix: the original called `os.path.exists`, but the module only
        # does `from os import path`, so `os` raised NameError here.
        if path.exists(xls_file):
            self.wb = load_workbook(xls_file)
        else:
            self.sheet = self.wb.active
            self.sheet.append(datacol)   # header row on first creation
            self.wb.save(xls_file)
        self.sheet = self.wb.active
        self.sheet.title = "datas"

    def data_parser(self, types, resp, jsonpcode="", shopid=None):
        """Parse a search-result response and collect product records.

        types: parser selector; only "page" is handled here.
        resp: HTTP response of a search-result page.
        jsonpcode / shopid: unused, kept for interface compatibility.
        """
        try:
            # Fix: was `types in "page"` — a substring test that also
            # matched "" and "age"; an equality test is what was meant.
            if types == "page":
                jqdata = jq(resp.content)  # parse the page HTML
                # Product cards are the <li> items under #J_goodsList.
                for index, item in enumerate(jqdata("#J_goodsList > ul > li").items()):
                    if index > 3:   # limit to the first 4 products per page
                        break
                    sku = item.attr('data-sku')  # product code
                    # Product title (newlines collapsed to spaces).
                    next_name = item(
                            "div > div.p-name.p-name-type-2 > a > em").text().strip().replace('\n', ' ')
                    # Price.
                    price = item(
                            "div > div.p-price > strong > i").text()
                    # Sales volume (comment count shown on the card).
                    comment = item(
                            "div > div.p-commit > strong > a").text()
                    # Detail-page URL.
                    jsonurl = urljoin("https://item.jd.com", item(
                            "div > div.p-name.p-name-type-2 > a").attr('href'))
                    # Bottom promotional labels.
                    bottom_lable = item(
                            "div > div.p-icons").text()
                    # Shop name.
                    store_name = item("div > div.p-shop > span >  a").text()
                    self.logger.info(f"current sku : {sku}")
                    # Fix: getPercentType returns None when the request
                    # fails; guard so the tag loop below does not crash.
                    hotCommentTag = self.getPercentType(sku) or []
                    self.all_dict = {'sku':sku,'content':{'name':next_name,'store_name':store_name,'info_url':jsonurl,'price':price,'comment':comment,'bottom_lable':bottom_lable}}
                    for indexs, tag in enumerate(hotCommentTag):
                        count = int(tag['count'])
                        name = tag['name']
                        rid = tag['rid']
                        self.logger.info(f"current hotCommentTag : {name} : {count}")
                        # 10 comments per page -> count // 10 + 1 pages.
                        for pages in range(0, count // 10 + 1):
                            self.getContent(pages, sku, rid)
                        self.all_dict[name] = self.comment_list
                        self.comment_list = []
                    self.logger.info(f"current hotCommentTag : next")
                    self.mylist.append(self.all_dict)
        except Exception as e:
            # Fix: was a bare print(e); log the full traceback instead so
            # parse failures are visible in the crawl log.
            self.logger.exception(f"data_parser failed: {e}")

    def loads_jsonp(self, _jsonp):
        """Strip the JSONP callback wrapper and return the parsed JSON.

        Raises ValueError('Invalid Input') when no JSON object is found
        or the payload does not parse — same contract as before, but the
        original bare ``except:`` also swallowed KeyboardInterrupt and
        hid the real cause.
        """
        matched = re.match(r".*?({.*}).*", _jsonp, re.S)
        if matched is None:
            raise ValueError('Invalid Input')
        try:
            return json.loads(matched.group(1))
        except json.JSONDecodeError as err:
            raise ValueError('Invalid Input') from err

    def parseJson(self, data):
        """Append nickname/content pairs from a comments JSON payload
        to ``self.comment_list``."""
        for comment in data['comments']:
            self.comment_list.append(
                {"nickname": comment['nickname'], "content": comment['content']})

    def getPercentType(self, skuid):
        """Fetch the hot-comment-tag statistics for one product.

        Returns the ``hotCommentTagStatistics`` list, or None when the
        response check fails.
        """
        params = {"callback": "callbackFunction",
                  "productId": skuid,
                  "score": "0",
                  "sortType": "5",
                  "page": "2",
                  "pageSize": "1"}
        self.logger.info(f"current PercentType")
        resp = self.session.get(self.comments, params=params)
        if self.resp_check(resp):  # response validation
            self.logger.info("current getJson")
            myjson = self.loads_jsonp(resp.text)
            return myjson['hotCommentTagStatistics']
        return None

    def getContent(self, page, skuid, rid):
        """Fetch one page of comments for tag ``rid`` of product
        ``skuid`` and accumulate them via ``parseJson``."""
        params = {"callback": "callbackFunction",
                  "productId": skuid,
                  "rid": rid,
                  "score": "0",
                  "sortType": "5",
                  "page": page,
                  "pageSize": "10"}
        self.logger.info(f"current page_number -> [{page}]")
        resp = self.session.get(self.comments, params=params)
        if self.resp_check(resp):  # response validation
            self.parseJson(self.loads_jsonp(resp.text))

    def resp_check(self, resp, login=False):
        """Return True for an HTTP 200 response, False otherwise.

        Also rotates the User-Agent when ``self.randomUA`` is set.
        NOTE(review): the original docstring claimed automatic user
        switching on 429/403; that is not implemented here.
        """
        if self.randomUA:
            self.crawler.random_user_agent()
        # Fix: make the failure path an explicit False instead of an
        # implicit None fall-through.
        return resp.status_code == 200

    def get_page(self, page):
        """Request one search-result page for ``keys`` and hand the
        response to ``data_parser``."""
        # GET parameters driving pagination; JD renders two result
        # chunks per visual page, hence 1 + (page - 1) * 2.
        params = {"bs": "1",
                  "click": "0",
                  "enc": "utf-8",
                  "keyword": keys,  # search keyword
                  "page": 1 + (page - 1) * 2,
                  "qrst": "1",
                  "rt": "1",
                  "stop": "1",
                  "suggest": "1.def.0.V08",
                  "vt": "2",
                  "wq": "iph"}
        self.logger.info(f"current page_number -> [{page}]")
        resp = self.session.get(self.search_url, params=params)
        if self.resp_check(resp):  # response validation
            self.data_parser("page", resp)

    def start(self):
        """Crawl the configured page range, then dump everything
        collected so far to ``<keys>.json``."""
        for page in range(2, 3):  # page range to crawl
            self.get_page(page)
        with open(f"./{keys}.json", 'w', encoding='utf-8') as json_file:
            json.dump({"list": self.mylist}, json_file, ensure_ascii=False)

    def savexlsx(self):
        """Write every scraped product plus its comments to the workbook."""
        # Matches CJK characters — comment-category keys are Chinese,
        # which distinguishes them from the fixed 'sku'/'content' keys.
        zhPattern = re.compile('[\u4e00-\u9fa5]+')
        for item in self.mylist:
            # ["商品名", "店铺名", "销量", "价格","网址", "评论"]
            self.sheet.append([
                item['content']['name'],
                item['content']['store_name'],
                item['content']['comment'],
                item['content']['price'],
                item['content']['info_url'],
                item['content']['bottom_lable']
            ])
            for k, v in item.items():
                if zhPattern.search(k):
                    # Fix: was `k is self.wb_content_type` — an identity
                    # comparison of strings; equality is what was meant.
                    types = "" if k == self.wb_content_type else k
                    self.wb_content_type = k
                    # Category header row (column 7 only).
                    self.sheet.append(["", "", "", "", "", "", types, ""])
                    # One comment per row (column 8 only). The original
                    # wrapped these in `try: ... except: raise`, which is
                    # a no-op and was removed.
                    for conent in v:
                        self.sheet.append(["", "", "", "", "", "", "", conent['content']])
            self.wb.save(xls_file)

    def savexlsx_helper(self):
        pass

    def save_ciping(self):
        """Render one word-cloud image per scraped product."""
        back_coloring_path = "aaa.jpg"  # background/mask image path
        font_path = 'simkai.ttf'        # CJK-capable font path
        # Fix: scipy.misc.imread was removed in SciPy >= 1.2; use
        # matplotlib's imread (already imported as plt) instead.
        back_coloring = plt.imread(back_coloring_path)
        image_colors = ImageColorGenerator(back_coloring)
        # Word-cloud rendering settings.
        wc = WordCloud(font_path=font_path,          # font
                       background_color="white",     # canvas colour
                       max_words=2000,               # max words shown
                       mask=back_coloring,           # shape mask
                       max_font_size=100,
                       random_state=42,
                       # With a mask the output adopts the mask's size;
                       # margin is the spacing between words.
                       width=1000, height=860, margin=2,
                       )
        zhPattern = re.compile('[\u4e00-\u9fa5]+')  # CJK category keys
        for index, sku in enumerate(self.mylist):
            # Concatenate every comment of every category for this
            # product (str.join avoids quadratic += concatenation).
            parts = []
            for k, v in sku.items():
                if zhPattern.search(k):
                    parts.extend(c['content'] for c in v)
            all_str = "".join(parts)
            wc.generate(all_str)
            # Recolor from the mask image before rendering/saving.
            wc.recolor(color_func=image_colors)
            plt.figure()
            plt.imshow(wc)
            plt.axis("off")
            self.logger.info(f"保存商品{index}")
            wc.to_file(f'商品{index}.jpg')

    def savesnowNlp(self):
        """Run sentence-level sentiment analysis per product and write
        'sentence(score)' pairs to a text file."""
        zhPattern = re.compile('[\u4e00-\u9fa5]+')  # CJK category keys
        for index, sku in enumerate(self.mylist):
            # Same concatenation scheme as save_ciping.
            parts = []
            for k, v in sku.items():
                if zhPattern.search(k):
                    parts.extend(c['content'] for c in v)
            all_str = "".join(parts)
            s = SnowNLP(all_str)
            scored = []
            for sentence in s.sentences:
                score = SnowNLP(sentence).sentiments  # 0..1 sentiment
                scored.append(sentence + '(' + str(score) + ')')
            with open(f"情感分析{index}.txt", 'w', encoding='utf-8') as file:
                file.write(','.join(scored))

        
if __name__ == '__main__':
    # Run the full pipeline: crawl, then export every report format.
    # Ctrl-C aborts quietly without a traceback.
    try:
        crawler = spider("https://jd.com")
        crawler.start()
        crawler.get_xls_cursor()
        for export_step in (crawler.savexlsx,
                            crawler.save_ciping,
                            crawler.savesnowNlp):
            export_step()
    except KeyboardInterrupt:
        pass
