#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# 1.需要先下载 scrapy_splash  : pip install scrapy_splash
# 2.修改当前mysql的参数
# 3.修改当前splash的链接 (启动splash项目,需要先安装docker,具体参考https://www.jianshu.com/p/4052926bc12c)

import os, sys
import scrapy
import json
import urllib.parse
import re
import datetime
from scrapy.linkextractors import LinkExtractor
from scrapy import Spider, Request
from urllib.parse import quote
from scrapysplashtest.items import ScrapysplashtestItem
from scrapy_splash import SplashRequest


# Splash rendering script executed via the "execute" endpoint.
# Image loading is disabled BEFORE navigating: the original script toggled
# images_enabled after splash:go, which had no effect on that page load.
lua_script = """
function main(splash)
    splash.images_enabled = false
    splash:go(splash.args.url)
    splash:wait(0.5)
    return splash:html()
end
"""


class TaobaoSpider(Spider):
    """Full-catalogue Taobao/Tmall spider.

    Crawl path: market-list overview page -> Splash-rendered search-list
    pages (with pagination) -> item detail pages (Tmall or Taobao layout)
    -> the feedRateList JSONP comment endpoint, yielding one
    ScrapysplashtestItem per comment.
    """

    name = "ALLtaobao"
    allowed_domains = [
        "www.taobao.com",
        "detail.tmall.com",
        "rate.taobao.com",
        "s.taobao.com",
    ]
    base_url = "https://www.taobao.com/tbhome/page/market-list"
    # NOTE(review): declared but never attached to any request below —
    # confirm whether it should be passed as headers= on the requests.
    header = {
        "Host": "s.taobao.com",
        "Connection": "keep-alive",
        "Cookie": "_uab_collina=152384731481486255434221; _umdata=70CF403AFFD707DF1A85F005364DC10B4EC91835EB1330570EBBEE6ED83206C5D22033AB874D1A3DCD43AD3E795C914C27AEEB05138BEBFEE2EBDC2C9B919A87",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0",
        "Accept-Encoding": "gzip, deflate, sdch",
        "Accept-Language": "zh-CN,zh;q=0.8",
    }

    # The rate endpoint serves 20 comments per page and, per the original
    # author's measurement, exposes at most 251 pages.
    COMMENTS_PER_PAGE = 20
    MAX_COMMENT_PAGES = 251
    COMMENT_URL_TMPL = (
        "https://rate.taobao.com/feedRateList.htm"
        "?auctionNumId={shop_id}&currentPageNum={page}"
    )

    def start_requests(self):
        """Seed the crawl with the market-list overview page."""
        yield scrapy.Request(self.base_url, self.parse, dont_filter=True)

    def parse(self, response):
        """Extract search-list links from the market page and render each via Splash."""
        # '.' and '?' must be escaped: the original r".*//s.taobao.com/list?.*"
        # made the trailing 't' optional ('list?' == 'lis' + optional 't') and
        # let '.' match any character.
        pattern = r".*//s\.taobao\.com/list\?.*"
        le = LinkExtractor(allow=pattern)
        # links = [link.url for link in le.extract_links(response)]
        # Hard-coded seed list kept from the original (link extraction disabled).
        links = ['https://s.taobao.com/list?spm=a21bo.7723600.8557.64.38595ec99ghkF1&seller_type=taobao&initiative_id=tbindexz_20150610&sourceId=tb.index&search_type=item&ssid=s5-e&commend=all&q=%E7%94%B5%E6%B1%A0&cat=50008090&suggest=cat_2&_input_charset=utf-8&wq=%E9%90%A2%E5%9E%AB%E7%9D%9C&suggest_query=%E9%90%A2%E5%9E%AB%E7%9D%9C&source=suggest']
        print("发现list页面共：【%s】" % len(links))

        for link in links:
            print("-------------------->%s" % link)
            yield SplashRequest(
                link,
                callback=self.next_page,
                endpoint="execute",
                args={"lua_source": lua_script},
                dont_filter=True,
            )

    def next_page(self, response):
        """Read the total page count from a search-list page and request every page."""
        print("开始获取下一页")
        # Pager text such as "共100页" -> 100; fall back to 5 pages when the
        # pager node is missing from the rendered page.
        dirty_total = response.xpath(
            '//*[@id="listsrp-pager"]/div/div/div/div[1]/text()'
        ).extract_first()
        if dirty_total is not None:
            page_total = int(re.findall(r"\d+\.?\d*", dirty_total)[0])
        else:
            page_total = 5
        print("开始获取下一页")
        # "&s=" is the result offset: 60 items per list page.
        for page in range(page_total + 1):
            page_url = response.url + "&s=" + str(page * 60)
            print("获取list：【%s】，第【%s】页。" % (response.url, page))
            yield SplashRequest(
                page_url,
                callback=self.parse_shop,
                endpoint="execute",
                args={"lua_source": lua_script},
                dont_filter=True,
            )

    def parse_shop(self, response):
        """Extract per-item summary fields from a list page and request detail pages."""
        print(response.url)
        print("开始全量商品页")
        # The search keyword (&q=...) doubles as the item's classification.
        classification = re.findall(r"&q=(.*?)&", response.url)
        if classification:
            classification = urllib.parse.unquote(classification[0])
        else:
            classification = "无分类"
        products = response.xpath(
            '//div[@id="listsrp-itemlist"]//div[@class="items"][1]//div[contains(@class, "item")]'
        )
        print("解析列表页商品信息")
        for product in products:
            price = "".join(
                product.xpath('.//div[contains(@class, "price")]//text()').extract()
            ).strip()
            title = "".join(
                product.xpath('.//div[contains(@class, "title")]//text()').extract()
            ).strip()
            shop = "".join(
                product.xpath('.//div[contains(@class, "shop")]//text()').extract()
            ).strip()
            # extract_first() may return None (lazy-loaded image without
            # data-src); joining None raised TypeError in the original.
            image = (
                product.xpath(
                    './/div[@class="pic"]//img[contains(@class, "img")]/@data-src'
                ).extract_first()
                or ""
            ).strip()
            deal_preson = product.xpath(
                './/div[contains(@class, "deal-cnt")]//text()'
            ).extract_first()
            location = product.xpath(
                './/div[contains(@class, "location")]//text()'
            ).extract_first()
            shop_id = product.css("div .pic a::attr('data-nid')").extract_first()
            shop_url = "https://detail.tmall.com/item.htm?id=" + str(shop_id)
            # Carried through request.meta down to the comment items.
            shop_info = {
                "classification": classification,
                "shop_url": shop_url,
                "shop_id": shop_id,
                "title": title,
                "shop": shop,
                "image": image,
                "price": price,
                "deal_preson": deal_preson,
                "location": location,
            }
            print("商品url是：%s" % shop_url)
            yield SplashRequest(
                shop_url,
                callback=self.shop_info_parse,
                meta=shop_info,
                args={"images": 0, "lua_source": lua_script},
                cache_args=["lua_source"],
                dont_filter=True,
            )

    def shop_info_parse(self, response):
        """Parse an item detail page and fan out one request per comment page.

        Tmall and Taobao detail pages use different markup: each field is
        tried with the Tmall selector first and, when that yields None,
        re-read with the Taobao selector.
        """
        print("开始解析商品详情页")
        meta = response.meta
        shop_id = meta.get("shop_id")
        shop_url = meta.get("shop_url")
        title = meta.get("title")
        shop = meta.get("shop")
        image = meta.get("image")
        price = meta.get("price")
        deal_preson = meta.get("deal_preson")
        location = meta.get("location")
        classification = meta.get("classification")
        # Comment count — Tmall layout first.
        comment_num = response.xpath(
            '//*[@id="J_ItemRates"]/div/span[2]/text()'
        ).extract_first()
        if comment_num is None:
            # Taobao layout.
            comment_num = response.xpath(
                '//*[@id="J_TabBar"]/li[2]/a/em/text()'
            ).extract_first()
            deal_30 = response.xpath(
                '//*[@id="J_Counter"]/div/div[2]/a/@title/text()'
            ).extract_first()
            original_preice = response.xpath(
                '//*[@id="J_StrPrice"]/em[2]/text()'
            ).extract_first()
        else:
            deal_30 = response.xpath(
                '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/ul/li[1]/div/span[2]/text()'
            ).extract_first()  # 30-day sales
            original_preice = response.xpath(
                '//*[@id="J_DetailMeta"]/div[1]/div[1]/div/div[2]/dl[1]/dd/span/text()'
            ).extract_first()  # original price
        # Shop rating block — Tmall layout first.
        store_describe = response.xpath(
            '//*[@id="shop-info"]/div[2]/div[1]/div[2]/span/text()'
        ).extract_first()
        if store_describe is None:
            # Taobao layout.
            store_describe = response.xpath(
                '//*[@id="J_ShopInfo"]/div/div[2]/div/dl[1]/dd/a/text()'
            ).extract_first()
            store_service = response.xpath(
                '//*[@id="J_ShopInfo"]/div/div[2]/div/dl[2]/dd/a/text()'
            ).extract_first()
            store_logistics = response.xpath(
                '//*[@id="J_ShopInfo"]/div/div[2]/div/dl[3]/dd/a/text()'
            ).extract_first()
        else:
            store_service = response.xpath(
                '//*[@id="shop-info"]/div[2]/div[2]/div[2]/span/text()'
            ).extract_first()  # shop service rating
            store_logistics = response.xpath(
                '//*[@id="shop-info"]/div[2]/div[3]/div[2]/span/text()'
            ).extract_first()  # shop logistics rating
        store_time = response.xpath(
            '//*[@id="ks-component1974"]/div/div/div/div[2]/ul/li[3]/div/span[2]/text()'
        ).extract_first()  # shop age
        # Stock — Tmall id first, Taobao id as fallback.
        inventory = response.xpath('//*[@id="J_EmStock"]/text()').extract_first()
        if inventory is None:
            inventory = response.xpath('//*[@id="J_SpanStock"]/text()').extract_first()
        # The raw count may carry non-digit characters (e.g. "1.2万+"), which
        # made the original int() call crash; extract digits defensively and
        # default to a single comment.
        digit_groups = re.findall(r"\d+", comment_num or "")
        comment_num = int(digit_groups[0]) if digit_groups else 1
        shop_info = {
            "classification": classification,
            "shop_id": shop_id,
            "title": title,
            "shop": shop,
            "shop_url": shop_url,
            "image": image,
            "price": price,
            "deal_preson": deal_preson,
            "location": location,
            "comment_num": comment_num,
            "deal_30": deal_30,
            "original_preice": original_preice,
            "store_describe": store_describe,
            "store_service": store_service,
            "store_logistics": store_logistics,
            "store_time": store_time,
            "inventory": inventory,
        }
        # ceil(comment_num / 20) pages, capped at the endpoint's maximum.
        # The original yielded page 1 and then pages 0..1 again for small
        # counts (triple fetch) and used round(), dropping the last partial
        # page for counts like 21.
        page = -(-comment_num // self.COMMENTS_PER_PAGE)
        page = max(1, min(page, self.MAX_COMMENT_PAGES))
        print("30天购买人数为：%s" % deal_preson)
        print("历史价格为：%s" % original_preice)
        print("商品评论数为：%s" % comment_num)
        print("商品30天销量为：%s" % deal_30)
        print("评论共计：%s页" % page)
        for page_num in range(1, page + 1):
            comment_url = self.COMMENT_URL_TMPL.format(shop_id=shop_id, page=page_num)
            yield scrapy.Request(
                comment_url, callback=self.comment_parse, meta=shop_info
            )

    def comment_parse(self, response):
        """Parse the feedRateList JSONP payload; yield one item per comment."""
        print("开始解析评论信息")
        print(response.url)
        # Unwrap the JSONP padding by slicing the outermost JSON object.
        # The original removed EVERY parenthesis from the body, which also
        # mangled comment text containing '(' or ')'.
        body = response.text
        start = body.find("{")
        end = body.rfind("}")
        if start == -1 or end == -1:
            return
        json_data = json.loads(body[start:end + 1])
        comments = json_data.get("comments") or []
        spider_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        for comment in comments:
            # Build a FRESH item per comment: the original reused one mutable
            # item across yields, so asynchronous pipeline processing saw the
            # last comment's fields on every item.
            item = ScrapysplashtestItem()
            item["shop_url"] = response.meta.get("shop_url")
            item["shop_id"] = response.meta.get("shop_id")
            item["title"] = response.meta.get("title")
            item["shop"] = response.meta.get("shop")
            item["image"] = response.meta.get("image")
            item["price"] = response.meta.get("price")
            item["deal_preson"] = response.meta.get("deal_preson")
            item["location"] = response.meta.get("location")
            item["comment_num"] = response.meta.get("comment_num")
            item["deal_30"] = response.meta.get("deal_30")
            item["original_preice"] = response.meta.get("original_preice")
            item["store_describe"] = response.meta.get("store_describe")
            item["store_service"] = response.meta.get("store_service")
            item["store_logistics"] = response.meta.get("store_logistics")
            item["store_time"] = response.meta.get("store_time")
            item["inventory"] = response.meta.get("inventory")
            item["spider_datetime"] = spider_datetime
            item["comment_date"] = comment["date"]
            item["content"] = comment["content"]
            item["rateId"] = comment["rateId"]
            item["sku"] = comment["auction"]["sku"]
            item["nick"] = comment["user"]["nick"]
            item["vipLevel"] = comment["user"]["vipLevel"]
            item["rank"] = comment["user"]["rank"]
            print(
                comment["date"],
                comment["content"],
                comment["rateId"],
                comment["auction"]["sku"],
                comment["user"]["nick"],
                comment["user"]["vipLevel"],
                comment["user"]["rank"],
            )
            print("yield全量数据")
            yield item
