# -*- coding: utf-8 -*-
import scrapy
import os
from lxml import etree
from scrapy.http import Request
from DynamicSpider.utils.driver_helper import  driver_headless
from DynamicSpider.JDGoodsItem import JDGoodsItem
from DynamicSpider.utils.publicFun import deleteHtmlTags,download_image,urlTofileName
import time

class JdSpider(scrapy.Spider):
    """Scrapy spider that crawls JD.com search results for a user-supplied
    keyword over a user-supplied page range.

    Page rendering is delegated to a headless Selenium driver (created in
    ``__init__``); responses are then parsed with the normal scrapy/lxml
    selector APIs. Yields ``JDGoodsItem`` instances with title, price,
    detail URL, image URL and the local path of the downloaded image.
    """
    name = 'jd'
    allowed_domains = ['www.jd.com']
    start_urls = ['http://www.jd.com/']

    def __init__(self):
        '''
        Spider initialisation: prompt the user for the search keyword and
        the page range, start the headless driver and create the image
        output directory.
        '''
        super().__init__(self.name)

        self.query_key = input("请输入京东要爬取的关键字:")

        InStr = input("请输入爬去页码范围\n如：爬取第5页至第8页，就输入 5-8:\n")

        page_lst = InStr.split("-")

        # Accept only "<start>-<end>" with both parts numeric and start <= end;
        # anything else falls back to page 1 only.
        if len(page_lst) == 2 and \
                page_lst[0].isdigit() and \
                page_lst[1].isdigit() and \
                int(page_lst[0]) <= int(page_lst[1]):
            self.start_page = int(page_lst[0])
            self.end_page = int(page_lst[1])
        else:
            print("输入页码范围错误，默认爬取第1页！！！！")
            self.start_page = 1
            self.end_page = 1

        print(f"正在为您爬取第{self.start_page}页至第{self.end_page}页,请稍后...")

        self.driver = driver_headless()

        # Abort page loads that take longer than 30 s.
        self.driver.set_page_load_timeout(30)

        os.makedirs(os.path.join(os.path.abspath("."), 'image'), exist_ok=True)

    def __del__(self):
        '''
        Destructor: shut down the Selenium driver.

        Uses quit() (not close()) so the whole browser session and the
        driver process are terminated rather than just the current window,
        which would otherwise leak a headless browser process. Guarded so
        a failed __init__ (no ``driver`` attribute) does not raise here.
        '''
        driver = getattr(self, 'driver', None)
        if driver is not None:
            try:
                driver.quit()
            except Exception:
                # Never let interpreter shutdown raise from a destructor.
                pass

    def start_requests(self):
        '''
        Override start_requests so every start URL carries the metadata
        (request type and search keyword) that the download middleware /
        parse callbacks expect.
        :return: iterator of scrapy.Request
        '''
        for url in self.start_urls:
            yield Request(url,
                          meta={'type': 'jd_home', 'query_key': self.query_key},
                          callback=self.parse_jd,
                          dont_filter=True)

    def parse_jd(self, response):
        '''
        Parse the search landing page rendered by Selenium: read the total
        page count, clamp the requested range to it, then schedule one
        request per page.
        :param response: rendered search-result page
        :return: iterator of per-page scrapy.Request
        '''
        pages_str = response.xpath(
            "//div[@id='J_bottomPage']/span[@class='p-skip']/em/b/text()").extract_first()

        # extract_first() returns None when the pager is missing; keep the
        # user-requested end_page in that case instead of crashing on int().
        try:
            count = int(pages_str)
        except (TypeError, ValueError):
            count = self.end_page

        # Never request pages beyond what the site reports as available.
        if self.end_page > count:
            self.end_page = count

        for page in range(self.start_page, self.end_page + 1):
            yield Request(url=self.start_urls[0],
                          meta={'type': 'page', 'page': page},
                          callback=self.page_ValueList,
                          dont_filter=True)

    def page_ValueList(self, response):
        '''
        Parse one rendered result page: for every goods <li>, extract the
        detail URL, title and price, and schedule a request for the detail
        page.
        :param response: rendered result-list page
        :return: iterator of detail-page scrapy.Request
        '''
        goods_li_list = response.xpath("//div[@id='J_goodsList']/ul/li")

        for goods in goods_li_list:
            # Pre-initialise so the except-branch message below can never
            # raise NameError when extraction fails before assignment.
            goods_detail_url = goods_title = goods_price = ""
            try:
                # Re-parse the single <li> fragment with lxml for XPath access.
                xml = etree.HTML(goods.get())

                goods_detail_url = "https:" + xml.xpath(
                    '//div[starts-with(@class,"p-name")]/a/@href')[0]
                goods_title = xml.xpath(
                    '//div[starts-with(@class,"p-name")]/a/em/text()')[0]
                goods_price = xml.xpath(
                    '//div[starts-with(@class,"p-price")]/strong/i/text()')[0]

                yield Request(url=goods_detail_url,
                              meta={'type': 'goods_detail',
                                    "goods_title": goods_title,
                                    "goods_price": goods_price},
                              callback=self.goods_detail_url,
                              dont_filter=True)
            except Exception:
                print(f"商品列表信息提取错误：goods_title：{goods_title}，goods_price：{goods_price}，goods_detail_url：{goods_detail_url}")

    def goods_detail_url(self, response):
        '''
        Parse one goods detail page, download the product image and yield a
        populated JDGoodsItem.
        :param response: rendered goods detail page
        :return: iterator yielding one JDGoodsItem (or nothing on error)
        '''
        try:
            # The title could also come from response.meta["goods_title"];
            # the detail page version is preferred because it is complete.
            goods_title_lst = response.xpath("//div[@class='sku-name']/text()").extract()
            goods_title = "".join(goods_title_lst)

            # Strip newlines and surrounding whitespace.
            goods_title = goods_title.replace("\n", "").strip()

            goods_image_url = "https:" + response.xpath(
                "//div[@id='spec-n1']/img/@src | //div[@id='spec-list']/ul/li[1]/img/@src"
            ).extract_first()

            # Swap the 54x54 thumbnail path segment for the 303x303 variant.
            goods_image_url = goods_image_url.replace("/s54x54_jfs/", "/s303x303_jfs/")

            # The statically rendered page has an empty price element, so the
            # price is taken from the list page via response.meta instead.
            goods_price = response.meta.get("goods_price", "")

            goods_detail_url = response.url

            goods_image_path = download_image(img_url=goods_image_url,
                                              fileName=urlTofileName(goods_image_url))

            yield JDGoodsItem(goods_title=goods_title,
                              goods_detail_url=goods_detail_url,
                              goods_image_url=goods_image_url,
                              goods_price=goods_price,
                              goods_image_path=goods_image_path)
        except Exception:
            print(f"信息提取错误：{response.url}")









