import scrapy
from scrapy.selector import Selector
from scrapy import Request
from ..items import DDItem
import re
from lxml import etree
import json


class DdbookSpider(scrapy.Spider):
    """Crawl Dangdang book category listings.

    Flow: category listing page -> book detail page -> AJAX description
    endpoint -> best-seller rank endpoint; one ``DDItem`` is yielded per
    book at the end of the chain (in ``parse_rank``).
    """

    name = 'DDbook'
    start_page = 1      # current listing page number; mutated while crawling
    finish_page = 101   # last listing page to request
    # Category path segment of the listing URL, e.g.
    # http://category.dangdang.com/cp01.54.00.00.00.00.html
    cpurl = 'cp01.43.19.00.00.00.html'
    url = 'http://category.dangdang.com/pg%s-%s' % (str(start_page), cpurl)
    start_urls = [url]

    # XPath candidates for the description text, tried in order.  The first
    # two join every matched text node; the rest take the first node only.
    _CONTENT_XPATHS = (
        ('//*[@id="content"]/*[@class="descrip"]/div/text()', True),
        ('//*[@id="content"]/*[@class="descrip"]/text()', True),
        ('//*[@id="content"]/*[@class="descrip"]/span[2]/text()', False),
        ('//*[@id="content"]/*[@class="descrip"]/div[1]/div[1]/text()', False),
        ('//*[@id="content"]/*[@class="descrip"]/p[1]/span[1]/text()', False),
        ('//*[@id="content"]/*[@class="descrip"]/span[@id="content-show"]/text()', False),
        ('//*[@id="content"]/*[@class="descrip"]/p[1]/text()', False),
        ('//*[@id="content"]/*[@class="descrip"]/div[1]/ul[1]/li[1]/text()', False),
    )

    def _first(self, response, query, default=''):
        """Return the first string matched by *query*, or *default* if none."""
        values = response.xpath(query).extract()
        return values[0] if values else default

    def parse(self, response):
        """Parse one category listing page.

        Queues every book's detail page, then queues the NEXT listing page.
        """
        sel = Selector(response)
        book_list = sel.css('ul#component_59').xpath('li')

        for book in book_list:
            item = DDItem()
            item['link'] = book.css('p.name').xpath('a/@href').extract_first()
            # extract_first() returns None on a miss -- it never raises, so
            # the original try/except ImportError was dead code.
            item['remd1'] = book.css('p.detail').xpath('text()').extract_first() or ''

            # A missing link would make Request() raise; skip such entries.
            if item['link']:
                yield scrapy.Request(callback=self.parse_book, meta={'item': item}, url=item['link'])

        # BUG FIX: the original built next_page BEFORE incrementing
        # start_page, so it re-requested the page just parsed; Scrapy's
        # duplicate filter dropped that request and the crawl stalled on
        # page 1.  Increment first, then build the URL.
        if self.start_page < self.finish_page:
            self.start_page += 1
            next_page = 'http://category.dangdang.com/pg%s-%s' % (str(self.start_page), self.cpurl)
            yield scrapy.Request(next_page, callback=self.parse)

    # Crawl the book detail page.
    def parse_book(self, response):
        """Scrape the detail page into the item, then queue the AJAX
        description endpoint (``parse_content``).

        Books whose Dangdang id or ISBN cannot be extracted are dropped,
        matching the original behaviour.
        """
        item = response.meta['item']
        try:
            # Dangdang product id, e.g. .../23455796.html -> '23455796'
            # (dots escaped; the original pattern left them as wildcards).
            item["DDid"] = re.findall(r'\.com/(\d+)\.html', item["link"])[0]
            item["ISBN"] = self._first(response, "//*[@id='detail_describe']/ul//li[5]/text()").split('：')[1].strip()
        except IndexError as e:
            print(e)
            print('err  =============')  # 23455796
            return

        item["name"] = response.xpath("//*[@id='product_info']/div[1]/h1/@title").extract_first()
        item["author"] = self._first(response, "//*[@id='author']/a/text()")
        item["press"] = self._first(response, "//*[@id='product_info']/div[@class='messbox_info']/span[@dd_name='出版社']/a/text()")

        publishing = self._first(response, "//*[@id='product_info']/div[@class='messbox_info']/span[position()=3]/text()").split("出版时间:")
        item["publishingTime"] = publishing[1].strip() if len(publishing) > 1 else ''

        # Product comment count.
        item["commentNumber"] = self._first(response, "//*[@id='comm_num_down']/text()")

        prices = response.xpath("//*[@id='original-price']/text()").extract()
        item["price"] = prices[1].strip() if len(prices) > 1 else ''

        # The price node usually holds ['¥', '<amount>']; fall back to the
        # sole entry.  The original raised (and dropped the whole item) when
        # the node was absent -- default to '' instead.
        sellings = response.xpath("//*[@id='dd-price']/text()").extract()
        if len(sellings) > 1:
            item["selling"] = sellings[1].strip()
        elif sellings:
            item["selling"] = sellings[0].strip()
        else:
            item["selling"] = ''

        shippers = response.xpath("//*[@id='shop-geo-name']/text()").extract()
        item["shipper"] = shippers[0].replace('\xa0至', '') if shippers else '当当'

        # The "key clearfix" list holds 开本/页数/包装/套装 in fixed slots.
        describe_xpath = "//*[@id='detail_describe']/ul[@class='key clearfix']/li[%d]/text()"
        for field, idx in (('openBook', 1), ('page', 2), ('pcking', 3), ('isSuit', 4)):
            raw = response.xpath(describe_xpath % idx).extract()
            item[field] = self.strSplit(raw[0]) if raw else ''

        item["classification"] = ('>'.join(response.css('li#detail-category-path>span.lie').xpath('a/text()').extract())).replace('>图书', ' 图书')
        item["pic1"] = response.xpath("//*[@id='largePicDiv']/a/img[@id='largePic']/@src").extract_first()
        item["pic2"] = ''

        # These three values are embedded in inline JS, not the DOM.
        m = re.search(r'"categoryPath":"((?:[0-9]{1,3}\.){5}[0-9]{1,3})', response.text)
        item["categoryPath"] = m.group(1) if m else ''
        m = re.search(r'"shopId":"(\d{0,12})', response.text)
        item["shopId"] = m.group(1) if m else ''
        m = re.search(r'"describeMap":"(\d{0,19}:[0-9])', response.text)
        item["describeMap"] = m.group(1) if m else ''

        # Filled by later callbacks; defaulted so the item is always complete.
        item["remd2"] = ''
        item["typeName"] = ''
        item["typeRank"] = ''
        item["content2"] = ''

        post_url = ('http://product.dangdang.com/index.php?r=callback%2Fdetail'
                    '&productId=' + item["DDid"] +
                    '&templateType=publish&describeMap=' + item["describeMap"] +
                    '&shopId=' + item["shopId"] +
                    '&categoryPath=' + item["categoryPath"])
        yield Request(url=post_url, callback=self.parse_content, meta={'item': item})

    def parse_content(self, response):
        """Extract the book description from the AJAX JSON payload, then
        queue the best-seller rank endpoint (``parse_rank``)."""
        item = response.meta['item']
        # BUG FIX: the original caught only IndexError, so malformed JSON
        # (ValueError) or a missing 'data'/'html' key (KeyError/TypeError)
        # crashed the callback and the item was lost.
        try:
            payload = json.loads(response.text) if response.text else None
            html = payload['data']['html'] if payload else None
        except (ValueError, KeyError, TypeError):
            html = None

        if html:
            doc = etree.HTML(html)
            # Try each known layout until a description >= 10 chars is found;
            # shorter hits still overwrite so the last attempt wins, as in
            # the original fallback chain.
            for query, join_all in self._CONTENT_XPATHS:
                nodes = doc.xpath(query)
                if join_all:
                    text = ' '.join(nodes)[:400].strip()
                elif nodes:
                    text = nodes[0][:400].strip()
                else:
                    continue
                item["content1"] = text
                if len(text) >= 10:
                    break

        rank_url = 'http://product.dangdang.com/index.php?r=callback%2Fget-bang-rank&productId=' + item["DDid"]
        yield Request(url=rank_url, callback=self.parse_rank, meta={'item': item})

    def parse_rank(self, response):
        """Parse the rank JSON and yield the finished item.

        The item is yielded unconditionally so a bad rank payload no longer
        swallows the whole book (the original caught only IndexError, so a
        JSON ValueError/KeyError crashed here and the item was never yielded).
        """
        item = response.meta['item']
        try:
            if response.text:
                rank_json = json.loads(response.text)
                item["typeName"] = rank_json['data']['pathName']
                item["typeRank"] = rank_json['data']['rank']
        except (ValueError, KeyError, TypeError, IndexError) as e:
            print(e)
            item["typeName"] = ''
            item["typeRank"] = ''

        yield item

    def strSplit(self, strName):
        """Return the value part of a 'key:value' detail string.

        Splits on an ASCII or full-width colon.  Returns '' when neither
        separator is present (the original returned False, inconsistent
        with every other empty-field default in this spider).
        """
        for sep in (':', '：'):
            if sep in strName:
                return strName.split(sep)[1].strip()
        return ''
