# -*- coding: utf-8 -*-
import scrapy
from pandas import json
import re
from bs4 import BeautifulSoup
class GoodsreviewSpider(scrapy.Spider):
    """Scrape book comments and comment labels from dangdang.com.

    Starting from one category landing page, the spider collects book detail
    URLs from two page sections ("editor's recommendation" and "dangdang
    selection"), derives the comment-list and comment-label JSON API URLs for
    each book, and appends the extracted comment text / label counts to local
    CSV files (``book_comment.csv`` and ``label.csv``).
    """
    name = "goodsReview"
    allowed_domains = ["dangdang.com"]
    start_urls = ['http://book.dangdang.com/01.03.htm']
    # parseType -> categoryPath query fragment expected by the comment APIs
    map = {
        0: 'categoryPath=01.03.51.00.00.00',
        1: 'categoryPath=01.03.45.00.00.00'
    }

    def parse(self, response):
        """Collect book title -> href pairs and schedule comment/label requests.

        :param response: landing-page response for a start URL
        :return: generator of scrapy.Request for the comment and label APIs
        """
        bookAndHrefs = {}
        # "Editor's recommendation" section: six tab panes
        # (loop variable renamed from `i`: the original reused `i` for the
        # inner index loop as well, shadowing the tab counter)
        for tab in range(1, 7):
            css_common = ('#component_map_id_850577_part_id_6391 > '
                          'div.tab_content_aa.tab_content_aatuijian > div.content.tab_' + str(tab))
            book_href = response.selector.css(css_common + '> div.roll_aa> div.over>ul')
            if book_href.get() is None:
                # Some tabs keep their markup inside a <textarea> placeholder
                book_href = response.selector.css(css_common + '> textarea')
            titles = book_href.xpath('.//a/@title').getall()
            hrefs = book_href.xpath('.//a/@href').getall()
            for title, href in zip(titles, hrefs):
                bookAndHrefs.setdefault(title, href)  # one detail href per book title
        # BUG FIX: build the API URLs BEFORE clearing the dict. The original
        # called clear() first, so this whole section produced no requests.
        comment_url = self.parse_comment_url(bookAndHrefs, parseType=0)
        label_url = self.parse_label_url(bookAndHrefs, parseType=0)
        bookAndHrefs.clear()
        # "Dangdang selection" section
        css_common = '#component_850585__6327__6327 > li'
        book_href = response.selector.css(css_common)
        title_a = book_href.xpath('.//a/@title').getall()
        href_a = book_href.xpath('.//a/@href').getall()
        for title, href in zip(title_a, href_a):
            bookAndHrefs.setdefault(title, href)

        comment_url = comment_url + self.parse_comment_url(bookAndHrefs, parseType=1)
        label_url = label_url + self.parse_label_url(bookAndHrefs, parseType=1)
        for c_u in comment_url:
            yield scrapy.Request(c_u, callback=self.parse_comment)
        for l_u in label_url:
            yield scrapy.Request(l_u, callback=self.parse_label)

    def start_requests(self):
        """Issue the initial request for every configured start URL."""
        for url in self.start_urls:
            yield scrapy.Request(url, callback=self.parse)

    def parse_comment(self, response):
        """Extract comment texts from the JSON comment API and paginate.

        Appends one comment per line to ``book_comment.csv``, then requests
        the next ``pageIndex`` until ``pageCount`` is reached.

        :param response: response from the comment/list API (JSON body)
        :return: generator yielding at most one follow-up scrapy.Request
        """
        # Stdlib json: the module-level `from pandas import json` is broken on
        # modern pandas (the pandas.json shim was removed).
        import json
        with open('book_comment.csv', 'a', encoding='utf-8') as file:
            try:
                response_content = json.loads(response.text)
                comment_html = response_content['data']['list']['html']
                soup = BeautifulSoup(comment_html, 'html.parser')
                for comment in soup.find_all('div', attrs={'class': 'describe_detail'}):
                    file.write(comment.text.strip('\n') + '\n')
                # Determine the current page from the request URL
                current_page = re.search(r'pageIndex=[0-9]{1,6}', response.url)
                if not current_page:
                    # No page marker in the URL: nothing to paginate. The
                    # original fell through with `page` unbound and relied on
                    # a bare except to swallow the resulting NameError.
                    return
                key, page = current_page.group(0).split('=')
                page = int(page)
                page_count = response_content['data']['list']['summary']['pageCount']
                page_count = int(page_count) if page_count else 0
                if page < page_count:
                    new_url = re.sub(key + '=' + str(page), key + '=' + str(page + 1), response.url)
                    print('current page: %d \n total page: %d' % (page, page_count))
                    yield scrapy.Request(new_url, callback=self.parse_comment)
            except (ValueError, KeyError, TypeError):
                # Narrowed from a bare except: malformed JSON or a missing key
                # means this product has no comments.
                print('no comment!')

    def parse_comment_url(self, bookAndHrefs, parseType=0):
        """Build one comment-list API URL per book.

        :param bookAndHrefs: mapping of book title -> product detail href
        :param parseType: index into ``self.map`` selecting the categoryPath
        :return: list of comment API URLs
        :type1: http://product.dangdang.com/index.php?r=comment/list&productId=25138856&categoryPath=01.03.56.04.00.00&mainProductId=25138856&mediumId=0&pageIndex=1&sortType=1&filterType=1&isSystem=1&tagId=0&tagFilterCount=0&template=publish
        :type0: http://product.dangdang.com/index.php?r=comment/list&productId=25138856&categoryPath=01.03.51.00.00.00&mainProductId=25138856&mediumId=0&pageIndex=1&sortType=1&filterType=1&isSystem=1&tagId=0&tagFilterCount=0&template=publish
        """
        commentUrl = []
        forward = 'http://product.dangdang.com/index.php?r=comment/list&'
        pageIndex = 'pageIndex=1'
        # Loop-invariant suffix hoisted out of the iteration
        backward = ('mediumId=0&' + self.map[parseType] +
                    '&sortType=1&filterType=1&isSystem=1&tagId=0&tagFilterCount=0&template=publish')
        for href in bookAndHrefs.values():
            # Detail URLs look like .../<bookId>.html[?query]
            bookId = href.split('?')[0].split('/')[-1].split('.')[0]
            url = (forward + 'productId=' + bookId + '&' + 'mainProductId=' + bookId +
                   '&' + pageIndex + '&' + backward)
            commentUrl.append(url)
        return commentUrl

    def parse_label(self, response):
        """Append each comment label (name, count) to ``label.csv``.

        :param response: response from the comment/label API (JSON body)
        """
        import json  # stdlib json; see parse_comment for why not pandas.json
        response_content = json.loads(response.text)
        label = response_content['data']['tags']
        with open('label.csv', 'a', encoding='utf-8') as file:
            for l in label:
                file.write(l['name'] + ',' + str(l['num']) + '\n')

    def parse_label_url(self, bookAndHref, parseType=0):
        """Build one comment-label API URL per book.

        Example: http://product.dangdang.com/index.php?r=comment/label&&categoryPath=01.03.45.00.00.00

        :param bookAndHref: mapping of book title -> product detail href
        :param parseType: index into ``self.map`` selecting the categoryPath
        :return: list of label API URLs
        """
        url_forward = 'http://product.dangdang.com/index.php?r=comment/label&'
        productId = 'productId='
        categoryPath = self.map[parseType]
        answer = []
        for href in bookAndHref.values():
            # Detail URLs look like .../<bookId>.html[?query]
            bookId = href.split('?')[0].split('/')[-1].split('.')[0]
            answer.append(url_forward + productId + bookId + '&&' + categoryPath)
        return answer