import scrapy
from scrapy.http import Request
from scrapy.selector import Selector
from doubanmovie.items import DoubanmovieItem
from urllib.parse import urljoin
from urllib.parse import quote
import pymysql
from doubanmovie import settings
from selenium import webdriver
import time
import csv
from selenium.webdriver.common.by import By

class Douban(scrapy.spiders.Spider):
    """Spider that crawls Douban book detail pages.

    Target URLs are built once, at class-definition (import) time, from
    ``book_pool`` rows whose ``book_info_html`` column is still NULL.
    Each parsed page is yielded as a plain dict for the item pipeline.
    """
    name = "douban"
    allowed_domains = ["douban.com"]

    # NOTE(review): this connection and query run as a side effect of
    # importing the module (class body executes immediately).
    connect = pymysql.connect(
        host=settings.MYSQL_HOST,
        db=settings.MYSQL_DBNAME,
        user=settings.MYSQL_USER,
        passwd=settings.MYSQL_PASSWD,
        charset='utf8',
        use_unicode=True)
    cursor = connect.cursor()

    # -------------- Douban detail crawl: start ----------------
    # Detail pages are keyed by douban_id; the hard-coded id>29156
    # presumably resumes a previous partial run — TODO confirm/parameterize.
    sql = "select book_douban_id from book_pool where id>29156 and book_info_html is null"
    cursor.execute(sql)
    bookList = cursor.fetchall()

    # One detail-page URL per pending row.
    start_urls = [
        "https://book.douban.com/subject/" + str(row[0]) for row in bookList
    ]

    def parse(self, response):
        """Extract book fields from a detail page and yield them as a dict.

        Yields a dict with keys: book_name, book_evaluate, book_info_html,
        book_desc, author_desc, book_douban_id. On any extraction error the
        page is logged and skipped (nothing is yielded).
        """
        selector = Selector(response)

        book_content = selector.xpath('//div[@id="info"]')

        # Dict holding the scraped fields for this page.
        detail_item = {}

        try:
            # Book title (raises IndexError for non-book / error pages,
            # which the except below turns into a logged skip).
            book_name = selector.xpath("//h1/span/text()").extract()[0]
            detail_item["book_name"] = book_name.strip()

            # Rating: first <strong> text, empty string when absent.
            book_evaluate_arr = book_content.xpath("//strong/text()").extract()
            # BUG FIX: the original assigned "" here unconditionally,
            # discarding the rating it had just extracted.
            detail_item["book_evaluate"] = (
                book_evaluate_arr[0] if book_evaluate_arr else "")

            # Raw HTML of the info block, stored verbatim for later parsing
            # (individual fields like author/press are extracted offline).
            book_info_html = selector.xpath('//div[@id="info"]').extract()
            detail_item["book_info_html"] = ''.join(book_info_html)

            # Book description (first intro block inside #link-report).
            book_desc_arr = book_content.xpath(
                "//div[@id='link-report']//div[@class='intro']").extract()
            detail_item["book_desc"] = book_desc_arr[0] if book_desc_arr else ""

            # Author biography. NOTE: the trailing space in 'indent ' matches
            # Douban's actual class attribute — do not "fix" it.
            author_desc_arr = book_content.xpath(
                "//div[@class='indent ']//div[@class='intro']").extract()
            detail_item["author_desc"] = (
                author_desc_arr[0] if author_desc_arr else "")

            # URL shape is .../subject/<id>/ so the id is the next-to-last
            # path component.
            detail_item['book_douban_id'] = response.url.split('/')[-2]

        except Exception as err:
            # BUG FIX: the original called exit() here, killing the whole
            # crawler process on a single malformed page. Log and skip the
            # page instead; no item is yielded (same as before the fix).
            self.logger.error("failed to parse %s: %s", response.url, err)
            return

        yield detail_item
    # ============== Douban detail crawl: end ===================