import scrapy
from demo2.items import cmreadItem
# scrapy crawl cmread -o cmread.csv

class CmreadSpider(scrapy.Spider):
    """Spider for the cmread.com book-ranking JSON endpoints.

    Two start URLs are fetched: a JSP rank page and a rank-service JSON
    API. Both responses contain the same ``rankItems`` record shape, just
    nested under different top-level keys, so extraction is shared via
    :meth:`_parse_rank_items`.
    """

    name = "cmread"
    allowed_domains = ["cmread.com"]
    start_urls = ["https://n.cmread.com/r/p/nsb.jsp?dSDaipTi=0MnKenalqEo03bX4Ju2p1kE_ekUnsRG34CciTFhngxKgDNI2Ny3W13h2pClaY02fOk5tGdSvSXP9HGRWxiLLgLdikboOyijc2XcHO.P6fNMylqqDITUpvXzs7FxkLKpWjw32u13y_q1YbQcprBRWGDCuqW3uHpwbdDqvMjDuTk4IWzIwiD0J1AA",
                  "https://n.cmread.com/r/f/ms.rank.bookRankService/getBookRankings.json?dSDaipTi=0E4UHkGlqEteRlMP8sNXN8vOFlC7Be_PUGwIq4t8ePuAcIQpu2TMkGik4oHzlUDguAz1R9yx0eiV3bXUzFlwjxIGtTE.WUZ2pk5MLGQvGnDT4pOwh0.KEN4y3gmFYW2ExdNtiTGHXw6lgj2hcmC3kA5k4S_V_7EoMSiOlyEEwGZwrYFq0ySYyvU6fxJc0wOo_hYIJb4IDQVBJJiRMw.HyKUQRmSQ3XTSoZ3E3x_nh.Y.VL8f3W5PNpsR9.h1SWgDj3T16kLnrBaph5T4ecYxqcPzg4irNn8dLYw7_Lokg9qFGKb..iSO4QSlj68OAH8bk6PRKvnENx0.Ex4EiScc2tIPjfkq2azdvMq"]

    def start_requests(self):
        """Issue the two start requests with a Referer header.

        The site appears to require a matching Referer, so both requests
        carry it along with the project-configured User-Agent.
        """
        user_agent = self.settings.get('USER_AGENT')
        headers = {
            "Referer": "https://n.cmread.com/r/p/phsy.jsp?is_np=1",
            "User-Agent": user_agent
        }
        yield scrapy.Request(url=self.start_urls[0], headers=headers, callback=self.parse)
        yield scrapy.Request(url=self.start_urls[1], headers=headers, callback=self.parse_page)

    def _parse_rank_items(self, book_datas):
        """Yield one populated cmreadItem per record in *book_datas*.

        Shared by :meth:`parse` and :meth:`parse_page`, whose payloads
        only differ in where the ``rankItems`` list is nested.
        """
        for data in book_datas:
            item = cmreadItem()
            item['book_name'] = data.get('bookName')
            item['book_description'] = data.get('bookDesc')
            item['book_category'] = data.get('bookCategory')
            item['book_finished'] = data.get('bookFinished')
            item['total_chapter_num'] = data.get('totalChapterNum')

            # Only the first listed author/translator is kept; records may
            # have an empty list, in which case the name is explicitly None.
            author_and_translator_list = data.get('authorAndTranslatorList', [])
            if author_and_translator_list:
                first_author = author_and_translator_list[0]
                item['author_name'] = first_author.get('authorAndTranslatorName')
            else:
                item['author_name'] = None

            yield item

    def parse(self, response):
        """Parse the JSP rank page; items live under the '2418' block."""
        json_data = response.json()
        # '2418' is a magic block id in this endpoint's payload — TODO confirm
        # it is stable across rank pages.
        book_datas = json_data['2418']['data']['rankItems']
        yield from self._parse_rank_items(book_datas)

    def parse_page(self, response):
        """Parse the rank-service JSON API; items live under 'data'."""
        json_data = response.json()
        book_datas = json_data['data']['rankItems']
        yield from self._parse_rank_items(book_datas)
