# -*- coding: utf-8 -*-
import scrapy
import json
from comicscrapy.items import Manhua163Item
import re
from selenium import webdriver
import time
from selenium.webdriver.chrome.options import Options
from threading import Thread


class Comic163SpiderSpider(scrapy.Spider):
    """Spider for manhua.163.com (NetEase comics).

    Crawl pipeline:
        parse          -> category listing JSON (one page of books)
        detail_parse   -> book detail page (type / popularity / reader entry)
        getChapters    -> chapter catalog JSON for the book
        contentImg     -> each chapter rendered in headless Chrome to
                          harvest the lazy-loaded image URLs

    The partially-filled ``Manhua163Item`` is carried between callbacks via
    ``Request.meta['item']`` and yielded once fully populated.
    """

    name = 'comic163_spider'
    allowed_domains = ['manhua.163.com']
    # Category API: sort=2 (popularity), 72 books per page; page index appended.
    url = 'https://manhua.163.com/category/getData.json?sort=2&sf=1&pageSize=72&page='
    offset = 0
    start_urls = [url + str(offset)]

    def parse(self, response):
        """Parse one page of the category JSON and follow every book's detail page."""
        books = json.loads(response.text)['books']
        for book in books:
            item = Manhua163Item()
            item['book_name'] = book['title']
            item['book_des'] = book['description']
            item['book_img'] = book['cover']
            item['book_id'] = book['bookId']
            item['book_url'] = 'https://manhua.163.com/source/' + book['bookId']
            yield scrapy.Request(url=item['book_url'], meta={'item': item},
                                 callback=self.detail_parse)

    def detail_parse(self, response):
        """Scrape type/popularity from the book page, then request its catalog.

        Extends the item received in ``response.meta`` instead of rebuilding it
        field by field.
        """
        item = response.meta['item']
        item['book_type'] = response.xpath(
            "//dl[contains(@class,'sr-dl')]/dd[2]/a/text()").extract()
        item['book_popularity'] = response.xpath(
            "//dl[contains(@class,'sr-dl')]/dd[3]/span/text()").extract()

        content_id = response.xpath("//a[contains(@class,'sr-btn')]/@href").extract()
        if not content_id:
            # Layout changed or book is unavailable: nothing to follow.
            return
        # Entry URL of the comic reader for this book.
        item['content_url'] = 'https://manhua.163.com' + content_id[0]

        # Chapter catalog endpoint (JSON, sometimes wrapped in HTML tags).
        catalog_url = 'https://manhua.163.com/book/catalog/' + item['book_id']
        yield scrapy.Request(url=catalog_url, meta={'item': item},
                             callback=self.getChapters)

    def getChapters(self, response):
        """Parse the chapter catalog and kick off image scraping for the book."""
        item = response.meta['item']
        # The JSON payload can arrive wrapped in HTML tags; strip them first.
        jtext = re.sub('<[^>]+>', '', response.text)
        sections = json.loads(jtext)['catalog']['sections'][0]['sections']

        chapters = []
        for section in sections:
            chapter_url = ('https://manhua.163.com/reader/'
                           + item['book_id'] + '/' + section['sectionId'])
            # Kept for compatibility: original code stored the last chapter URL here.
            item['chapter_url'] = chapter_url
            chapters.append({
                'comicId': item['book_id'],
                'sectionId': section['sectionId'],
                'title': section['fullTitle'],
                'chapterUrl': chapter_url,
            })
        item['book_chapter'] = chapters

        if not chapters:
            # Empty catalog: nothing to render, drop the book silently.
            return
        # contentImg iterates all chapters itself with one Selenium session,
        # so a single request (on the first chapter) is enough.
        yield scrapy.Request(url=chapters[0]['chapterUrl'], meta={'item': item},
                             callback=self.contentImg)

    def contentImg(self, response):
        """Render every chapter in headless Chrome and collect its image URLs.

        Yields the completed item with ``item['content']`` mapping
        sectionId -> list of image-URL pairs (webp / fallback).
        """
        item = response.meta['item']
        content = dict()

        chrome_options = Options()
        chrome_options.add_argument('--headless')
        # 'chrome_options=' is deprecated in Selenium; 'options=' is the
        # supported keyword.
        browser = webdriver.Chrome(options=chrome_options)

        # Hoisted out of the loop: compiled once instead of per image.
        url_pattern = re.compile(r'url: window.IS_SUPPORT_WEBP [?](.*)')
        key_pattern = re.compile(r'NOSAccessKeyId=(.*)')

        try:
            for chapter in item['book_chapter']:
                browser.get(chapter['chapterUrl'])

                # Scroll to the bottom so lazy-loaded images get injected.
                for _ in range(25):
                    browser.execute_script("window.scrollBy(0,2500)")
                    time.sleep(1)

                # FIX: page_source must be read *after* scrolling, otherwise
                # the lazily-injected image URLs are never present.
                text = browser.page_source

                images = []
                for match in url_pattern.findall(text):
                    # Each match is '"webp_url" : "fallback_url",' — clean it
                    # up and split into the two candidate URLs.
                    pair = match.replace('\"', '').rstrip(',').strip().split(' : ')
                    # Replace the (expired) access key with a working one.
                    pair = [key_pattern.sub(
                        'NOSAccessKeyId=c92f74b0d48f4fb39271a1109da74cc2',
                        part).replace(' ', '') for part in pair]
                    images.append(pair)
                content[chapter['sectionId']] = images
        finally:
            # Always release the chromedriver process, even on error.
            browser.quit()

        item['content'] = content
        yield item




