# -*- coding: utf-8 -*-
import time

import scrapy
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select

from comicscrapy.items import Manhua163Item

class MamhuaSpiderSpider(scrapy.Spider):
    """Crawl comics from manhua.sfacg.com.

    Pipeline: catalog page -> ``parse`` (per-comic metadata) ->
    ``detail_parse`` (chapter list + extra metadata) -> ``contentImg``
    (Selenium walks the JS-driven reader and collects page image URLs).
    """
    name = 'mamhua_spider'
    allowed_domains = ['manhua.sfacg.com']
    offset = 1
    url = 'https://manhua.sfacg.com/catalog/default.aspx?tid=-1&PageIndex='
    start_urls = [url + str(offset)]

    def parse(self, response):
        """Extract basic metadata for every comic on one catalog page.

        Yields one Request per comic, carrying a partially filled
        Manhua163Item in ``meta['item']`` to ``detail_parse``.
        """
        comic_list = response.xpath("//ul[contains(@class,'Comic')]")
        for comic in comic_list:
            comic_item = Manhua163Item()
            comic_item['book_name'] = comic.xpath('./li[2]/strong/a/text()').extract()
            comic_item['book_des'] = comic.xpath('./li[2]/text()[5]').extract()[0].strip()
            comic_item['book_type'] = comic.xpath('./li[2]/a[2]/text()').extract()
            # Catalog hrefs are protocol-relative ('//...'), so prefix the scheme.
            comic_item['book_img'] = 'https:' + comic.xpath('./li[1]/a/img/@src').extract()[0]
            comic_item['book_url'] = 'https:' + comic.xpath('./li[1]/a/@href').extract()[0]
            # The numeric id is the second-to-last path segment of the detail URL.
            comic_item['book_id'] = comic.xpath('./li[1]/a/@href').extract()[0].split('/')[-2]
            yield scrapy.Request(url=comic_item['book_url'],
                                 meta={'item': comic_item},
                                 callback=self.detail_parse)
        # Multi-page crawl: uncomment to follow the remaining catalog pages.
        # if self.offset < 37:
        #     self.offset += 1
        #     yield scrapy.Request(self.url + str(self.offset), callback=self.parse)

    def detail_parse(self, response):
        """Fill in area/popularity/chapter list, then request the reader page.

        BUG FIX vs. the original: the reader-page request was yielded once
        per chapter *inside* the loop (duplicate requests) and before
        ``book_chapter`` was assigned. It is now yielded exactly once,
        after the chapter list is complete.
        """
        # Reuse the item from the previous callback; the original copied
        # every field into a fresh Manhua163Item with identical effect.
        item = response.meta['item']
        item['book_area'] = response.xpath('/html/body/div[2]/div[3]/ul[2]/li[2]/a[1]/text()').extract()
        item['book_popularity'] = response.xpath('/html/body/div[2]/div[3]/ul[2]/li[2]/span[9]/text()').extract()
        # urljoin avoids the double slash that the old
        # 'https://manhua.sfacg.com/' + '/...' concatenation produced.
        item['content_url'] = response.urljoin(
            response.xpath('/html/body/div[2]/div[3]/ul[2]/li[2]/div/a[1]/@href').extract()[0])

        chapter_list = []
        for chapter in response.xpath("//div[@class='comic_Serial_list']/a"):
            # Some chapter links wrap the title in a <font> element instead
            # of bare text (the original used a bare except for this).
            chapter_name = chapter.xpath("./text()").extract_first()
            if chapter_name is None:
                chapter_name = chapter.xpath("./font/text()").extract()[0]
            href = chapter.xpath("./@href").extract()[0]
            chapter_list.append({
                'chapter_id': href.split('/')[-2],
                'chapter_name': chapter_name,
                'chapter_url': response.urljoin(href),
            })
        item['book_chapter'] = chapter_list

        yield scrapy.Request(url=item['content_url'],
                             meta={'item': item},
                             callback=self.contentImg)

    def contentImg(self, response):
        """Drive a Selenium browser through the reader, chapter by chapter.

        For each chapter, pages are advanced with the '下一页' (next page)
        link and each page's image URL is recorded; chapters are advanced
        with '下一回' (next chapter) until either the link disappears or
        the '#Tips' end-marker becomes visible.
        """
        item = response.meta['item']

        # Headless setup for Linux servers — uncomment when deploying:
        # chrome_options = Options()
        # chrome_options.add_argument('--headless')
        # chrome_options.add_argument('--no-sandbox')
        # chrome_options.add_argument('--disable-gpu')
        # chrome_options.add_argument('--disable-dev-shm-usage')
        # browser = webdriver.Chrome(options=chrome_options)
        browser = webdriver.Chrome()

        img_content_list = []
        try:
            browser.get(response.url)
            for _ in range(len(item['book_chapter'])):
                # Chapter id is the second-to-last segment of the reader URL.
                chapter_id = browser.current_url.split('/')[-2]
                # The page <select> tells us how many pages this chapter has.
                page_select = Select(browser.find_element(By.ID, 'pageSel'))
                img_list = []
                for _ in range(len(page_select.options)):
                    img_url = browser.find_element(
                        By.XPATH, "//img[@id='curPic']").get_attribute('src')
                    img_list.append(img_url)
                    browser.find_elements(By.LINK_TEXT, '下一页')[0].click()
                img_content_list.append({'chapter_id': chapter_id,
                                         'img_list': img_list})
                # Guard against the last chapter, where the next-chapter
                # link is absent (the original crashed with IndexError here).
                next_links = browser.find_elements(By.LINK_TEXT, '下一回')
                if not next_links:
                    break
                next_links[0].click()
                if browser.find_element(By.XPATH, '//*[@id="Tips"]').is_displayed():
                    break
        finally:
            # Always release the browser, even if a Selenium call raises.
            browser.quit()

        item['content'] = img_content_list
        self.logger.debug('scraped item: %s', item)
        yield item








