# -*- coding: utf-8 -*-
import re
import time
import scrapy
from scrapy.http import Request
from ManhuaSpider.items import MhInfoItem, MhSectionInfoItem, MhImageItem, MhUpdateItem, CategoryItem
from ManhuaSpider.libs.user_agent import get_user_agent
from urllib import parse as url_parse
from ManhuaSpider.libs.comment import _check_section_urls, _get_current_time, _datetime_to_time


class MhSpider(scrapy.Spider):
    """Spider for www.36mh.com.

    Crawl flow:
      1. ``parse``          - category names + paginated comic list pages
      2. ``mh_info``        - one comic's detail page (info, update state, chapters)
      3. ``content_images`` - one chapter's page, extracting the image URLs
    """
    name = 'mh'
    allowed_domains = ['www.36mh.com']
    start_urls = ['https://www.36mh.com/list/']
    base_url = 'https://www.36mh.com'
    # BUGFIX: key was 'User_Agent' (underscore) — that is sent verbatim as an
    # unrecognized header; the HTTP header name is 'User-Agent'.
    headers = {
        'User-Agent': get_user_agent(),
    }
    image_url_base = 'https://img001.yayxcc.com/'   # image host base URL
    # Leftover knobs from a commented-out test throttle; kept for
    # backward compatibility in case external code reads them.
    test_record_max = 3
    test_record_index = 1

    def parse(self, response):
        """List page: yield CategoryItems (root page only), a detail-page
        Request per comic, and follow pagination."""
        # Categories are only scraped from the root list page.
        if 'https://www.36mh.com/list/' == response.url:
            xres = response.xpath('//*[@id="w0"]/div[2]/div[3]/ul/li/a/text()').extract()
            # NOTE(review): set() makes the ordering — and therefore the
            # m_cate_id values — nondeterministic across runs, and [1:]
            # drops an arbitrary entry (presumably an "all" link);
            # confirm against the live page markup.
            res_list = list(set(xres))[1::]
            for cate_id, cate_name in enumerate(res_list, start=1):
                # enumerate replaces res_list.index(item)+1: identical values
                # (entries are unique after set()) without the O(n^2) scan.
                cateItem = CategoryItem()
                cateItem['m_cate_id'] = cate_id
                cateItem['m_cate_name'] = cate_name
                yield cateItem

        # Comic list entries: (data-key id, detail-page url) pairs.
        list_mh_id = response.xpath('//ul[@id="contList"]/li[@class="item-lg"]/@data-key').extract()
        list_mh_url = response.xpath('//ul[@id="contList"]/li[@class="item-lg"]/a/@href').extract()

        # set() de-duplicates repeated (id, url) pairs on the page.
        for key, value in set(zip(list_mh_id, list_mh_url)):
            yield Request(url=value, meta={'mid': int(key)},
                          callback=self.mh_info, headers=self.headers)

        # Follow the "next page" link until it points back at the current page.
        _next = response.xpath('//li[@class="next"]/a/@href').extract()
        if _next:
            next_page_url = url_parse.urljoin(self.base_url, _next[0])
            if next_page_url != response.url:
                yield Request(url=next_page_url, callback=self.parse)

    def mh_info(self, response):
        """Comic detail page: yield MhInfoItem, MhUpdateItem, and one
        MhSectionInfoItem per chapter (plus a Request per chapter page
        when the chapter list is crawlable)."""
        item = MhInfoItem()
        mid = response.meta['mid']

        image = response.xpath('//div[@class="book-cover item-xl fl"]/p[@class="cover"]/img/@src').extract()
        title = response.xpath('//div[@class="book-title"]/h1/span/text()').extract()
        area = response.xpath('//ul[@class="detail-list cf"]/li[1]/span[1]/a/text()').extract()
        categorys = response.xpath('//ul[@class="detail-list cf"]/li[2]/span[1]/a/text()').extract()
        arthor = response.xpath('//ul[@class="detail-list cf"]/li[2]/span[2]/a/text()').extract()
        state = response.xpath('//ul[@class="detail-list cf"]/li[@class="status"]/span/a[1]/text()').extract()
        latest = response.xpath('//ul[@class="detail-list cf"]/li[@class="status"]/span/a[2]/text()').extract()
        update_time = response.xpath('//ul[@class="detail-list cf"]/li[@class="status"]/span/span/text()').extract()
        description = response.xpath('//div[@class="book-intro"]/div[@id="intro-all"]/p/text()').extract()

        # NOTE(review): the [0] accesses below raise IndexError if any xpath
        # matched nothing (page layout change) — the whole page is then
        # dropped by Scrapy's error handling. Confirm that is acceptable.
        item['m_create_time'] = _get_current_time()
        item['m_status'] = 1
        item['m_id'] = mid
        item['m_title'] = title[0]
        item['m_arthor'] = arthor[0]

        # BUGFIX: was .replace('/n', '') — a typo that matches nothing;
        # the intent is to strip embedded newlines from the description.
        item['m_description'] = description[0].replace('\n', '').strip()
        item['m_area'] = area[0]

        item['m_image'] = image[0]      # cover image (first match)
        item['m_category'] = '|'.join(categorys)

        item['referer'] = response.url
        yield item

        # Per-comic update/status record.
        utem = MhUpdateItem()
        utem['m_create_time'] = _get_current_time()
        utem['m_status'] = 1
        utem['m_id'] = mid
        utem['m_state'] = state[0]
        utem['m_update_time'] = _datetime_to_time(update_time[0])
        utem['m_latest'] = latest[0]
        yield utem

        # Chapter list.
        section_urls = response.xpath('//div[@class="chapter-body clearfix"]/ul/li/a/@href').extract()
        section_titles = response.xpath('//div[@class="chapter-body clearfix"]/ul/li/a/span/text()').extract()

        is_run = _check_section_urls(section_urls)
        _current_time = _get_current_time()
        if not is_run:
            # Chapters not crawlable (copyright-restricted per m_copyright=0):
            # record titles only, with a sentinel section id.
            for _item in section_titles:
                s_item = MhSectionInfoItem()
                s_item['m_create_time'] = _current_time
                s_item['m_status'] = 1
                s_item['m_title'] = _item
                s_item['m_id'] = mid
                s_item['m_section_id'] = -1
                s_item['m_copyright'] = 0
                yield s_item
        else:
            self._set_header_referer(response)
            set_section = set(zip(section_titles, section_urls))
            for key, value in set_section:
                s_item = MhSectionInfoItem()
                _url = url_parse.urljoin(self.base_url, value)
                # Section id is the first number in the chapter URL.
                _sid = int(re.findall(r'\d+', value)[0])
                s_item['m_create_time'] = _current_time
                s_item['m_status'] = 1
                s_item['m_id'] = mid
                s_item['m_title'] = key
                s_item['m_section_id'] = _sid
                s_item['m_copyright'] = 1
                yield s_item
                yield Request(url=_url, meta={'mid': mid, 'sid': _sid}, callback=self.content_images, headers=self.headers)

    def content_images(self, response):
        """Chapter page: parse the inline <script> for the image path and
        filename list, and yield one MhImageItem per image."""
        mid = response.meta['mid']
        sid = response.meta['sid']
        center = response.xpath('//body[@class="clearfix"]/script[1]/text()').extract()

        # Split the script into statements; set() drops duplicates
        # (statement order does not matter for the lookups below).
        center_list = list(set(center[0].split(';')))
        # Locate the chapter's image directory ("var chapterPath = "...").
        _tmp_url_base = 'var chapterPath = '
        tmp_url_base = [item for item in center_list if _tmp_url_base in item][0]
        current_base_url = tmp_url_base.split('"')[1]
        # Locate the image filename array ("var chapterImages = [...]").
        _tmp_urls = 'var chapterImages = '
        str_images = [item for item in center_list if _tmp_urls in item][0]
        _str1 = str_images.split('"')
        # NOTE(review): only .jpg entries are kept — .png/.webp images,
        # if the site serves any, are silently dropped; confirm.
        _images_urls = [item for item in list(_str1) if '.jpg' in item]

        # Full URL = image host + chapter directory + filename.
        images_urls = [self.image_url_base + current_base_url + item for item in _images_urls]

        _current_time = _get_current_time()
        _url = response.url
        _count = len(images_urls)
        # Pair each URL with a 1-based image index within the chapter.
        _images_urls_set = set(zip(list(range(1, _count + 1)), images_urls))
        for key, img in _images_urls_set:
            _item = MhImageItem()
            _item['m_create_time'] = _current_time
            _item['m_status'] = 1
            _item['m_id'] = mid
            _item['m_section_id'] = sid
            _item['m_image_id'] = key
            _item['m_image'] = url_parse.urljoin(self.base_url, img)
            _item['referer'] = _url
            yield _item

    def _set_header_referer(self, _response=None):
        """Set the Referer header to the given response's URL (or the
        site base URL when no response is given).

        NOTE(review): ``headers`` is a class attribute, so this mutates
        state shared by every request this spider has in flight —
        concurrent requests may carry the wrong Referer.
        """
        _url = _response.url if _response else self.base_url
        self.headers.update({'Referer': _url})
