# -*- coding: utf-8 -*-
import copy

import scrapy

from novel.items import NovelItem, CharpterItem


class HaobqgSpider(scrapy.Spider):
    """Seed spider for www.haobqg.com.

    Crawls the category index pages in ``start_urls``, follows every link in
    ``div#main`` to a novel detail page, scrapes the novel's metadata into a
    ``NovelItem``, then fetches the cover-image script to resolve the real
    cover URL before yielding the item.
    """

    name = 'haobqg_init'
    allowed_domains = ['www.haobqg.com']
    start_urls = [
        'http://www.haobqg.com/',
        'http://www.haobqg.com/xuanhuanxiaoshuo/',
        'http://www.haobqg.com/xiuzhenxiaoshuo/',
        'http://www.haobqg.com/dushixiaoshuo/',
        'http://www.haobqg.com/wangyouxiaoshuo/',
        'http://www.haobqg.com/kehuanxiaoshuo/',
        'http://www.haobqg.com/wanben/1_1',
        'http://www.haobqg.com/paihangbang/',
    ]

    def parse(self, response):
        """Follow every anchor inside ``div#main`` to a novel detail page."""
        for href in response.css('div#main  a::attr(href)').extract():
            yield response.follow(href, callback=self.parse_novel)

    def parse_novel(self, response):
        """Scrape metadata from a novel detail page.

        Normally yields a follow-up request for the cover-image script; the
        item itself is emitted from :meth:`parse_img` once the cover URL is
        known.  If no cover script exists, the item is yielded directly
        (previously ``response.follow(None, ...)`` raised and the item was
        silently dropped).
        """
        item = NovelItem()

        # The last breadcrumb link is the category; the bare site name
        # ("好笔趣阁") means the page carries no real category.
        breadcrumbs = response.css('div.con_top a::text').extract()
        novel_type = breadcrumbs[-1] if breadcrumbs else '其他'
        if novel_type == '好笔趣阁':
            novel_type = '其他'

        name = response.css('div#info h1::text').extract_first()
        if not name:
            # Not a novel detail page (or markup changed) — nothing usable.
            # The old code issued the cover request anyway and dropped the
            # item in parse_img; bail out early instead.
            return

        # Info lines look like "作者：xxx" — keep the part after the
        # fullwidth colon; fall back to "未知" ("unknown") when absent.
        author_line = response.css('div#info p::text').extract_first()
        author = author_line.split('：')[-1] if author_line else ''
        if not author:
            author = '未知'

        # Third-from-last info line holds the last-update date.
        info_lines = response.css('div#info p::text').extract()
        if len(info_lines) >= 3:
            update = info_lines[-3].split('：')[-1]
        else:
            update = '未知'

        introduction = response.css('div#intro p::text').extract_first()
        img_url = response.css('div#fmimg script::attr(src)').extract_first()
        chapter_list = response.css('dd a::attr(href)').extract()

        item['type'] = novel_type.replace('小说', '')
        item['source'] = response.url
        item['name'] = name
        item['author'] = author
        item['update'] = update
        item['introduction'] = introduction
        item['chapter'] = [response.urljoin(link) for link in chapter_list]

        if img_url:
            # deepcopy so concurrently processed responses cannot share
            # (and mutate) the same item instance via meta.
            yield response.follow(
                img_url,
                callback=self.parse_img,
                meta={'item': copy.deepcopy(item)},
            )
        else:
            # No cover script on the page: emit the item without a cover.
            item['cover'] = None
            yield item

    def parse_img(self, response):
        """Pull the real cover URL out of the image script and yield the item.

        The script body contains ``...src='<url>' width=...``; the slice
        between ``src='`` and ``' width`` is the cover URL.
        """
        item = response.meta['item']
        html = response.body.decode('utf-8')
        item['cover'] = html[html.find("src='") + 5: html.find("' width")]
        if item['name']:
            yield copy.deepcopy(item)

    def parse_chapter(self, response):
        """Scrape a single chapter page into a ``CharpterItem``.

        ``id``/``no`` identify the parent novel and chapter ordinal; they are
        set only when both a spider-level ``id`` attribute and a ``no`` meta
        key were supplied.  (``getattr``/``meta.get`` with defaults cannot
        raise, so the old try/except fallback here was dead code.)

        :param response: chapter page response.
        """
        charpter = CharpterItem()
        charpter['title'] = response.css('div.bookname h1::text').extract_first()
        charpter['text'] = "\r\n".join(response.css('div#content::text').extract())
        charpter['source'] = response.url

        nid = getattr(self, 'id', None)
        no = response.meta.get('no')
        if nid is not None and no is not None:
            charpter['id'] = nid
            charpter['no'] = no

        yield charpter