# -*- coding: utf-8 -*-
import copy

import scrapy

from novel.dao import noveldao, charpterdao
from novel.items import NovelItem, CharpterItem


class HaobqgSpider(scrapy.Spider):
    """Spider for www.haobqg.com.

    Supports three crawl modes, selected by the ``type`` spider argument:

    * ``novel``   (default) — scrape a novel's detail page given ``href``.
    * ``chapter``           — scrape a single chapter page given ``url``
                              (plus ``id``/``no`` so it can be persisted).
    * ``updata``            — re-scrape a known novel's chapter list given
                              ``id`` and emit only the newly added chapters.
    """
    name = 'haobqg'
    allowed_domains = ['www.haobqg.com']

    def start_requests(self):
        """Build the initial request according to the spider arguments.

        Yields a single ``scrapy.Request`` (or nothing if no start URL can
        be resolved).
        """
        # Dispatch table: crawl-mode name -> parse callback.
        self.control_map = {
            "updata": self.parse_updata_chapter,
            "novel": self.parse_novel,
            "chapter": self.parse_chapter,
        }

        href = getattr(self, 'href', None)  # start URL passed as a spider argument
        # NOTE: the spider *argument* is still named 'type'; only the local
        # variable is renamed to avoid shadowing the builtin.
        crawl_type = getattr(self, 'type', "novel")

        if crawl_type == 'updata':
            # For an update crawl, resolve the novel's source URL from the DB.
            nid = getattr(self, 'id', None)
            if nid is not None:
                novel = noveldao.findById(nid)
                if novel is not None:
                    href = novel[6]  # column 6 holds the novel's source URL
        elif crawl_type == 'chapter':
            href = getattr(self, 'url', None)

        if href is not None:
            # .get() with a default avoids a KeyError when an unknown
            # crawl type is passed on the command line.
            callback = self.control_map.get(crawl_type, self.parse_novel)
            yield scrapy.Request(href, callback=callback,
                                 meta={'no': getattr(self, 'no', None)})

    def parse_novel(self, response):
        """Parse a novel detail page into a ``NovelItem``.

        The item is not yielded here: it is forwarded (deep-copied) to
        :meth:`parse_img`, which adds the cover URL and yields it.
        """
        item = NovelItem()

        # Category is the last breadcrumb link; fall back to '其他' ("other")
        # when the breadcrumb is missing or is just the site name.
        try:
            category = response.css('div.con_top a::text').extract()[-1]
        except IndexError:
            category = '其他'
        if category == '好笔趣阁':
            category = '其他'

        name = response.css('div#info h1::text').extract_first()
        # Info lines look like '作者：xxx'; keep only the part after the
        # full-width colon. Guard against a missing selector match, which
        # would make extract_first() return None.
        author_line = response.css('div#info p::text').extract_first()
        author = author_line.split('：')[-1] if author_line else ''
        update = response.css('div#info p::text').extract()[-3].split('：')[-1]
        introduction = response.css('div#intro p::text').extract_first()
        img_url = response.css('div#fmimg script::attr(src)').extract_first()
        chapter_list = response.css('dd a::attr(href)').extract()
        chapter_title_list = response.css('dd a::text').extract()

        if not author:
            author = "未知"

        item['type'] = category.replace('小说', '')
        item['source'] = response.url
        item['name'] = name
        item['author'] = author
        item['update'] = update
        item['introduction'] = introduction

        # NOTE(review): 'linke' (sic) is the key the downstream pipeline
        # appears to expect — kept as-is; confirm before renaming.
        item['chapter'] = [
            {"title": title, "linke": response.urljoin(link)}
            for title, link in zip(chapter_title_list, chapter_list)
        ]

        # Deep-copy so the item is not mutated while the request is queued.
        yield response.follow(img_url, callback=self.parse_img,
                              meta={'item': copy.deepcopy(item)})

    def parse_img(self, response):
        """Extract the cover image URL from the fetched script body and
        yield the completed ``NovelItem``."""
        item = response.meta['item']
        html = str(response.body, encoding='utf-8')
        # The script body embeds "<img src='...' width=..."; slice the URL out.
        start = html.find("src='") + 5
        end = html.find("' width")
        item['cover'] = html[start:end]
        # Only emit items that actually resolved a novel name.
        if item['name']:
            yield copy.deepcopy(item)

    def parse_chapter(self, response):
        """Parse a single chapter page and persist it to the database.

        :param response: chapter page; ``response.meta['no']`` carries the
            chapter number set in :meth:`start_requests`.
        """
        source = response.url
        title = response.css('div.bookname h1::text').extract_first()
        context = response.css('div#content::text').extract()
        text = "\r\n".join(context)

        nid = getattr(self, 'id', None)
        no = response.meta.get('no')
        # Only persist when both the novel id and chapter number are known.
        if nid is not None and no is not None:
            charpter = {
                'id': nid,
                'no': no,
                'title': title,
                'text': text,
                'source': source,
            }
            charpterdao.insertCharpter(charpter)
            noveldao.refreshTime(nid)

    def parse_updata_chapter(self, response):
        """Diff the on-site chapter list against the DB and emit only the
        chapters that are new, then refresh the novel's update time."""
        chapter_list = response.css('dd a::attr(href)').extract()
        chapter_title_list = response.css('dd a::text').extract()

        nid = getattr(self, 'id', None)
        known = noveldao.findByIdCharpter(nid)
        known_count = len(known)

        if len(chapter_list) > known_count:
            item = CharpterItem()
            # Chapter numbers continue from the last one already stored.
            # NOTE(review): 'linke' (sic) matches the key used elsewhere.
            item['chapter'] = [
                {
                    "title": title,
                    "linke": response.urljoin(link),
                    "no": known_count + idx + 1,
                }
                for idx, (title, link) in enumerate(
                    zip(chapter_title_list[known_count:],
                        chapter_list[known_count:]))
            ]
            item['id'] = nid
            yield item
        noveldao.refreshTime(nid)
