# -*- coding: utf-8 -*-
import scrapy
import re
import os
from scrapy.http import Request
import time

from builtins import enumerate


class NovelSpider(scrapy.Spider):
    """Spider that crawls a web novel on m.book9.net and saves every
    chapter as a standalone local HTML file under ``tianying/``.
    """

    # Spider name, used by `scrapy crawl novel`.
    name = 'novel'
    # Restrict the crawl to the novel site.
    allowed_domains = ['m.book9.net']
    # Entry point: the novel's "all chapters" index page.
    start_urls = ['https://m.book9.net/wapbook/25_all.html']

    base_url = "https://m.book9.net"

    def __init__(self, name=None, **kwargs):
        # BUG FIX: the original passed name=None to super(), silently
        # discarding any name supplied by the caller.
        super().__init__(name=name, **kwargs)

    def parse(self, response):
        """Parse the chapter index page and schedule one request per chapter.

        Yields follow-up requests whose responses are handled by
        :meth:`parse_article`.
        """
        # Chapter links; position()>1 skips the first <p>, which is not a chapter.
        links = response.xpath('/html/body/div[2]/p[position()>1]/a/@href')
        for index, url in enumerate(links.extract()):
            self.logger.debug('chapter %d: %s', index, url)
            # follow() resolves the (possibly relative) chapter URL for us.
            yield response.follow(url=url, callback=self.parse_article)

    def parse_article(self, response):
        """Save one chapter page to disk as a local HTML file.

        The chapter number is extracted from the URL (e.g. ``..._1234.html``)
        and prefixed to the filename so files sort in reading order.
        """
        # \d+ (not \d*) so an underscore without digits can never yield
        # an empty chapter index.
        match = re.search(r'_(\d+)', response.url)
        index = match.group(1) if match else ''
        title = self.generate_title(response)
        self.logger.info('saving %s %s (%s)', index, title, response.url)
        # Build a self-contained HTML document for the chapter.
        html = self.build_article(title, response)
        # Persist it locally; viewing in a browser is left to the user.
        self.save_file(index + title + '.html', html)

    @staticmethod
    def generate_title(reponse):
        """Return the chapter title, stripped of surrounding whitespace."""
        title = reponse.xpath('//*[@id="read"]/div[1]/text()').extract()
        return "".join(title).strip()

    @staticmethod
    def build_article(title, reponse):
        """Return a standalone HTML document for one chapter.

        Navigation ``<a>`` tags embedded in the chapter body are stripped.
        """
        context = reponse.xpath('//*[@id="chaptercontent"]').extract_first()
        # Remove the site's inline navigation anchors (raw string avoids
        # invalid-escape warnings on modern Python).
        re_c = re.compile(r'<\s*a[^>]*>[^<]*<\s*/\s*a\s*>')
        article = re_c.sub("", context)
        # Assemble the page; BUG FIX: "text-alight" -> "text-align" so the
        # title is actually centered.
        html = '<html><meta http-equiv="Content-Type" content="text/html; charset=utf-8" /><div align="center" style="width:100%;text-align:center"><b><font size="5">' \
               + title + '</font></b></div>' + article + "</html>"
        return html

    @staticmethod
    def save_file(filename, html):
        """Write *html* (a str) to ``tianying/<filename>`` as UTF-8 bytes."""
        # makedirs(exist_ok=True) avoids the exists()/mkdir() race.
        os.makedirs("tianying", exist_ok=True)
        path = os.path.join("tianying", filename)
        # Binary mode + explicit encode keeps the original byte-exact
        # output on every platform; `with` guarantees the handle closes.
        with open(path, 'wb') as fh:
            fh.write(html.encode(encoding='utf-8'))


if __name__ == '__main__':
    # Allow running this file directly: hands control to Scrapy's CLI,
    # equivalent to invoking `scrapy crawl novel` from a shell.
    from scrapy import cmdline

    cmdline.execute(['scrapy', 'crawl', 'novel'])
