from lxml import etree
import requests
from result import *
import index
from urllib.parse import parse_qs


def get_chapter(link, p_chapter_name, platform, timeout=10):
    """
    Fetch a chapter page and extract the book id, book name, chapter id,
    chapter name and the paragraph list, then persist everything through
    ``index.insertData``.

    :param link: chapter page URL; its first header link must carry a
        ``bid`` query parameter identifying the book
    :param p_chapter_name: expected chapter name, compared against the
        scraped one as a sanity check (mismatch only prints a warning)
    :param platform: source platform identifier stored with the record
    :param timeout: HTTP timeout in seconds (default 10) — new optional
        parameter, backward compatible with existing callers
    :return: ``success(content_list)`` on success, ``error(1, "数据异常")``
        on any failure
    """
    try:
        resp = requests.get(link, timeout=timeout)  # timeout: never hang forever on a dead host
        resp.raise_for_status()  # an HTTP error page must not be parsed as chapter content
        html = etree.HTML(resp.content.decode('utf-8'))  # decode explicitly to avoid mojibake

        book_info = html.xpath("//div[@class='header']")[0]  # header block with book metadata
        book_id_url = book_info.xpath(".//a[1]/@href")[0]    # link whose query string holds the book id
        book_name = book_info.xpath(".//a[1]/text()")[0]     # book title
        chapter_name = html.xpath("//div[@class='article']/h2/text()")[0]  # chapter title

        # The book id lives in the 'bid' query parameter of the header link.
        book_id = parse_qs(book_id_url)['bid'][0]
        # Chapter id is "<bid>-<number>", the number being the text between "第" and "章"
        # in the chapter title; raises ValueError (caught below) if the title deviates.
        chapter_id = book_id + "-" + chapter_name[chapter_name.index("第") + 1: chapter_name.index("章")]

        # Collect stripped paragraph texts, skipping <p> nodes with no direct text.
        content_list = [p.text.strip()
                        for p in html.xpath("//div[@class='article']/p")
                        if p.text is not None]

        if p_chapter_name != chapter_name.strip():  # warn when caller's name and scraped name disagree
            print("章节名称不一致", "传：", p_chapter_name, "抓：", chapter_name)
        # Persist the chapter into the database (timestamp/flags are fixed placeholders).
        index.insertData(book_id, book_name.strip(), chapter_id, p_chapter_name, content_list, "2019-04-30 03:03:03", 0, platform, link)
        return success(content_list)
    except Exception as ex:  # broad by design: any scrape/parse failure maps to one error result
        print(p_chapter_name, "小说内容异常", ex)
        return error(1, "数据异常")


if __name__ == "__main__":
    # Manual smoke test: fetch a single known chapter and print the result.
    sample_link = "http://www.ireader.com/index.php?ca=Chapter.Index&pca=Chapter.Index&bid=10175636&cid=2"
    outcome = get_chapter(sample_link, "第2章：医圣传承", "zhangyue")
    print(outcome)

