# coding=utf-8

# from index import insertData
import index

# from selenium import webdriver  # 从selenium导入webdriver
# from selenium.webdriver.common.by import By

from lxml import etree
import requests
import re
import random
import result
import aiohttp
import asyncio
from multiprocessing import Pool

sem = asyncio.Semaphore(10)  # Semaphore capping concurrent coroutines, so we don't crawl too fast

# Pool of User-Agent strings; one is picked at import time and reused for the whole run.
user_agent = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
]
# NOTE(review): this dict is shared module state and several fetch helpers mutate
# its 'Referer' entry before each request; concurrent coroutines can race on it.
headers = {
    'User-Agent': random.choice(user_agent),  # browser identity header
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',  # content types the client accepts
    'Accept-Language': 'zh-CN,zh;q=0.9',  # languages the client accepts
    'Connection': 'keep-alive',  # ask for a persistent connection
    'Host': 'book.zongheng.com',
    'Accept-Encoding': 'gzip, deflate',
}

async def get_html(url):
    """Fetch one chapter page asynchronously and hand its HTML to __etreeHTML.

    Concurrency is throttled by the module-level semaphore ``sem``.

    Fixes versus the original:
    * ``with (await sem)`` is the pre-3.8 semaphore idiom, removed in
      Python 3.10+ — ``async with sem:`` is the supported form.
    * A per-request copy of the shared ``headers`` dict carries the Referer,
      so concurrent coroutines no longer race on the module-level dict.

    :param url: absolute URL of the chapter page to fetch.
    """
    request_headers = dict(headers, Referer=url)  # per-call copy; do not mutate shared state
    async with sem:  # limit the number of in-flight requests
        # `async with` is the asynchronous context manager form
        async with aiohttp.ClientSession() as session:  # one session per fetch (original behavior)
            async with session.get(url, headers=request_headers) as resp:
                html = await resp.text()  # decoded response body
                print('异步获取%s下的html.' % url)
                __etreeHTML(html, url)

def main_get_html(urls):
    """Drive the coroutines: fetch every URL in *urls* and block until done.

    Fixes versus the original:
    * ``asyncio.wait`` rejects bare coroutine objects on Python 3.11+ and
      raises ValueError on an empty task set — ``asyncio.gather`` accepts
      coroutines directly and an empty *urls* list becomes a no-op.
    * Falls back to creating a loop when no current event loop exists
      (``get_event_loop`` raises RuntimeError there on newer Pythons).

    :param urls: iterable of chapter-page URLs to fetch via get_html.
    """
    try:
        # Keep the default loop when available: on Python <= 3.9 the
        # module-level `sem` may be bound to it at import time.
        loop = asyncio.get_event_loop()
    except RuntimeError:  # no current event loop in this thread
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    try:
        if urls:
            loop.run_until_complete(asyncio.gather(*(get_html(u) for u in urls)))
    finally:
        loop.close()  # shut the event loop down even if a fetch fails

# Fetch the current chapter, kick off a crawl of the whole book,
# and return the chapter content wrapped in a success result.
def getNovel(link, bookName, bookId, chapterName):
    content_list, page = __getNovel(link, bookName, bookId, chapterName)

    # First link inside the reader action bar points at the table of contents.
    catalogue_href = page.xpath('//*[@id="uiGPReaderAct"]/div[1]/a[1]/@href')[0]
    getWholeNovel(catalogue_href, bookName, bookId)

    return result.success(content_list)

# Crawl the whole novel: collect every free chapter URL and fetch them asynchronously.
def getWholeNovel(link, bookName, bookId):
    """Scrape the table-of-contents page at *link* and fetch all free chapters.

    Chapters whose <li> carries a "vip" class are paid content and skipped.

    Fixes versus the original:
    * ``chapter.xpath('./a')[0]`` raised IndexError for an <li> without an
      <a>; ``./a[1]/@href`` simply yields nothing for such entries.
    * The manual append loop is replaced by a comprehension.

    :param link: URL of the book's table-of-contents page.
    :param bookName: unused here; kept for interface compatibility.
    :param bookId: unused here; kept for interface compatibility.
    """
    print(link)
    headers['Referer'] = link

    data = requests.get(link, headers=headers)
    # Decode the raw bytes explicitly to avoid mojibake from a mis-guessed charset.
    html = etree.HTML(data.content.decode('utf-8'))

    # One href per free (non-VIP) chapter; entries without an <a> are skipped.
    urls = [
        href
        for chapter in html.xpath('/html/body/div[3]/div[2]/div[2]/div/ul/li[not(contains(@class,"vip"))]')
        for href in chapter.xpath('./a[1]/@href')
    ]

    main_get_html(urls)


def __getNovel(link, bookName, bookId, chapterName):
    """Download the chapter page at *link* and parse/store it via __etreeHTML.

    Returns the [content_list, parsed_html] pair produced by __etreeHTML.
    bookName/bookId/chapterName are accepted for interface compatibility;
    the actual values are re-read from the page attributes themselves.
    """
    headers['Referer'] = link

    response = requests.get(link, headers=headers)
    # Decode the raw bytes explicitly so the text is correctly UTF-8, avoiding mojibake.
    page_source = response.content.decode('utf-8')

    return __etreeHTML(page_source, link)


def __etreeHTML(html_doc, link):
    """Parse one chapter page, persist it via index.insertData, and return
    [content_list, parsed_html].

    Fix versus the original: lxml's ``Element.text`` is ``None`` for a <p>
    whose content starts with a child tag, so ``item.text.strip()`` raised
    AttributeError on such paragraphs; those entries are now skipped.

    :param html_doc: decoded HTML source of the chapter page.
    :param link: URL the page came from (stored alongside the content).
    """
    html = etree.HTML(html_doc)

    book_id = html.xpath("/html/body/@bookid")[0]  # book ID
    book_name = html.xpath("/html/body/@bookname")[0]  # book name
    chapter_id = html.xpath("/html/body/@chapterid")[0]  # chapter ID
    chapter_name = html.xpath("/html/body/@chaptername")[0]  # chapter name
    last_update_time = html.xpath(
        '//*[@id="readerFt"]/div/div[3]/span[2]/text()')[0]  # last update time
    content_list = []  # chapter body paragraphs
    for item in html.xpath('//*[@id="readerFt"]/div/div[5]/p'):
        if item.text is None:  # <p> with only child elements has no direct text
            continue
        text = item.text.strip()  # drop leading/trailing whitespace and newlines
        # Skip promotional filler lines (vote/bookmark begging, bare ellipsis).
        if re.match("注.+?推荐|注.+?收藏|^……$", text) is None:
            content_list.append(text)

    index.insertData(book_id, book_name.strip(), chapter_id, chapter_name.strip(), content_list, last_update_time, 0, 'zongheng', link)

    print('获取%s下的html完成.' % link)

    return [content_list, html]
