# coding=utf-8

# from index import insertData
import index

from selenium import webdriver  # 从selenium导入webdriver
from selenium.webdriver.common.by import By

from lxml import etree
import requests
import re
import random
import result

def getNovel(link, bookName, bookId, chapterName, platform):
    """
    Fetch one chapter page of a novel, parse the book/chapter metadata and the
    chapter text out of the HTML, persist it via ``index.insertData`` and
    return ``result.success`` wrapping the extracted paragraphs.

    Args:
        link: URL of the chapter page. Expected to contain 'xbiquge' and end
            with '.html' (the book/chapter ids are sliced out of the path).
        bookName: legacy parameter, kept for caller compatibility; the book
            name is re-derived from the page itself.
        bookId: legacy parameter, kept for caller compatibility; the book id
            is re-derived from the URL.
        chapterName: legacy parameter, kept for caller compatibility; the
            chapter name is re-derived from the page itself.
        platform: source-platform identifier, stored alongside the chapter.

    Returns:
        Whatever ``result.success(content_list)`` returns.
    """
    user_agents = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'
    ]
    headers = {
        'User-Agent': random.choice(user_agents),  # browser identity
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',  # content types the client accepts
        'Accept-Language': 'zh-CN,zh;q=0.9',  # languages the browser accepts
        'Connection': 'keep-alive',  # request a persistent connection
        # NOTE(review): the original hard-coded 'Host: book.zongheng.com' even
        # though the links are xbiquge URLs; requests derives the correct Host
        # header from the URL automatically, so the bogus header is omitted.
        'Accept-Encoding': 'gzip, deflate',
        'Referer': link,
    }
    response = requests.get(link, headers=headers)  # fetch the chapter HTML
    # Decode explicitly as UTF-8 to avoid mojibake from a mis-detected charset.
    html_doc = str(response.content, 'utf-8')
    html = etree.HTML(html_doc)

    # Slice '<bookId>/<chapterId>' out of the URL path: skip 14 chars past the
    # 'xbiquge' marker and drop the trailing '.html' (5 chars).
    # NOTE(review): the +14 offset assumes a fixed path prefix after
    # 'xbiquge' — confirm against the actual link format.
    ids = link[(link.index('xbiquge') + 14):(len(link) - 5)].split('/')

    book_id = ids[0]  # book id
    book_name = html.xpath('//*[@id="wrapper"]/div[4]/div/div[1]/a[3]/text()')[0]  # book name
    print(book_name)
    chapter_id = ids[1]  # chapter id
    chapter_name = html.xpath('//*[@id="wrapper"]/div[4]/div/div[2]/h1/text()')[0].strip()  # chapter name
    # Drop the leading ordinal before the first space (e.g. '第一章 Title').
    # Guarded so a title without a space no longer raises ValueError.
    if ' ' in chapter_name:
        chapter_name = chapter_name[(chapter_name.index(' ') + 1):]
    last_update_time = '1970-1-1'  # placeholder; the real update time is not parsed
    content_list = []  # chapter paragraphs
    paragraphs = html.xpath('//*[@id="content"]/text()')[0].split('''
<br />
<br />''')
    # The last fragment is skipped (same as the original `i < len - 1` guard);
    # presumably a site footer/ad line — confirm against a live page.
    for raw in paragraphs[:-1]:
        # BUG FIX: the original assigned `itme = contentArr[i]` (typo) then
        # read the undefined name `item`, raising NameError at runtime.
        text = raw.strip().replace('&nbsp;', '')  # trim whitespace and stray &nbsp; entities
        # Skip vote-begging / bookmark-reminder lines and bare ellipses.
        if re.match("注.+?推荐|注.+?收藏|^……$", text) is None:
            content_list.append(text)

    index.insertData(book_id, book_name, chapter_id, chapter_name, content_list, last_update_time, 0, platform, link)

    return result.success(content_list)