import requests  # 爬虫模块
import re  # 正则表达式
from lxml import html  # 获取小说内容模块


# Scrape the search-results page and return the book-detail links.
def get_link(name):
    """Search Qidian for *name* and return the book-detail-page URLs found.

    Args:
        name: search keyword (book title); sent as the ``kw`` query parameter.

    Returns:
        list[str]: absolute ``https://book.qidian.com/info/...`` URLs,
        in the order they appear on the results page.
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/96.0.4664.110 Safari/537.36 Edg/96.0.1054.62 '
    }
    # Let requests build and percent-encode the query string instead of raw
    # string concatenation, so non-ASCII (Chinese) keywords are encoded safely.
    res = requests.get('https://www.qidian.com/search',
                       headers=headers, params={'kw': name})
    # Raw string with escaped dots: the original unescaped '.' matched any
    # character, so the pattern was looser than intended.
    pattern = re.compile(r'<a.*?href="//book\.qidian\.com/info/(.*?)".*?>书籍详情</a>')
    base = 'https://book.qidian.com/info/'
    return [base + book_id for book_id in pattern.findall(res.text)]


# Fetch the book title.
def headline(url):
    """Fetch a book-info page and return the title inside the first ``<em>`` tag.

    Args:
        url: book-detail-page URL (as produced by ``get_link``).

    Returns:
        str: the book title.

    Raises:
        ValueError: if the page contains no ``<em>`` tag.  The original code
            subscripted the ``None`` returned by a failed ``re.search`` and
            crashed with an opaque ``TypeError``.
    """
    res = requests.get(url)
    match = re.search(r'<em>(.*?)</em>', res.text, re.S)
    if match is None:
        raise ValueError('no <em> title found in page: ' + url)
    return match[1]


# Scrape chapter titles (returns pairs: [0] chapter title, [1] chapter link).
def get_section(url):
    """Scrape the catalog of a book-info page.

    Args:
        url: book-detail-page URL; ``#Catalog`` is appended to reach the
            chapter list.

    Returns:
        list[list[str]]: one ``[chapter_title, chapter_url]`` pair per
        chapter, in page order.
    """
    catalog_url = url + '#Catalog'
    read_base = 'https://read.qidian.com/'
    page = requests.get(catalog_url).text
    pattern = re.compile('<a href="//read.qidian.com/(.*?)".*?>(.*?)</a>')
    # findall yields (path, title) tuples; emit [title, absolute_url] lists.
    return [[title, read_base + path] for path, title in pattern.findall(page)]


# Requires `from lxml import html` at module level (returns a list of text paragraphs).
def get_text(url, html=html):
    """Download a chapter page and return its paragraphs as a list of strings.

    Args:
        url: chapter URL (as produced by ``get_section``).
        html: the ``lxml.html`` module; defaults to the module-level import.
            Kept as a parameter for backward compatibility with existing
            callers.

    Returns:
        list[str]: the chapter's paragraph texts, in document order.
    """
    etree = html.etree
    # Use distinct local names: the original rebound ``html`` to the page
    # text and then to the parsed tree, shadowing the module parameter.
    page = requests.get(url).text
    tree = etree.HTML(page)
    return tree.xpath('//div[@class="read-content j_readContent"]/p/text()')


# Prompt for a number in the range 0-10.
def input_1(str=''):
    """Prompt repeatedly until the user types an integer from 0 to 10.

    Args:
        str: prompt text passed to ``input()``.  NOTE(review): the parameter
            name shadows the builtin ``str``; kept unchanged because it is
            visible to callers.

    Returns:
        int: the validated number, 0-10 inclusive.
    """
    valid = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10'}
    while True:
        answer = input(str)
        if answer in valid:
            return int(answer)
        print('输入格式不符合规范，请重新输入！')


# Fetch the book blurb / introduction.
def ge_info(url, html=html):
    """Download a book-info page and return its introduction paragraphs.

    NOTE(review): the name looks like a typo for ``get_info``; kept unchanged
    so existing callers keep working.

    Args:
        url: book-detail-page URL (as produced by ``get_link``).
        html: the ``lxml.html`` module; defaults to the module-level import.
            Kept as a parameter for backward compatibility.

    Returns:
        list[str]: the blurb's paragraph texts, in document order.
    """
    etree = html.etree
    # Distinct local names instead of rebinding ``html`` twice (original
    # shadowed the module parameter with the page text, then the tree).
    page = requests.get(url).text
    tree = etree.HTML(page)
    return tree.xpath('//div[@class="book-intro"]/p/text()')
