import os
import re
from pathlib import Path

import parsel
import requests
from tqdm import tqdm


def get_url_name(url):
    """Fetch the novel's index page (used to extract the book title).

    Returns the ``requests`` Response with its encoding forced to gbk,
    which is the encoding this site serves.
    """
    ua = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
          ' Chrome/99.0.4844.51 Safari/537.36')
    resp = requests.get(url, headers={'user-agent': ua})
    # the site's pages are gbk-encoded; requests would otherwise guess wrong
    resp.encoding = 'gbk'
    return resp



def clear(count_path=None):
    """Reset the download-progress counter file to 0.

    Args:
        count_path: path of the counter file to reset. Defaults to
            ``count.ini`` in the current working directory — the same file
            that ``main()`` reads and writes. (The original hard-coded a
            machine-specific ``D:\\workcode\\...`` path, so it reset a
            different file than the one main() actually used.)
    """
    if count_path is None:
        count_path = os.path.join(os.getcwd(), 'count.ini')
    with open(count_path, 'w', encoding='utf-8') as f:
        f.write('0')
    print("clear successful")


def get_url(url, base_url="http://www.luoqiuw.com"):
    """Fetch the table-of-contents page and return absolute chapter URLs.

    Args:
        url: full URL of the novel's index page.
        base_url: site root prepended to each relative chapter href.

    Returns:
        list[str]: absolute chapter URLs in reading order.
    """
    print("link_list start...")
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
                      ' Chrome/99.0.4844.51 Safari/537.36'
    }
    response = requests.get(url, headers=headers)
    response.encoding = 'gbk'

    selector = parsel.Selector(response.text)
    # NOTE(review): the first 9 links are skipped, presumably "latest chapter"
    # shortcuts above the real table of contents — confirm against the site.
    hrefs = selector.css('#list a::attr(href)').getall()[9:]
    link_list = [base_url + href for href in hrefs]
    print("link_list successful")
    return link_list


def get_response(html_url):
    """GET *html_url* with retries; return the response decoded as gbk.

    Retries until the request succeeds; prints a progress dot for the first
    10 failures and aborts the program after 100 consecutive failures.

    Args:
        html_url: chapter URL to fetch.

    Returns:
        requests.Response with ``encoding`` set to ``'gbk'``.
    """
    headers = {
        'Connection': 'close',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)'
                      ' Chrome/99.0.4844.51 Safari/537.36'
    }
    # BUG FIX: the counter was initialized INSIDE the while loop, so it reset
    # to 0 on every attempt and the 100-failure bail-out could never trigger
    # (infinite retry on a dead link). It now accumulates across attempts.
    attempts = 0
    while True:
        try:
            response = requests.get(url=html_url, headers=headers, timeout=(2, 10))
            response.encoding = 'gbk'
            return response
        except requests.RequestException as e:
            attempts += 1
            if attempts <= 10:
                tqdm.write('...', end='')
            elif attempts >= 100:
                tqdm.write(str(e))
                print(attempts)
                exit()


def get_content(name, list_url, down_dir):
    """Download one chapter and append it to ``<down_dir>\\download\\<name>.txt``.

    Args:
        name: book title (also the output file's base name).
        list_url: chapter page URL.
        down_dir: root directory containing the ``download`` folder.
    """
    response = get_response(html_url=list_url)
    selector = parsel.Selector(response.text)
    title = selector.css('.bookname h1::text').get()  # None if layout changed
    content_list = selector.css('#content::text').getall()
    # strip Windows line endings, then drop the fragments that became empty
    cleaned = [part.replace("\r\n", '') for part in content_list]
    cleaned = [part for part in cleaned if part != '']
    content = '\n'.join(cleaned)
    if "\r\n" in content:
        print("排版可能出现错误换行")

    with open(down_dir + "\\download\\" + name + '.txt', mode='a+', encoding='utf-8') as f:
        try:
            f.write(title)
        except TypeError:
            # BUG FIX: the original passed a raw list to tqdm.write (which
            # expects a string), so the error handler itself raised; and a
            # second tqdm.write(title) with title=None raised again. Wrap in
            # str() so the failing URL is actually reported.
            tqdm.write(str([title, list_url]))
        f.write('\n')
        f.write(content)
        f.write('\r\n\t\t')


def check_count(name, count, down_dir):
    """Verify the downloaded file contains every expected chapter heading.

    Scans ``<down_dir>\\download\\<name>.txt`` for ``' 第NNN章'`` headings and
    compares the found chapter numbers against 1..count (zero-padded to at
    least 3 digits, matching the site's heading format). Prints "True" on a
    full match, otherwise prints the actual count and the missing chapters.

    Args:
        name: book title / output file base name.
        count: number of chapters that should be present.
        down_dir: root directory containing the ``download`` folder.
    """
    # BUG FIX: the original padded numbers by indexing fixed ranges
    # (cl[10..99]), which raised IndexError for any book with < 100 chapters.
    # str.zfill(3) produces the identical '001'..'099', '100', ... sequence
    # for every count.
    expected = [str(i).zfill(3) for i in range(1, count + 1)]
    with open(down_dir + '\\download\\{}.txt'.format(name), 'r', encoding='utf-8') as f:
        text = f.read()
    found = re.findall(' 第(.*?)章', text)
    missing = [num for num in expected if num not in found]

    if len(found) == count:
        print("True")
    else:
        print("Error", end=' ')
        print("chapter_count_{}".format(str(len(found))))
        print(missing)


def name1(url, down_dir=None, kais=0):
    """Fetch the novel's title; append ``_1`` if a fresh run would clash.

    Args:
        url: index page URL.
        down_dir: download root. Defaults to the current working directory
            *at call time* — the original used ``down_dir=os.getcwd()``,
            which is evaluated once at import time and goes stale if the
            process later changes directory.
        kais: resume offset; 0 means a fresh download, in which case an
            existing file of the same name gets a ``_1`` suffix instead of
            being appended to.

    Returns:
        str: the (possibly suffixed) book title.
    """
    if down_dir is None:
        down_dir = os.getcwd()
    response = get_url_name(url)
    selector = parsel.Selector(response.text)
    name = selector.css('#info h1::text').get()
    if kais == 0 and Path(down_dir + "\\download\\{}.txt".format(name)).is_file():
        name = name + '_1'
    return name


# def url():
#     url = input("输入完整网址：")
#     return url

def main():
    """Interactive entry point: prompt for a novel URL and download it.

    Reads the resume offset from ``count.ini`` (chapters already saved),
    downloads the remaining chapters one by one — persisting progress after
    each — and finally verifies the chapter count.
    """
    url = input("输入完整网址：")
    down_dir = os.getcwd()

    count_path = down_dir + '\\count.ini'
    # ROBUSTNESS FIX: on a first run count.ini does not exist and the
    # original crashed with FileNotFoundError; a corrupt file crashed int().
    # Either way the sensible resume offset is 0.
    try:
        with open(count_path, 'r') as f:
            kais = int(f.read())
    except (FileNotFoundError, ValueError):
        kais = 0

    url_list = get_url(url)
    name = name1(url, down_dir, kais)
    print("download start")

    for chapter_url in tqdm(url_list[kais:], desc='download'):
        get_content(name, chapter_url, down_dir)
        kais += 1
        # persist progress after every chapter so an interrupted run resumes
        with open(count_path, 'w', encoding='utf-8') as f:
            f.write(str(kais))
    print("download successful")
    check_count(name, len(url_list), down_dir)


# Script entry point: run one interactive download, then reset the saved
# progress counter so the next run starts from the beginning.
if __name__ == '__main__':
    # error = []

    print("enter...")
    main()
    clear()
    # os.system('python del_word.py')
    # print("hh")
