import requests
from bs4 import BeautifulSoup
import os
import re
import time

# HTTP request headers sent with every request.  Only the User-Agent is
# active; the remaining browser headers are kept commented out for
# reference in case the site starts fingerprinting requests.
headers = {
    # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    # 'Accept-Encoding': 'gzip, deflate, br',
    # 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,ja;q=0.7',
    # 'Cache-Control': 'max-age=0',
    # 'Cookie': 'sc_is_visitor_unique=rx11977086.1640188267.FFA2B813858D4F0537AB56EF977434FE.3.3.3.3.3.3.3.3.3; country=tw; b-user-id=949cd6b2-970e-ceac-77c1-773e18d758fa',
    # 'Host': 'www.cool18.com',
    # 'Sec-Ch-Ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
    # 'Sec-Ch-Ua-Mobile': '?0',
    # 'Sec-Ch-Ua-Platform': '"Windows"',
    # 'Sec-Fetch-Dest': 'document',
    # 'Sec-Fetch-Mode': 'navigate',
    # 'Sec-Fetch-Site': 'none',
    # 'Sec-Fetch-User': '?1',
    # 'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36'
}
# Session cookies; currently none are sent (all commented out).
cookies = {
    # 'sc_is_visitor_unique': 'rx11977086.1640188267.FFA2B813858D4F0537AB56EF977434FE.3.3.3.3.3.3.3.3.3',
    # 'b-user-id': '949cd6b2-970e-ceac-77c1-773e18d758fa',
    # 'country': 'tw'
}
# Local proxy (e.g. a SOCKS/HTTP tunnel on port 1080) used for all requests.
proxy_ip = {
    'http': 'http://127.0.0.1:1080',
    'https': 'http://127.0.0.1:1080'
}
# Per-request timeout in seconds.
timeout = 60
# URLs already fetched this session, used to avoid re-downloading pages.
download_list = []
# Output directory for saved .txt files.
path = './book/'


def show_menu():
    """Top-level interactive menu loop.

    Options:
        '0' - repeatedly prompt for a title and URL and download that page.
        '1' - browse the site index and pick entries to download.
        'q' - quit the program.
    """
    while True:
        clear()
        print('┏━━━━━━━━┓')
        print('┃──书──屋──┃')
        print('┗━━━━━━━━┛')
        # 0 = manual download by URL, 1 = browse the index page
        key = input('输入选项：')

        if key == '0':
            while True:
                name = input('请输入名字：')
                if name == 'q':
                    break
                url = input('请输入网址：')
                if url == 'q':
                    break
                print('━━━━━━━━━━')
                # Reset the session dedup list so this download starts clean.
                # (The original cleared it twice and echoed the menu key as
                # leftover debug output; both removed.)
                download_list.clear()
                download_page(name, url)
                print('下载结束！')
        elif key == '1':
            clear()
            read_html()
        elif key == 'q':
            return
        else:
            input('未知指令')


def clear():
    """Clear the console, then pad with blank lines to push old text away."""
    # 'cls' only exists on Windows; use 'clear' on POSIX systems so the
    # script is portable.
    os.system('cls' if os.name == 'nt' else 'clear')
    print('\n' * 40)
    # print('\n')


# Translation table mapping characters that are illegal in Windows file
# names (\ / : * ? " < > |) to safe full-width lookalikes; backslash and
# slash are simply removed (mapped to None).
_FILENAME_SAFE = str.maketrans({
    '\\': None,
    '/': None,
    ':': '：',
    '*': '※',
    '?': '？',
    '"': '“',
    '<': '《',
    '>': '》',
    '|': '丨',
})


def fix_name(name):
    """Return *name* with filesystem-illegal characters sanitized.

    Uses a single str.translate() pass instead of nine chained re.sub()
    calls - same result, one scan of the string.
    """
    return name.translate(_FILENAME_SAFE)


def load_html(url='https://www.cool18.com/bbs4/index.php'):
    """Fetch *url* through the configured proxy and return its HTML text.

    Returns '' on any non-200 status or request failure; callers treat an
    empty string as "nothing loaded".
    """
    try:
        res = requests.get(url=url, headers=headers, timeout=timeout, proxies=proxy_ip)
        if res.status_code == 200:
            return res.text
        print(url)
        print('┃加载页面状态出错┃')
        return ''
    except requests.RequestException as e:
        # Print the underlying error instead of silently discarding it,
        # so connection/proxy problems are diagnosable.
        print(url)
        print('┃加载页面请求报错┃')
        print(e)
        return ''


def download_html(name='004.html', url='https://www.cool18.com/bbs4/index.php'):
    """Download *url* and save the raw HTML to a file named *name*.

    The filename is sanitized via fix_name(); failures while writing are
    reported but not raised.
    """
    print('┏━━开始下载━━┓')
    text = load_html(url)
    print('┃━━正在保存━━┃')
    try:
        # 'with' guarantees the handle is closed even if the write fails
        # (the original leaked the handle on error).
        with open(fix_name(name), 'w', encoding='utf-8') as file:
            file.write(text)
        print('┗━━下载完成━━┛')
    except OSError:
        print('┗━━保存出错━━┛')


def read_html():
    """Browse the forum index and download entries chosen by the user.

    Shows two lists: featured ("精华导读") threads and the regular page
    listing.  A 2-digit number selects from the first list, a 3-digit
    number from the second; 'q' returns to the main menu.
    """
    text = load_html()
    soup = BeautifulSoup(text, 'html.parser')

    # Featured threads live in gold-highlighted table cells.
    book_list1 = soup.find_all('td', attrs={'class': 'gold_td'})
    # Regular listing; guard against a missing container (load failure or
    # layout change) instead of crashing with AttributeError.
    d_list = soup.find('div', attrs={'id': 'd_list'})
    if d_list is None:
        input('┃未读取到任何信息┃')
        return
    book_list2 = d_list.find('ul').find_all('li', recursive=False)

    while True:
        clear()
        # Featured list: zero-padded 2-digit index.
        print('┏━━━━━━━━┓\n┃──精华导读──┃\n┗━━━━━━━━┛')
        for index, item in enumerate(book_list1, start=1):
            print(str(index).zfill(2) + ' ' + item.string)

        # Page listing: zero-padded 3-digit index.
        print('┏━━━━━━━━┓\n┃──页面内容──┃\n┗━━━━━━━━┛')
        for index, item in enumerate(book_list2, start=1):
            print(str(index).zfill(3) + ' ' + item.find('a').get_text())

        keyword = input('选择要下载的内容：')
        if keyword == 'q':
            return
        # isdigit() guards int() - the original crashed with ValueError on
        # any non-numeric 2- or 3-character input.
        if keyword.isdigit() and len(keyword) == 2 and 0 < int(keyword) <= len(book_list1):
            chosen = book_list1[int(keyword) - 1]
            print('━━━━━━━━━━')
            download_list.clear()
            download_page(chosen.string, chosen.find('a').get('href'))
            input('全部内容下载完成')
        elif keyword.isdigit() and len(keyword) == 3 and 0 < int(keyword) <= len(book_list2):
            chosen = book_list2[int(keyword) - 1].find('a')
            print('━━━━━━━━━━')
            download_list.clear()
            download_page(chosen.get_text(), chosen.get('href'))
            input('全部内容下载完成')
        else:
            input('未输入正确')


def download_page(name='book', url=''):
    """Resolve *url*, skip it if already fetched this session, otherwise
    record it and hand it off to read_page() for download.

    Relative links are resolved against the forum base URL; download_list
    deduplicates URLs across the recursive chapter-following calls.
    """
    base_url = 'https://www.cool18.com/bbs4/'

    # Resolve relative links against the forum root.
    if not url.startswith('http'):
        url = base_url + url

    # Already fetched during this session - nothing more to do.
    if url in download_list:
        return

    download_list.append(url)
    print('准备下载：' + name + ' ...')
    print('下载URL：' + url)
    read_page(name, url)


def read_page(name, url):
    """Fetch one thread page, save its text to ./book/<name>.txt, and
    recursively queue any "more chapters" links found on the page.
    """
    text = load_html(url)
    if text == '':
        print('┃未读取到任何信息┃')
        return

    soup = BeautifulSoup(text, 'html.parser')

    # The story body lives inside a <pre> tag; walk its direct children.
    parts = []
    for content in soup.find('pre').children:
        if isinstance(content, str):
            # Plain text node: keep it if non-blank.
            stripped = content.strip()
            if stripped != '':
                parts.append('\t\t' + stripped + '\n')
        else:
            # Tag node: keep its text unless empty or the site watermark.
            # BUG FIX: the original used 'and' here, which is always False
            # (a string can't equal both '' and 'cool18.com'), so the
            # filter never dropped anything - 'or' is the intended logic.
            stripped = content.get_text().strip()
            if not (stripped == '' or stripped == 'cool18.com'):
                parts.append('\t\t' + stripped + '\n')
    # join() avoids the quadratic string += accumulation.
    book = ''.join(parts)

    # Ensure the output directory exists (no race between isdir and mkdir).
    os.makedirs(path, exist_ok=True)
    with open(path + fix_name(name) + '.txt', 'w', encoding='utf-8') as file:
        # Plain string replace: re.sub('cool18.com', ...) treated the dots
        # as regex wildcards and could eat unrelated text like 'cool18xcom'.
        file.write(book.replace('cool18.com', ''))
    print('下载完毕！' + name)

    # Follow "more chapters" links (rendered in a size-4 font block).
    more_page = soup.find('font', attrs={'size': '4'})
    if more_page:
        more_page_a = more_page.find_all('a')
        print('发现更多章节！共计：【' + str(len(more_page_a)) + '】页')
        print('将加入下载队列，标题如下：')
        for page in more_page_a:
            print(page.get_text().strip())
        # Download each chapter in order; download_page() dedups repeats.
        print('━━━━━━━━━━')
        for index, page in enumerate(more_page_a, start=1):
            print('开始下载第【' + str(index) + '】页')
            download_page(page.get_text().strip(), page.get('href'))


def test():
    # Scratchpad of one-off manual experiments; never called in normal runs
    # (the call at the bottom of the file is commented out).
    # NOTE(review): the file write below is nested inside the mkdir branch,
    # so it only executes when './book/' did NOT already exist - possibly
    # unintentional, but this is throwaway test code, so left as-is.
    # name = '【小马】(1-5) 作者:不知火.txt'
    # url = 'https://www.cool18.com/bbs4/index.php?app=forum&act=threadview&tid=14366648'
    # print('┏━━开始下载━━┓')
    # res = requests.get(url=url, headers=headers, timeout=timeout, proxies=proxy_ip)
    # if res.status_code == 200:
    #     print('┃━━正在保存━━┃')
    #     file = open(fix_name(name), 'w', encoding='utf-8')
    #     file.write(res.text)
    #     file.close()
    #     print('┗━━下载完成━━┛')
    # else:
    #     print('┗━━状态出错━━┛')

    # file = open('004.txt', 'r', encoding='utf-8')
    # text = file.read()
    # file.close()
    # soup = BeautifulSoup(text, 'html.parser')
    # more = soup.find('font', attrs={'size': '4'})
    # if more:
    #     more.find_all('a')
    # else:
    #     print(0)

    if not os.path.isdir('./book/'):
        os.mkdir('./book/')

        file = open('./book/1.txt', 'w', encoding='utf-8')
        file.write('123')
        file.close()


# Run the interactive menu only when executed as a script, so importing
# this module for reuse/testing does not block on input().
if __name__ == '__main__':
    show_menu()
    # test()
    # read_page(1,1)
