import os
import re
import sys
import time
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup


def get_chapter_url(novel_catalog_url):
    """Fetch a novel's table-of-contents page and collect its chapter links.

    Args:
        novel_catalog_url: URL of the novel's catalog page
            (e.g. 'https://www.xyangguiweihuo.com/11/11516/').

    Returns:
        A tuple (chapter_url, chapter_name, novel_name) where chapter_url and
        chapter_name are parallel lists of absolute chapter URLs and chapter
        titles, and novel_name is the novel's title from the page's last <h2>.
    """
    head = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'
    }
    # gb18030 is a strict superset of gbk: same result for valid gbk pages,
    # but it does not raise on the extra characters plain gbk rejects.
    html = requests.get(novel_catalog_url, headers=head).content.decode('gb18030')

    soup = BeautifulSoup(html, 'html.parser')
    dd_tag = soup.find_all('dd')
    novel_name = soup.find_all('h2')[-1].text

    chapter_url = []
    chapter_name = []

    for dd_meta in dd_tag:
        for a_meta in dd_meta.find_all('a'):
            # urljoin resolves the href against the catalog URL; the original
            # hard-coded prefix concatenation produced a double slash for
            # hrefs starting with '/' and broke page-relative hrefs entirely.
            chapter_url.append(urljoin(novel_catalog_url, a_meta.get('href')))
            chapter_name.append(a_meta.text)

    return chapter_url, chapter_name, novel_name
            

# 将小说下载到多个文件中，以章节为单位   
# Download the novel chapter-by-chapter: one .txt file per chapter.
def download_novel_chapter(chapter_url, novel_dir, chapter_No):
    """Fetch one chapter page and save its text as <novel_dir>/<No>_<title>.txt.

    Args:
        chapter_url: absolute URL of the chapter page.
        novel_dir: existing directory the chapter file is written into.
        chapter_No: 1-based chapter number used as the file-name prefix.
    """
    head = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'
    }
    # gb18030 is a superset of gbk; tolerates characters strict gbk rejects.
    html = requests.get(chapter_url, headers=head).content.decode('gb18030')
    soup = BeautifulSoup(html, 'lxml')
    txt = soup.find_all('div', {'class': 'showtxt'})[-1].text

    # Strip the full-width indentation spaces so the text is compact.
    # (Replace with '\n' instead to keep one paragraph per line.)
    novel_txt = txt.replace('　　', '')

    # Remove trailing site URLs. The original pattern used [a-zA-z], whose
    # ASCII range Z..a also matches '[', '\\', ']', '^', '_' and '`'.
    novel_txt = re.sub(r'[a-zA-Z]+://\S*', '', novel_txt)

    # The chapter title comes from the page and may contain characters that
    # are illegal in Windows file names; drop them before building the path.
    title = soup.find_all('h1')[-1].text
    safe_title = re.sub(r'[\\/:*?"<>|]', '', title)
    chapter_dir = os.path.join(novel_dir, str(chapter_No) + '_' + safe_title + '.txt')

    print('正在下载第 ' + str(chapter_No) + ' 章.........')

    with open(chapter_dir, 'w+', encoding='utf-8') as f:
        f.write(novel_txt)
    print('Done!')
    
# 将正本小说下载到一个txt文件中    
# Download the whole novel into a single .txt file, appending chapter by chapter.
def download_novel_chapter_onefile(chapter_url, novel_dir, chapter_No, novel_name):
    """Fetch one chapter page and append its text to <novel_dir>/<novel_name>.txt.

    Args:
        chapter_url: absolute URL of the chapter page.
        novel_dir: existing directory containing the combined novel file.
        chapter_No: 1-based chapter number (used only for progress output).
        novel_name: novel title; names the single combined output file.
    """
    head = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36'
    }
    # gb18030 is a superset of gbk; tolerates characters strict gbk rejects.
    html = requests.get(chapter_url, headers=head).content.decode('gb18030')
    soup = BeautifulSoup(html, 'lxml')
    txt = soup.find_all('div', {'class': 'showtxt'})[-1].text

    # Strip the full-width indentation spaces so the text is compact.
    # (Replace with '\n' instead to keep one paragraph per line.)
    novel_txt = txt.replace('　　', '')

    # Remove trailing site URLs. The original pattern used [a-zA-z], whose
    # ASCII range Z..a also matches '[', '\\', ']', '^', '_' and '`'.
    novel_txt = re.sub(r'[a-zA-Z]+://\S*', '', novel_txt)

    print('正在下载第 ' + str(chapter_No) + ' 章.........')

    # 'a+' append mode: successive chapters accumulate into one file.
    with open(os.path.join(novel_dir, novel_name + '.txt'), 'a+', encoding='utf-8') as f:
        f.write(novel_txt)
    print('Done!')
    
# 参数flag为1表示下载到一个文件中，0表示一章一个文件  
# flag=1: download everything into one file; flag=0: one file per chapter.
def download(url, flag, skip=9):
    """Download every chapter of the novel behind the given catalog URL.

    Args:
        url: novel catalog page URL.
        flag: truthy -> single combined file; falsy -> one file per chapter.
        skip: number of leading catalog entries to ignore. Defaults to 9,
            matching the original hard-coded behavior — presumably the
            catalog's first entries are a "latest chapters" duplicate list;
            TODO confirm against the site's markup.
    """
    chapter_url, chapter_name, novel_name = get_chapter_url(url)
    chapter_num = len(chapter_url)
    novel_dir = './' + novel_name

    # exist_ok avoids the check-then-create race of exists()+mkdir().
    os.makedirs(novel_dir, exist_ok=True)

    for i in range(skip, chapter_num):
        # Renumber so the first downloaded chapter is chapter 1.
        if flag:
            download_novel_chapter_onefile(chapter_url[i], novel_dir, i - skip + 1, novel_name)
        else:
            download_novel_chapter(chapter_url[i], novel_dir, i - skip + 1)
    
    
# 将小说名作为程序输入
# Take the novel name as interactive program input.
def get_novel_catalog_url():
    """Search biqusoso for a novel by name, let the user pick a result, and
    start the download (single-file mode).

    Prompts repeatedly until a non-empty novel name is entered; prints the
    search results with 1-based numbering and validates the chosen number
    before downloading. Returns None early when nothing is found or the
    input is invalid.
    """
    # Keep prompting until the user types a non-empty name.
    while True:
        novel_name = input('输入搜索小说名：')
        if novel_name:
            break

    head = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:79.0) Gecko/20100101 Firefox/79.0'}

    server_url = 'https' + '://' + 'so.biqusoso.com' + '/s.php'
    param = {'ie': 'utf-8', 'siteid': 'yangguiweihuo.com', 'q': novel_name}
    html = requests.get(server_url, headers=head, params=param).content.decode('utf-8')

    soup = BeautifulSoup(html, 'lxml')
    search_list = soup.find_all('a', {'target': '_blank'})
    num_found = len(search_list)
    novel_list = []

    if not num_found:
        print('未找到符合名称的小说，请检查后重新运行本程序！')
        return
    print('发现符合的小说： ' + str(num_found) + ' 本')

    print('-----------------------------------------------------------------------')
    for count, a_tag in enumerate(search_list, start=1):
        href = a_tag.get('href')
        # Insert an 'x' at index 11 of the result URL — presumably rewriting
        # the search engine's host to the downloadable mirror
        # (yangguiweihuo.com -> xyangguiweihuo.com); TODO confirm.
        href = href[:11] + 'x' + href[11:]
        novel_list.append(href)
        print('编号' + str(count) + ' ' + a_tag.text)
        print('链接：' + href)
        print('-----------------------------------------------------------------------')

    print('| | | | | | | | | | | | | | |')
    print('| | | | | | | | | | | | | | |')
    print('| | | | | | | | | | | | | | |')
    print('| | | | | | | | | | | | | | |')
    user_No = input('请输入需要下载的小说的编号：')
    # Reject non-numeric input instead of crashing with ValueError.
    try:
        choice = int(user_No)
    except ValueError:
        print('输入的编号不符合，请重新运行程序！！！')
        return
    # Lower bound is 1: the original '< 0' check let 0 through, which then
    # silently selected novel_list[-1] (the last search result).
    if choice < 1 or choice > num_found:
        print('输入的编号不符合，请重新运行程序！！！')
        return
    novel_catalog_url = novel_list[choice - 1]
    download(novel_catalog_url, 1)

    

if __name__ == "__main__":
    
    get_novel_catalog_url()
    

  
