import os
import random
import time

from bs4 import BeautifulSoup
import bs4.element

from utils import req
from config import *

def get_index_page():
    """Fetch and return the raw HTML of the book's index (table-of-contents) page."""
    return req.request_content(HOST_URL)

# def choose_index_range():
#     index_range = input("要下载的章节范围(如400_800) 默认0_为0到最后")
#     ranli = list(map(int,index_range.split('_')))
#     if len(ranli) == 0:
#         ranli = [0]
#     index_range = {}
#     index_range['min'] = min(ranli)
#     index_range['max'] = max(ranli)
#     return index_range

def get_dest_page_map(index_page):
    """Parse the index page into a mapping of chapter href -> chapter title.

    index_page: raw HTML of the table-of-contents page.
    Returns a dict, in document order, keyed by each chapter link's
    ``href`` attribute with the link's ``title`` attribute as value.
    """
    soup = BeautifulSoup(index_page, 'lxml')
    chapter_map = {}
    for entry in soup.find_all("dd"):
        # First child of <dd> — assumed to be the chapter's <a> tag
        # (TODO confirm against the site's markup).
        link = entry.next
        chapter_map[link.attrs['href']] = link.attrs['title']
    return chapter_map

def get_content_txt(content_page):
    """Extract chapter text from the page's ``<div id="content">``.

    Keeps only direct NavigableString children — the exact-type check is
    intentional so that subclasses such as HTML comments are excluded.
    Each text node is stripped and terminated with a blank line.
    """
    soup = BeautifulSoup(content_page, 'lxml')
    content_div = soup.find_all('div', attrs={'id': 'content'})[0]
    pieces = [
        str(node).strip() + '\n\n'
        for node in content_div.contents
        if type(node) == bs4.element.NavigableString
    ]
    return "".join(pieces)

# Count of chapters successfully written so far; read by print_progress().
Downloaded_Num = 0

def write_txt_file(txt, title, output_file):
    """Append one chapter (title line followed by its body) to *output_file*.

    Opens with an explicit UTF-8 encoding so Chinese text round-trips
    regardless of the platform's locale encoding (bug fix: the original
    used the default encoding, which corrupts output on e.g. Windows/GBK).
    Also increments the module-level Downloaded_Num progress counter.
    """
    global Downloaded_Num
    with open(output_file, 'a', encoding='utf-8') as f:
        f.write(title + '\n')
        f.write(txt)
    Downloaded_Num += 1


def check_output_file(output_file):
    """If *output_file* already exists, ask the user whether to overwrite it.

    Deletes the file only on an explicit 'y'/'Y' answer; any other input —
    including just pressing Enter — exits the program, matching the
    "(y/N)" prompt where the uppercase N marks the default.  (Bug fix:
    the original overwrote on any answer except n/N, contradicting its
    own prompt's default.)
    Does nothing when the file does not exist.
    """
    if os.path.exists(output_file):
        choice = input("文章已经存在是否覆盖？(y/N)")
        if choice in ('y', 'Y'):
            os.remove(output_file)
        else:
            exit(0)

# Progress-tracking globals: Total_Num is set in main() once the chapter
# index is known; Downloaded_Num is bumped by write_txt_file().
Downloaded_Num = 0
Total_Num = 0

def print_progress():
    """Redraw a one-line progress bar for Downloaded_Num / Total_Num.

    Bug fix: returns early when Total_Num is 0 (empty chapter index),
    which previously raised ZeroDivisionError.
    """
    global Total_Num, Downloaded_Num
    if Total_Num == 0:
        return
    percent = Downloaded_Num / Total_Num
    filled = int(percent * 100)
    # \r + end="" keeps the bar on a single, continually rewritten line.
    print(f'\r|{"#" * filled}{"*" * (100 - filled)}|{percent:5.2%}', end="")

def main():
    """Download every chapter listed on the index page into one text file.

    Each chapter is retried up to MAX_RETRY times on fetch/parse errors;
    a chapter that still fails is reported and skipped.
    """
    global Total_Num, Downloaded_Num
    output_file = OUTPUT_DIR + BOOK_NAME + '.txt'
    check_output_file(output_file)
    index_page = get_index_page()
    index_map = get_dest_page_map(index_page)
    Total_Num = len(index_map)
    for url_part, title in index_map.items():
        for attempt in range(MAX_RETRY):
            try:
                content_page = req.request_content(HOST_URL + url_part)
                txt = get_content_txt(content_page)
                write_txt_file(txt, title, output_file)
                print_progress()
                # Polite random 1-4 s delay between chapters.  Bug fix:
                # the original called time.sleep(range(1, 5)), which
                # raised TypeError (sleep takes a number) and was then
                # silently swallowed by a bare except clause.
                time.sleep(random.randint(1, 4))
                break
            except Exception:
                time.sleep(1)
                if attempt == MAX_RETRY - 1:
                    print(f'\r {title} be skipped')



if __name__ == '__main__':
    # Script entry point: only run the downloader when executed directly.
    main()