from urllib.parse import urljoin

from analysis import save
from analysis.spider import MyRequest
from analysis.spider import common


# Index page of the target novel; every chapter link is resolved against it.
base_url = 'https://www.cc148.net/16_16054/'
# Minimal request headers; Host must match the site or the request may be rejected.
headers = {
            "Host": "www.cc148.net",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
            }
# Shared writer: each scraped chapter is appended to this text file.
writer = save.SaveTxt('元龙.txt')

#获取url与title，小说网站基本就改一下start_index的判定条件就行，叫笔趣阁的网站结构都一样
# Get chapter urls and titles. For other novel sites of this family
# (biquge-style layouts are all identical), usually only the start_index
# marker condition below needs changing.
def get_url_title(base_url, headers):
    """Fetch the novel's index page and return (url_list, title_list, start_index).

    url_list / title_list are the chapter hrefs and titles scraped from the
    '#list' block; start_index is the position of the real first chapter
    ('楔子', the prologue) — the list usually starts with duplicated
    "latest chapters" entries that should be skipped.
    """
    base_tree = MyRequest.get_tree(base_url, headers, encoding='gbk')
    url_list = base_tree.xpath('//div[@id="list"]/dl/dd/a/@href')
    title_list = base_tree.xpath('//div[@id="list"]/dl/dd/a/text()')

    # Fall back to 0 instead of crashing with a bare ValueError when the
    # marker chapter is absent (e.g. when reusing this spider on another novel).
    try:
        start_index = title_list.index('楔子')
    except ValueError:
        print("marker chapter '楔子' not found in title_list; start_index falls back to 0")
        start_index = 0

    # Mismatched lengths would silently pair wrong urls with titles below.
    if len(url_list) != len(title_list):
        print('长度不等，请检查：url_list长度为：{} ；title_list长度为：{}'.format(len(url_list), len(title_list)))

    return url_list, title_list, start_index

# Fetch one chapter's body text.
# NOTE(review): 'sleep_mix' looks like a typo for 'sleep_min', but it is the
# keyword name declared by common.sleeper — confirm before renaming.
@common.sleeper(sleep_mix=6, sleep_max=10)
def get_content(content_url, headers):
    """Download a single chapter page and return its cleaned text."""
    tree = MyRequest.get_tree(content_url, headers, encoding='gbk')
    fragments = tree.xpath('//div[@id="content"]/text()')

    # Several text nodes: join them all; otherwise take the single (or first)
    # fragment via the project helper.
    if len(fragments) > 1:
        content = ''.join(fragments)
    else:
        content = common.extract_first(fragments)

    # Site-specific cleanup: a 4-space run marks a paragraph start, so turn it
    # into a newline + indent.
    return content.replace('    ', '\n    ')


def run_spider(start_index=None):
    """Crawl chapters from start_index onward and append each to the txt file.

    start_index: chapter position in the full scraped list to resume from;
    None (default) starts at the detected first real chapter (the prologue).
    """
    url_list, title_list, start = get_url_title(base_url, headers)
    if start_index is None:
        start_index = start
    print('起始index为 ： {}'.format(start_index))

    # Total real chapters; 'or 1' guards the progress division against an
    # empty/degenerate chapter list.
    total_num = (len(url_list) - start) or 1
    now_num = start_index - start + 1  # 1-based counter of chapters processed so far

    for url, title in zip(url_list[start_index:], title_list[start_index:]):
        # urljoin handles both relative hrefs ('123.html') and site-absolute
        # ones ('/16_16054/123.html'); plain 'base_url + url' would duplicate
        # the path for the latter.
        content_url = urljoin(base_url, url)
        content = get_content(content_url, headers)

        index = now_num + start - 1  # resume point: pass this as start_index after a crash
        rate_progress = '阅读进度 ：{}%，index：{} '.format(round((now_num * 100) / total_num, 2), index)
        chapter = title + '\n' + content + '\n\n' + rate_progress + '\n' + '#' * 50 + '\n\n'

        writer.save_txt(chapter)
        print('储存到：{}；index:{}, {}'.format(title, index, rate_progress))
        now_num += 1


if __name__ == '__main__':
    # NOTE(review): 243 is presumably a resume point from an earlier
    # interrupted run (see the 'index' hint printed per chapter) — confirm.
    # Pass start_index=None to start from the detected first chapter.
    run_spider(start_index=243)






