# -*- coding: utf-8 -*-
# @Time    : 2018/5/21 09:08
# @Author  : daigua
# @File    : 12-笔趣阁-beautifulsoup.py
# @Software: PyCharm
# 写到1个文件里面


from bs4 import BeautifulSoup
import re
import urllib.request
import os

# Running global chapter counter: incremented once per downloaded chapter and
# used to build the "第N章" heading in selectPage(). Starts at 4948,
# presumably to continue numbering from a previous run -- TODO confirm.
zj=4948

def gain_html_content(url):
    """Download *url* and return the response body decoded as UTF-8.

    url: target URL (http/https).
    Returns "" on any failure (bad URL, timeout, HTTP error, decode error);
    callers treat the empty string as a "please retry" sentinel.
    """
    headers = {
        # Impersonate a desktop browser; some hosts reject urllib's default UA.
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36"
    }
    try:
        request = urllib.request.Request(url, headers=headers)
        # Bounded wait so a dead server cannot hang the crawl forever.
        response = urllib.request.urlopen(request, timeout=10)
        return response.read().decode('utf-8')
    except Exception:
        # Original code used a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; `Exception` keeps the best-effort
        # behavior without trapping interpreter exits.
        print("timeout url:" + url)
    return ""



def get_chapter(content):
    """Parse a forum index page into ({thread_title: absolute_url}, page_title).

    content: HTML text of one board-index page.
    Returns an empty dict (instead of raising AttributeError) when the
    expected thread table is missing, e.g. for an error page, so the caller
    can skip the page gracefully.
    """
    soup = BeautifulSoup(content, "html.parser")
    # Page <title>; guard against pages with no title tag at all.
    title = soup.title.string if soup.title is not None else ""
    # Thread links live inside the fixed-layout <tbody> of the board listing.
    table = soup.find("tbody", attrs={'style': 'table-layout:fixed;'})
    chapter_dict = dict()
    if table is None:
        # Unexpected page layout -- nothing to extract.
        return chapter_dict, title
    # Only real thread links: style must be empty and href must point into
    # html_data/<board>/<id>.html.
    chapter_list = table.find_all("a", attrs={"style": "", "href": re.compile(r"html_data/.*/\d+\.html")})
    for link in chapter_list:
        # Map thread title -> absolute URL built from the relative href.
        chapter_dict[link.string] = "https://yj1.bt7086.xyz/pw/" + link.attrs["href"]
    return chapter_dict, title


def get_text(url):
    """Fetch one thread page and return an iterable of its post-body strings.

    url: absolute thread URL.
    Retries the download until non-empty content comes back
    (gain_html_content returns "" on failure -- NOTE(review): this retry
    loop is unbounded, matching the original behavior).
    Returns an empty list (instead of raising AttributeError) when the post
    body div#read_tpc is missing, so callers can iterate safely.
    """
    content = gain_html_content(url)
    # "" means the download failed -> retry until it succeeds.
    while content == "":
        print("retry url:" + url)
        content = gain_html_content(url)
    soup_text = BeautifulSoup(content, "html.parser")
    post_div = soup_text.find("div", id="read_tpc")
    if post_div is None:
        # Thread removed or layout changed -- yield nothing.
        return []
    # .strings lazily yields every text fragment, preserving line structure.
    return post_div.strings



def write2file(title, file_name, content):
    """Write one chapter to <title>/<file_name>.txt as UTF-8.

    title: output directory name (created if missing).
    file_name: chapter file name, without extension.
    content: full chapter text.
    """
    print("%s下载中。。。" % file_name)
    direction = title + "/" + file_name
    # exist_ok avoids the TOCTOU race of exists()-then-mkdir.
    os.makedirs(title, exist_ok=True)
    # Explicit encoding: the platform default codec may not handle the
    # Chinese chapter text and would raise UnicodeEncodeError.
    with open(direction + ".txt", 'w', encoding='utf-8') as f:
        f.write(content)
    print("%s下载完毕!" % file_name)

def fmPage(page):
    """Left-pad *page* with zeros so the result is at least three characters."""
    for limit, pad in ((10, "00"), (100, "0")):
        if page < limit:
            return pad + str(page)
    return str(page)

def selectPage(page):
    """Download every chapter listed on forum index page *page* into one file.

    page: 1-based board index page number.
    Output file: 1024pages/1024_<zero-padded page>.txt. The global counter
    `zj` numbers chapters consecutively across pages ("第N章" headings).
    """
    global zj
    tar_url = "https://yj1.bt7086.xyz/pw/thread.php?fid=17&page=" + str(page)
    content = gain_html_content(tar_url)
    # "" means the download failed -> retry until the index page comes back.
    while content == "":
        print("retry tar_url:" + tar_url)
        content = gain_html_content(tar_url)
    print("content%s" % content)
    # {thread title: thread url} for every chapter link on this index page.
    dict1, title = get_chapter(content)
    for name, url in dict1.items():
        print("dict1,title:" + name + "-->" + url)
    title = '1024'
    # Original code assumed 1024pages/ already existed and crashed otherwise.
    os.makedirs("1024pages", exist_ok=True)
    with open("1024pages/" + title + "_" + fmPage(page) + ".txt", mode='w', encoding='utf-8') as f:
        for name, url in dict1.items():
            # Skip the board's sticky/navigation pseudo-threads.
            if name == ".::" or name == "本站手机端APP下载":
                continue
            zj = zj + 1
            name = "第" + str(zj) + "章   " + name
            print("page:" + fmPage(page) + " name:" + name + "-->" + url)
            # Chapter heading first, then every body fragment, each line
            # prefixed with CRLF (kept from the original output format).
            f.write("\r\n")
            f.write(name)
            for s in get_text(url):
                f.write("\r\n")
                f.write(s)

def main():
    """Crawl forum index pages 100 through 499 inclusive, one file per page."""
    page = 100
    while page < 500:
        selectPage(page)
        page += 1

# Only crawl when run as a script, not when imported.
if __name__ == "__main__":
    main()

# Ad-hoc manual check of a single thread, kept for debugging:
# xx=get_text("https://yj1.bt7086.xyz/pw/html_data/17/2005/4772090.html")
# for s in xx:
#     print(s)
