# python version 3.8.5



import requests
from bs4 import BeautifulSoup
from docx import Document
import sys, os

# Root URL of the story-hub index (mobile site); relative links scraped
# from the pages are joined onto this base.
urlss = "http://3g.sbkk8.com/gushihui/"


# 获取故事汇的 故事集-链接 字典
# Scrape the story-hub index page into a {collection-name: relative-link} dict
def getdraw(urlss):
    """Fetch the hub index and map each story-collection title to its relative URL.

    The site serves GBK-encoded Chinese text; every collection link is an
    ``<a target="_blank">`` anchor on the index page.
    """
    response = requests.get(url=urlss)
    response.encoding = "gbk"  # site is GBK encoded, not UTF-8
    soup = BeautifulSoup(response.text, features='lxml')

    anchors = soup.find_all("a", attrs={"target": "_blank"})
    mapping = {}
    for anchor in anchors:
        # drop the trailing character of the title and the 10-char site
        # prefix of the href, keeping only the relative path segment
        title = str(anchor.text)[:-1]
        link = anchor.get("href")[10:]
        mapping[title] = link
    return mapping


# 获取故事汇的 故事集-链接 字典
# Scrape one collection listing page into a {story-title: relative-link} dict
def getstt(urlss):
    """Fetch a collection page and map each story title to its relative URL.

    Same extraction rules as getdraw(): pages are GBK encoded, each story is
    an ``<a target="_blank">`` anchor, titles lose their last character and
    hrefs lose their 10-character site prefix.
    """
    page = requests.get(url=urlss)
    page.encoding = "gbk"
    parsed = BeautifulSoup(page.text, features='lxml')
    stories = parsed.find_all("a", attrs={"target": "_blank"})
    return {str(a.text)[:-1]: a.get("href")[10:] for a in stories}


# 将每个小故事存进word
# Download every story of every collection and save one .docx per collection
def getpersonal(self):
    """Walk the collection map, fetch every story, and write Word files.

    ``self`` is the {collection-name: relative-link} dict produced by
    getdraw(). For each collection, every story's title and body are appended
    to a Document, which is saved under the local ``stories`` directory.
    """
    allss = len(self)  # total number of collections
    # Bug fix: save into the "stories" folder that __main__ actually creates,
    # instead of a hard-coded machine-specific Desktop path.
    outdir = os.path.join(os.getcwd(), "stories")
    s = 0
    for m, v in self.items():
        s += 1
        persons = getstt(urlss + v)      # {story-title: relative-link}
        allcollections = len(persons)    # number of stories in this collection
        document = Document()            # one Word document per collection
        __title__ = None                 # collection heading scraped from a story page
        i = 1
        for j, k in persons.items():
            print(
                '\r' + "正在下载第【{}】个故事集，故事集为《{}》，共有【{}】个故事集,正在下载第【{}】个小故事，故事名为《{}》,此故事集共有【{}】个小故事".format(
                    s, m, allss, i, j, allcollections),
                end='', flush=True)

            gethtmls = AllSTORIES(urlss + k)  # [collection-heading, story-title, story-body]
            __title__ = gethtmls[0]
            document.add_heading(gethtmls[1], 1)        # story title as heading
            document.add_paragraph(gethtmls[2] + "\n")  # story body
            i += 1
        # Bug fix: report the collection name (m); the original printed the
        # loop variable k (the last story's link), and raised NameError when
        # a collection had no stories at all.
        print('故事集%s爬取完毕！' % m)
        # Skip saving (and a NameError on __title__) for empty collections.
        if __title__ is not None:
            document.save(os.path.join(outdir, '%s.docx' % str(__title__)))


# 获取每个页面的小故事内容
# Fetch one story page and return [collection-heading, story-title, story-body]
def AllSTORIES(url):
    """Download a single story page and extract its heading, title and text.

    Returns a 3-element list: the collection heading (taken from the page's
    third <a> tag), the story title (first <h1>), and the story body (first
    ``div.articleContent``).
    """
    page = requests.get(url=url)
    page.encoding = "gbk"  # story pages are served as GBK
    soup = BeautifulSoup(page.text, features='lxml')

    heading = soup.find_all("a")[2].text  # collection heading: 3rd anchor on the page
    title = soup.find_all('h1')[0].text   # story title
    body = soup.find_all("div", {"class": {"articleContent"}})[0].text  # story text

    return [heading, title, body]


if __name__ == "__main__":
    # Make sure the output folder exists before any download starts.
    if not os.path.exists("stories"):
        os.mkdir(os.path.join(os.getcwd(), "stories"))

    # Scrape the hub index, then download every collection into Word files.
    getpersonal(getdraw(urlss))
