import urllib3
from bs4 import BeautifulSoup

# HTTP connection pool shared by all requests in this script.
http = urllib3.PoolManager()

# Fetch the chapter index page of the novel.
res = http.request('GET', 'http://www.twxs8.com/35_35835/')
dom = res.data.decode()

# Parse the index HTML.
soup = BeautifulSoup(dom, 'html.parser')

# Map chapter title -> absolute chapter URL, built from every <dd> entry.
searchDic = {}
for dd in soup.find_all('dd'):
    a = dd.find('a')
    if a is None or a.get("href") is None:
        # Skip <dd> entries that carry no usable link (would otherwise
        # raise AttributeError on a.text).
        continue
    searchDic[a.text.strip()] = "http://www.twxs8.com/" + a.get("href")

# Drop the first 12 entries — presumably the "latest chapters" shortcut
# list duplicated at the top of the index. TODO(review): confirm against
# the live page layout.
for key in list(searchDic.keys())[:12]:
    del searchDic[key]


def search(items):
    """Download every chapter in *items* and append its text to the output file.

    Args:
        items: dict mapping chapter title -> absolute chapter URL.

    Side effects:
        Performs HTTP GETs through the module-level ``http`` pool and
        appends each chapter's paragraph text to '大爱仙尊.txt' in the
        current working directory.
    """
    for title, url in items.items():
        print(title)  # progress indicator: show which chapter is being fetched
        page = http.request('GET', url)
        page_soup = BeautifulSoup(page.data.decode(), 'html.parser')
        # Every <p> on the chapter page is treated as body text; join at
        # C speed instead of quadratic string +=.
        article = "".join(p.text.strip() + "\n" for p in page_soup.find_all('p'))
        with open('大爱仙尊.txt', 'a', encoding='utf-8') as f:
            f.write(article + "\n")


# Download the first 20 chapters in two batches of 10.
_chapters = list(searchDic.items())
for _start in range(0, 20, 10):
    search(dict(_chapters[_start:_start + 10]))
