import requests
from lxml import etree
from multiprocessing.dummy import Pool as ThreadPool


def spider_link_list(main_url):
    """Fetch the novel's table-of-contents page and build chapter URLs.

    Parameters
    ----------
    main_url : str
        Base URL of the novel's index page. Must end with '/' since chapter
        URLs are formed by appending each page's filename to it.

    Returns
    -------
    tuple[list, list]
        ``(link_list, all_title)`` — chapter page URLs and chapter titles.
        NOTE: the first 4 anchors (a "latest chapters" block at the top of
        the page, presumably — confirm against the live page) are skipped
        in the URL list but NOT in the title list, so titles are offset by
        4 relative to the URLs; callers must account for this (see spider()).
    """
    # Timeout so the crawl cannot hang forever on an unresponsive server.
    res = requests.get(main_url, timeout=30)
    # Fail fast on HTTP errors instead of parsing an error page as HTML.
    res.raise_for_status()
    selector = etree.HTML(res.text)

    all_title1 = selector.xpath('//*[@id="list"]/dl/dd/a/text()')
    all_link1 = selector.xpath('//*[@id="list"]/dl/dd/a/@href')

    # Skip the first 4 duplicate links, then join each page filename
    # (last path component of the href) onto the base URL.
    link_list1 = [main_url + str(link).split('/')[-1] for link in all_link1[4:]]

    return link_list1, all_title1


def spider(start=1447):
    """Download chapter bodies and append them to a local text file.

    Relies on the module-level globals ``link_list`` (chapter page URLs)
    and ``all_title`` (chapter titles, offset by 4 relative to
    ``link_list`` — see spider_link_list()).

    Parameters
    ----------
    start : int, optional
        Index into ``link_list`` to resume from. Defaults to 1447
        (presumably where a previous run stopped — pass 0 for a full crawl).
    """
    for i in range(start, len(link_list)):
        # Per-request timeout so one dead chapter page cannot stall the crawl.
        con_res = requests.get(link_list[i], timeout=30)
        con_selector = etree.HTML(con_res.text)
        all_con = con_selector.xpath('//*[@id="content"]/text()')

        # Append mode: each chapter is added after the previous one,
        # which is also what makes resuming via `start` work.
        with open('D:/茅山捉鬼人.txt', 'a', encoding='utf8') as file_write:
            # Titles are offset by 4 relative to link_list (the index page's
            # first 4 links are skipped but their titles are not).
            file_write.write(all_title[i + 4])
            print('{}====>标题写入'.format(all_title[i + 4]))

            # Each xpath text node is already a str; the original
            # ''.join(all_con[p]) was an identity no-op, so write directly.
            for paragraph in all_con:
                file_write.write(paragraph + '\n')
            print('======>内容写入')


if __name__ == '__main__':
    # A 4-worker thread pool was once wired in here; kept for reference.
    # pool = ThreadPool(4)

    start_url = 'http://www.xs.la/17_17786/'
    # These two names are module globals read by spider() — do not rename.
    link_list, all_title = spider_link_list(start_url)
    spider()

    # pool.map(spider, link_list)
    # pool.close()
    # pool.join()

    print('爬取完成')
