import os
import re

import lxml.html
import requests

etree = lxml.html.etree  # alias to lxml's ElementTree API; used as etree.HTML(...) below
url = 'https://www.qidian.com/all'  # start page: qidian's "all books" listing


class Spider:
    """Scraper for qidian.com.

    Walks the "all books" listing, creates one directory per book under
    ``qidian_``, and saves every chapter of each book as a numbered
    UTF-8 text file.
    """

    # Chapter counter used to prefix chapter file names; restarted for
    # each book so every book's chapters are numbered from 1.
    num = 1

    # Browser-like header — some sites reject the default requests UA.
    HEADERS = {'User-Agent': 'Mozilla/5.0'}

    @staticmethod
    def _safe_name(name):
        """Replace characters invalid in file names with ``_`` and trim whitespace."""
        return re.sub(r'[\\/:*?"<>|]', '_', name).strip()

    def get_html(self, url):
        """Fetch *url* and return the parsed lxml HTML tree, or ``None`` on failure.

        Uses a timeout and a User-Agent header, and raises on HTTP error
        statuses so a 4xx/5xx page is not silently parsed as content.
        """
        try:
            response = requests.get(url, headers=self.HEADERS, timeout=10)
            response.raise_for_status()
            return etree.HTML(response.text)
        except requests.RequestException:
            print('html获取错误')
            return None

    def get_books(self, url):
        """Scrape the listing page at *url* and download every book found."""
        html = self.get_html(url)
        if html is None:
            # Listing page could not be fetched; nothing to do.
            return
        books_list = html.xpath('//div[@class="book-mid-info"]/h4/a/text()')  # book titles
        urls_list = html.xpath('//div[@class="book-mid-info"]/h4/a/@href')  # matching hrefs
        for book_name, _url in zip(books_list, urls_list):
            # Portable path, with filesystem-unsafe characters stripped;
            # makedirs also creates the 'qidian_' parent when missing.
            path = os.path.join('qidian_', self._safe_name(book_name))
            os.makedirs(path, exist_ok=True)
            new_url = 'http:' + _url + '#Catalog'  # full catalog URL for this book
            Spider.num = 1  # restart chapter numbering for each book
            self.get_books_info(book_name, new_url, path)

    def get_books_info(self, book_name, new_url, path):
        """Fetch the catalog page of one book and download each chapter into *path*."""
        html = self.get_html(new_url)
        if html is None:
            # Catalog page could not be fetched; skip this book.
            return
        book_zjs = html.xpath('//ul[@class="cf"]/li/a/text()')  # chapter titles
        zjs_url = html.xpath('//ul[@class="cf"]/li/a/@href')  # matching chapter hrefs
        for b, z in zip(book_zjs, zjs_url):
            self.down_book(b, 'http:' + z, path)

    def down_book(self, b, zj_new_url, path):
        """Download chapter *b* from *zj_new_url* and write it as a text file in *path*."""
        print('正在抓取: ' + b + ' ' + zj_new_url[:30] + '....')
        html = self.get_html(zj_new_url)
        if html is None:
            # Fetch failed; bail out before 'info' would be referenced undefined.
            print('error')
            return
        info_list = html.xpath('//div[@class="read-content j_readContent"]/p/text()')
        info = ''.join(info_list)
        file_name = str(Spider.num) + '--' + self._safe_name(b) + '.txt'
        try:
            with open(os.path.join(path, file_name), 'w', encoding='utf-8') as f:
                f.write(info)
            print('      success')
            Spider.num += 1
        except OSError:
            print('写入错误')


if __name__ == '__main__':
    # Run the scraper only when executed as a script, so importing this
    # module does not kick off a full site crawl as a side effect.
    spider = Spider()
    spider.get_books(url)
