import requests, lxml.html, os

etree = lxml.html.etree
url = 'https://www.qidian.com/all'  # 初始站点

class Spider:
    """Crawl qidian.com's book listing: create one directory per book and
    write every chapter of every book to a numbered UTF-8 .txt file.

    Relies on module-level ``requests``, ``etree`` (lxml) and the listing
    ``url`` defined at file top.
    """

    def __init__(self):
        # Global running counter used to prefix chapter filenames so files
        # sort in download order across the whole crawl.
        self.num = 1

    def get_html(self, url):
        """Fetch *url* and parse the response into an lxml HTML tree.

        Returns the parsed tree, or ``None`` when the request fails —
        callers must check for ``None`` before calling ``.xpath``.
        """
        try:
            # timeout keeps one stalled connection from hanging the crawl
            response = requests.get(url, timeout=10)
            return etree.HTML(response.text)
        except requests.RequestException:  # narrow: network errors only
            print('html获取错误')
            return None

    def get_books(self, url):
        """Yield ``(book_name, catalog_url, dir_path)`` for each book on the
        listing page at *url*, creating the per-book directory on the way.
        """
        html = self.get_html(url)
        if html is None:
            # get_html already reported the network failure; nothing to yield.
            # (The original try/except here could never catch this case:
            # get_html swallows its own errors and returns None.)
            print('get_books error')
            return

        books = html.xpath('//div[@class="book-mid-info"]/h4/a/text()')  # book titles
        urls = html.xpath('//div[@class="book-mid-info"]/h4/a/@href')    # matching hrefs
        for book, _url in zip(books, urls):
            # NOTE(review): book titles may contain characters that are
            # illegal in Windows filenames — confirm against real data.
            path = os.path.join('qidian3', book)  # per-book output directory
            if os.path.exists(path):
                # BUG FIX: was `return`, which aborted the whole crawl as soon
                # as one already-downloaded book was seen; skip it instead.
                print('文件存在')
                continue
            # makedirs also creates the 'qidian3' parent on first run
            # (os.mkdir crashed when it was missing)
            os.makedirs(path)
            # hrefs are protocol-relative ('//...'); '#Catalog' anchors the
            # chapter list on the book page
            yield book, 'http:' + _url + '#Catalog', path

    def get_book_info(self, url):
        """Yield ``(chapter_name, chapter_url, dir_path)`` for every chapter
        of every book reachable from the listing page at *url*.
        """
        for a_book_name, a_book_url, path in self.get_books(url):
            print(a_book_name, a_book_url)
            html = self.get_html(a_book_url)
            if html is None:
                # BUG FIX: skip just this book; the original `return`
                # stopped the entire crawl on one failed page.
                print('get_book_info error')
                continue

            book_zjs = html.xpath('//ul[@class="cf"]/li/a/text()')  # chapter titles
            zjs_url = html.xpath('//ul[@class="cf"]/li/a/@href')    # chapter hrefs
            for b, z in zip(book_zjs, zjs_url):
                print(b, z)
                # chapter hrefs are protocol-relative too
                yield b, 'http:' + z, path

    def down_info(self, url):
        """Download every chapter reachable from *url* and write each one to
        ``<book_dir>/<num><chapter>.txt`` (UTF-8)."""
        for name, _url, path in self.get_book_info(url):
            html = self.get_html(_url)
            if html is None:
                # BUG FIX: skip this chapter instead of aborting everything
                print('down_info error')
                continue
            print('正在抓取: ' + _url[:23] + '.....')
            try:
                info_list = html.xpath('//div[@class="read-content j_readContent"]/p/text()')
                info = ''.join(info_list)
                out_path = os.path.join(path, str(self.num) + name + '.txt')
                with open(out_path, 'w', encoding='utf-8') as f:
                    f.write(info)
                    print('success')
                self.num += 1
            except OSError:  # narrow: filesystem failures (bad filename, disk)
                print('error------error')


if __name__ == '__main__':
    # Guard so importing this module doesn't immediately start a full crawl
    # as a side effect; running it as a script behaves exactly as before.
    spider = Spider()
    spider.down_info(url)


