import requests, re, os, lxml.html
# from lxml import etree
etree = lxml.html.etree


class Spider:
    """Scrape the Qidian "all books" listing page.

    ``start_reqeust`` yields one ``(book_name, catalog_url, local_dir)``
    tuple per book and creates a per-book directory under ``qidian``.
    """

    def start_reqeust(self, url):
        """Fetch *url* and yield ``(name, catalog_url, path)`` per book.

        NOTE: the method name keeps its original misspelling because the
        module-level caller uses it; renaming would break that call site.

        :param url: listing-page URL (e.g. ``https://www.qidian.com/all``).
        :yields: (book name, absolute catalog URL with ``#Catalog`` anchor,
                  local directory created for the book).
        """
        response = requests.get(url)
        html = etree.HTML(response.text)
        # The original code had these two names swapped: the text() nodes
        # are the book titles and the @href values are the links.
        name_list = html.xpath('//div[@class="book-mid-info"]/h4/a/text()')
        href_list = html.xpath('//div[@class="book-mid-info"]/h4/a/@href')
        for name, href in zip(name_list, href_list):
            path = os.path.join('qidian', name)
            # makedirs(exist_ok=True): os.mkdir raised FileExistsError on
            # every re-run of the crawler.
            os.makedirs(path, exist_ok=True)
            # hrefs are protocol-relative ('//...'), hence the 'http:' prefix.
            yield name, 'http:' + href + '#Catalog', path


# Entry point: crawl the book listing, then (for the first book only, per
# the original `break`) download every chapter's text to
# qidian/<book>/<chapter>/<chapter>.txt.
url = 'https://www.qidian.com/all'
spider = Spider()
for book in spider.start_reqeust(url):
    print(book)
    book_name, catalog_url, book_dir = book
    catalog = etree.HTML(requests.get(catalog_url).text)
    chapter_names = catalog.xpath('//ul[@class="cf"]/li/a/text()')
    chapter_hrefs = catalog.xpath('//ul[@class="cf"]/li/a/@href')
    for chapter, href in zip(chapter_names, chapter_hrefs):
        chapter_dir = os.path.join(book_dir, chapter)
        # makedirs(exist_ok=True): os.mkdir crashed when the chapter
        # directory already existed from a previous run.
        os.makedirs(chapter_dir, exist_ok=True)
        chapter_url = 'http:' + href  # hrefs are protocol-relative
        print(chapter, chapter_url)
        page = etree.HTML(requests.get(chapter_url).text)
        paragraphs = page.xpath(
            '//div[@class="read-content j_readContent"]/p/text()')
        text = ''.join(paragraphs)
        # utf-8 instead of gbk: gbk raised UnicodeEncodeError for
        # characters outside the GBK repertoire (the source pages are
        # decoded as Unicode by requests).
        with open(os.path.join(chapter_dir, chapter + '.txt'), 'a',
                  encoding='utf-8') as f:
            f.write(text)
    break  # original behavior: stop after the first book