import os

import requests
from lxml import etree

# Target book page on Qidian (book id 1033081156).
url = 'https://book.qidian.com/info/1033081156/'
# Request headers impersonating a desktop Chrome browser.
# NOTE(review): the Cookie below is a hard-coded personal session token — it
# will expire, and committing it to source control leaks the account session.
# Consider loading it from an environment variable or config file instead.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.0.0 Safari/537.36',
    'Referer': 'https://book.qidian.com/info/1033081156/',
    'Cookie': 'newstatisticUUID=1673879212_1637873068; _csrfToken=6zWSzryQM14sHQAv9ZTshBECS97t9OUEmqdAnwBN; fu=142118953; '
              '_gid=GA1.2.607169809.1673879216; '
              'Cc2838679FS=58hEpSLO9UFK7WxLxzMVjm5s.10OQFS90UPo7Deykh6f7MzMPiROqDOYIpn2Os4_IX0rugVW..HppcwgfOR_wcA; e1={'
              '"pid":"qd_p_qidian","eid":"qd_A81","l1":3}; e2={"pid":"qd_p_qidian","eid":"qd_A19",'
              '"l1":3}; traffic_utm_referer=; Hm_lvt_f00f67093ce2f38f215010b699629083=1673879216,1673886122; '
              'ywguid=120219705302; ywkey=ywGZ7dHzP95Z; ywopenid=6C84CC849F206E85B82E57817CBDD22D; qdrs=0|3|0|0|1; '
              'showSectionCommentGuide=1; qdgd=1; lrbc=1035857690|737686964|0; rcr=1035857690; bc=1035857690; '
              'Hm_lpvt_f00f67093ce2f38f215010b699629083=1673886567; _ga=GA1.1.1765722841.1673879216; '
              '_ga_FZMMH98S83=GS1.1.1673886122.2.1.1673886656.0.0.0; '
              '_ga_PFYW0QLV3P=GS1.1.1673886122.2.1.1673886656.0.0.0; '
              'Cc2838679FT'
              '=63ZY4BCHm8ZqqqqDmXCz2TGj_zMrr83E8xflU8yUCEAVSwh9cBdWB_xpCK4XhKvufo6g9F5iaYIPokMgSkD_aZyEMoE81DkFNFzJykSo9oxcS._SSTIRGXveEAiRU3oq_ku0VMDN.YVGpNLlJ08SxUfaISz997DNG596mES.lgISZmLTC8XL8sI2bH45rO9j0fvKBnjZNy7YguE8rv5mLmR3V3R.JSGVRY8iBCuccTLLS3vKFUu29zqWbsSZG28GPbQxHqzoNa4DRCczkQNSFnC '
}

# Fetch the book's info page and build the lxml parse tree.
response = requests.get(url=url, headers=headers)
response.raise_for_status()  # fail fast on HTTP errors instead of silently parsing an error page
html = response.text
tree = etree.HTML(html)

# Book title; used as the name of the output directory.
title = tree.xpath('//div[@class="book-info "]/h1/em/text()')[0]

# exist_ok avoids the check-then-create race of os.path.exists() + os.mkdir().
os.makedirs(title, exist_ok=True)

# Anchor elements for the free chapters in the first volume block.
free_list = tree.xpath('//div[@class="volume-wrap"]//div[1]//ul/li/h2/a')

for free in free_list:
    link = 'http:' + free.xpath('./@href')[0]  # hrefs are protocol-relative ("//...")
    name = free.xpath('./text()')[0]  # chapter title

    detail_text = requests.get(url=link, headers=headers).text
    # Parse into a separate tree so the outer book-page `tree` is not clobbered.
    detail_tree = etree.HTML(detail_text)
    # One list element per paragraph of the chapter body.
    topic = detail_tree.xpath('//div[@class="read-content j_readContent"]/p/text()')

    # NOTE(review): the file contains plain text despite the .docx extension,
    # so Word may refuse to open it; .txt would be more accurate. Kept as-is
    # to preserve the script's existing output paths.
    path = os.path.join(title, name + '.docx')
    # `with` guarantees the handle is closed even if a write raises.
    with open(path, encoding='utf-8', mode='w') as f:
        f.write(name + '\n\n')  # chapter heading
        for sentence in topic:
            # Strip the ideographic space U+3000 used for CJK paragraph indents.
            # Bug fix: the original used the raw string r'\u3000', which removed
            # the literal six characters "\u3000", not the space character itself.
            sentence = sentence.strip().replace('\u3000', '')
            f.write('    ' + sentence + '\n\n')

    print(f'{name}--保存成功!')
