# -*- coding: utf-8 -*-

import asyncio
import os

import httpx
import parsel
import requests
from lxml import etree

# Browser-like request headers sent with every HTTP request so the site
# serves the normal desktop page instead of blocking the client.
# NOTE(review): the Cookie holds captured anti-crawler tokens
# (acw_tc / acw_sc__v2) that expire — refresh them before running.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6,mt;q=0.5',
    'Accept-Encoding': 'gzip, deflate, br, zstd',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'Cookie': 'acw_tc=2760829717203621918175118e70cd3944dd2a39519618567d4208297eb780; acw_sc__v2=668aa4cfecf0788e2bfc663dfea7a3e379ce66a8'
}

async def get_link(url):
    """Fetch the chapter-list page at *url* and download every chapter.

    Scrapes each chapter's name and absolute URL from the listing page,
    then runs one ``get_text`` task per chapter concurrently.

    :param url: URL of the 17k.com chapter-list page.
    """
    # NOTE: requests is synchronous and blocks the event loop, but this is
    # the single bootstrap request made before any tasks exist, so it is
    # harmless here.
    response = requests.get(url, headers=headers)
    response.encoding = 'utf-8'
    selector = parsel.Selector(response.text)

    name_link_list = []
    for block in selector.xpath('/html[1]/body[1]/div[5]'):
        # Chapter URLs are site-relative; prefix the host.
        links = block.xpath('./dl/dd/a/@href').extract()
        linklist = ['https://www.17k.com' + link for link in links]
        # Chapter titles, stripped of layout whitespace.
        names = block.xpath('./dl/dd/a/span/text()').extract()
        namelist = [name.replace('\n', '').replace('\t', '') for name in names]
        # extend (not assign) so multiple matched divs all contribute —
        # the original overwrote the list on every iteration.
        name_link_list.extend(zip(namelist, linklist))

    if not name_link_list:
        # Nothing scraped (page layout changed or cookie expired).
        return

    # asyncio.gather accepts coroutines directly; asyncio.wait() rejects
    # bare coroutines since Python 3.11 and raises on an empty set.
    await asyncio.gather(*(get_text(name, link)
                           for name, link in name_link_list))

async def save_data(name, text):
    """Write one chapter to ``小说/<name>.txt``, one paragraph per line.

    :param name: chapter title, used as the file name.
    :param text: iterable of paragraph strings.
    """
    # The original open() failed with FileNotFoundError when the output
    # directory did not exist.
    os.makedirs('小说', exist_ok=True)
    # with-block guarantees the handle is closed; the original leaked it.
    with open(f'小说/{name}.txt', 'w', encoding='utf-8') as f:
        for paragraph in text:
            f.write(paragraph)
            f.write('\n')
    # Report once per chapter instead of once per paragraph.
    print(f'正在爬取{name}')

async def get_text(name, link):
    """Download the chapter page at *link* and persist its paragraphs.

    :param name: chapter title, forwarded to ``save_data``.
    :param link: absolute URL of the chapter page.
    """
    # One short-lived client per chapter; closed before parsing begins.
    async with httpx.AsyncClient() as client:
        resp = await client.get(link, headers=headers)
    tree = etree.HTML(resp.text)
    # Paragraph text nodes of the reading area.
    paragraphs = tree.xpath('//*[@id="readArea"]/div[1]/div[2]/p/text()')
    await save_data(name, paragraphs)


if __name__ == '__main__':
    url = 'https://www.17k.com/list/2536069.html'
    # asyncio.run() creates, runs and closes the event loop; the old
    # get_event_loop()/run_until_complete pattern has emitted a
    # DeprecationWarning since Python 3.10.
    asyncio.run(get_link(url))