#!coding=utf-8
import asyncio
import os
import time
from urllib.parse import urljoin

import httpx
import parsel
from lxml import etree

# Module-level default headers for www.ddsk.org.
# NOTE(review): this dict duplicates get_header() below, except that the
# Hm_lpvt cookie timestamp here is frozen once at import time rather than
# refreshed per request. No code visible in this file reads this dict
# (all requests go through get_header()) — confirm no external importer
# uses it before removing.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6,mt;q=0.5',
    'Accept-Encoding': 'gzip, deflate',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'Cookie': f'clickbids=%2C233924; Hm_lvt_d8bee18481730a9cf11dbbab4c696107=1721638849; HMACCOUNT=234E9CF015EF4127; Hm_lpvt_d8bee18481730a9cf11dbbab4c696107={int(time.time())}',
    'Host': 'www.ddsk.org',
    'Referer': 'http://www.ddsk.org/book/233924.html'
}

def get_header():
    """Build the HTTP request headers for www.ddsk.org.

    The Hm_lpvt analytics cookie is stamped with the current Unix time on
    every call so each request carries a fresh "last visit" value.

    :return: dict of header name -> value, ready for httpx/requests.
    """
    # Assemble the cookie separately; only the last field is dynamic.
    cookie = (
        'clickbids=%2C233924; '
        'Hm_lvt_d8bee18481730a9cf11dbbab4c696107=1721638849; '
        'HMACCOUNT=234E9CF015EF4127; '
        f'Hm_lpvt_d8bee18481730a9cf11dbbab4c696107={int(time.time())}'
    )
    return {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6,mt;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Cookie': cookie,
        'Host': 'www.ddsk.org',
        'Referer': 'http://www.ddsk.org/book/233924.html'
    }
async def get_link(url):
    """Fetch the book's table of contents and download every chapter.

    :param url: URL of the book index page listing all chapter links.
    """
    # exist_ok avoids the check-then-create race of exists() + mkdir()
    os.makedirs('小说', exist_ok=True)

    async with httpx.AsyncClient() as client:
        response = await client.get(url, headers=get_header())
        html = etree.HTML(response.text)
        # Anchors in the chapter-list table; XPath is tied to the site layout.
        anchors = html.xpath('/html/body/div[@id="wrapper"]/div[@id="wrapper"]/div[@class="main"]/div/div/div/table[@id="bgdiv"]/tbody/tr/td/div/a')
        tasks = []
        for a in anchors:
            # Chapter title and link for each entry.
            name = a.text
            # urljoin avoids the double slash the naive concatenation
            # produced (hrefs already start with '/'), and also handles
            # relative hrefs correctly.
            link = urljoin('http://www.ddsk.org/', a.get('href'))
            tasks.append(asyncio.create_task(get_text(name, link)))
        # gather (unlike asyncio.wait) accepts an empty task list and
        # propagates exceptions raised inside the chapter tasks.
        await asyncio.gather(*tasks)

async def save_data(name, text):
    """Write one chapter to 小说/<name>.txt: title line, then one
    paragraph per line.

    :param name: chapter title; used as both the first line and the file name.
    :param text: iterable of cleaned paragraph strings.
    """
    # Announce once per chapter — the original printed this for every
    # paragraph, flooding stdout.
    print(f'正在爬取{name}')
    # Chapter tasks may run concurrently with directory setup elsewhere,
    # so make sure the target directory exists here too.
    os.makedirs('小说', exist_ok=True)
    # Context manager guarantees the handle is closed; the original
    # leaked it, leaving buffered data unflushed until GC.
    with open(f'小说/{name}.txt', 'w', encoding='utf-8') as f:
        f.write(name)
        f.write('\n')
        for line in text:
            f.write(line)
            f.write('\n')

async def get_text(name, link):
    """Download one chapter page, strip layout noise from its paragraphs,
    and hand the cleaned text to save_data.

    :param name: chapter title.
    :param link: absolute URL of the chapter page.
    """
    async with httpx.AsyncClient() as client:
        response = await client.get(link, headers=get_header())
        page = etree.HTML(response.text)
        paragraphs = page.xpath('//div[@id="content"]/text()')
        cleaned = []
        for raw in paragraphs:
            # Drop hard line breaks and non-breaking spaces injected by
            # the site's markup, then trim; keep only non-empty lines.
            stripped = raw.replace('\r\n', '').replace('\n', '').replace('\xa0', '').strip()
            if stripped:
                cleaned.append(stripped)
        await save_data(name, cleaned)


if __name__ == '__main__':
    # Index page of the book to download.
    url = 'http://www.ddsk.org/book/233924.html'
    # asyncio.run returns get_link's result (None); the original bound it
    # to a variable misleadingly named `loop`, so the assignment is dropped.
    asyncio.run(get_link(url))