import asyncio
import os
import sys

import aiofiles
import httpx
import pandas as pd
from parsel import Selector

# 生成要爬取的 URL 列表  
# Build the list of catalog URLs to crawl: 640 paginated index pages
# plus the front index page.
url_list = [
    f'http://news.nankai.edu.cn/ywsd/system/count//0003000/000000000000/000/000/c0003000000000000000_000000{i}.shtml'
    for i in range(1, 641)
]
url_list.append('https://news.nankai.edu.cn/ywsd/index.shtml')

# Cap the number of in-flight HTTP requests across all tasks.
concurrent_requests = asyncio.Semaphore(5)
# WindowsSelectorEventLoopPolicy exists only on Windows builds of CPython;
# guard it so the script no longer crashes with AttributeError on
# Linux/macOS at import time.
if sys.platform == 'win32':
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

# Accumulates href -> anchor-text pairs scraped from the catalog pages.
extracted_urls = {}
# Maps article title (index) to its URL; written out as CSV at the end.
url_dataframe = pd.DataFrame(columns=['url'])
url_dataframe.index.name = 'title'

async def parse_catalog(url):
    """Fetch one catalog page and harvest all anchor links from it.

    Each (href, anchor text) pair is merged into the module-level
    ``extracted_urls`` dict, keyed by href.

    Args:
        url: Absolute URL of a catalog/index page.
    """
    async with concurrent_requests:
        # follow_redirects=True makes httpx handle every redirect status
        # (301/302/307/308) and resolve relative Location headers.  The
        # previous manual handling only re-fetched on a literal 302 and
        # broke when Location was a relative path.
        async with httpx.AsyncClient(follow_redirects=True) as client:
            response = await client.get(url)
            selector = Selector(response.text)
            # NOTE(review): zip() assumes hrefs and texts align 1:1;
            # anchors without text nodes could shift the pairing — the
            # original had the same assumption, kept as-is.
            extracted_urls.update(
                zip(selector.css('a::attr(href)').getall(),
                    selector.css('a::text').getall()))

async def parse_article(url):
    """Download one article page and save it under ./tju_zhxw_pages/.

    The page is written to ``./tju_zhxw_pages/<title>.html`` and the
    (title, url) pair is recorded in the module-level ``url_dataframe``.
    Non-absolute URLs (relative paths, javascript: links) are skipped.

    Args:
        url: Candidate article URL extracted from a catalog page.
    """
    async with concurrent_requests:
        print("正在爬取：" + str(url))
        try:
            async with httpx.AsyncClient(
                    follow_redirects=True, timeout=10,
                    headers={'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Mobile Safari/537.36 Edg/119.0.0.0'}) as client:
                # 'http' is a prefix of 'https', so a single check covers
                # both schemes (the original doubled it redundantly).
                if not url.startswith('http'):
                    return
                response = await client.get(url)
                selector = Selector(response.text)
                title = selector.css('title::text').get()
                if title is None:
                    # A page without <title> cannot be named; previously
                    # this raised a TypeError on the '"/" in title' check
                    # and was mis-reported by the generic handler.
                    print(f'error: {url} - page has no <title>')
                    return
                try:
                    # "/" is illegal in file names; replace unconditionally
                    # (no-op when absent, same result as the original check).
                    title = title.replace("/", "_")
                    async with aiofiles.open(f'./tju_zhxw_pages/{title}.html', mode='w', encoding='utf-8') as file:
                        await file.write(response.text)
                    url_dataframe.loc[title] = url  # record title -> URL
                except Exception as e:
                    print(f'{e}: {url}|{title}')
        except Exception as e:
            print(f'error: {url} - {e}')

async def main():
    """Crawl all catalog pages, then every extracted article, then dump CSV."""
    # makedirs(exist_ok=True) replaces the exists()/mkdir() pair and
    # avoids the check-then-create race.
    os.makedirs('./tju_zhxw_pages', exist_ok=True)  # saved article pages
    os.makedirs('./tju_zhxw', exist_ok=True)        # output CSV folder

    # Crawl the catalog pages first; they populate extracted_urls.
    # gather() schedules coroutines itself, so the create_task wrapping
    # in the original was redundant.
    await asyncio.gather(*(parse_catalog(url) for url in url_list))

    # Then fetch every article link collected from the catalogs.
    await asyncio.gather(*(parse_article(url) for url in extracted_urls))

    # Persist the title -> URL table.
    url_dataframe.to_csv("./tju_zhxw/tju_zhxw1_1-10.csv")

if __name__ == '__main__':
    # Entry point: run the whole crawl inside a fresh asyncio event loop.
    asyncio.run(main())