import asyncio
import json
import aiohttp
from lxml import etree
import os
import re
import csv
import aiofiles


# Extract the download address from an onclick handler.
def extract_url(text):
    """Pull the first http(s) download URL out of an ``openPackage(...)``
    onclick attribute; return None when the text does not match."""
    found = re.search(r"openPackage\(\d+,'[^']*',\s*'(https?://[^']*)'\)", text)
    if found is None:
        return None
    return found.group(1)


# Normalize a URL.
def normalize_url(url, default_scheme='http'):
    """Prefix protocol-relative URLs (``//host/...``) with *default_scheme*;
    any other URL is returned untouched."""
    if not url.startswith('//'):
        return url
    return default_scheme + ':' + url


# Asynchronously fetch a page's HTML.
async def fetch_html(session, url):
    """Fetch *url* with a desktop-browser User-Agent and return the parsed
    lxml HTML root element.

    Raises:
        Exception: on a non-200 HTTP status, or on any aiohttp client error
            (chained with ``from`` so the original traceback is preserved).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36'
    }
    try:
        async with session.get(url, headers=headers) as response:
            if response.status == 200:
                return etree.HTML(await response.read())
            # Raised here so the except below (ClientError only) won't swallow it.
            raise Exception(f"Error fetching HTML from {url}: HTTP status {response.status}")
    except aiohttp.ClientError as e:
        # Chain the original client error instead of discarding its traceback.
        raise Exception(e) from e


class Crsky:
    """Scraper for crsky.com software listings.

    Walks list pages, spawns one task per detail page, downloads each
    entry's cover image into ``IMAGE_DIR`` and appends one row per entry
    to the CSV file at ``CSV_DIR``.
    """

    def __init__(self, window, params: dict):
        self.window = window
        # Directory for downloaded images; created once, up front.
        self.IMAGE_DIR = params.get("image_path")
        self.CSV_DIR = f"{params.get('csv_name')}.csv"
        # NOTE(review): attribute name keeps the original (misspelled)
        # spelling so any external reader of this attribute keeps working.
        self.CATEOGRY = params.get("category")
        os.makedirs(self.IMAGE_DIR, exist_ok=True)  # create only once
        # Prefix recorded in the CSV image column (same value as IMAGE_DIR).
        self.PREFIX = params.get("image_path")

    # Synchronously write the CSV header row (truncates any existing file).
    def write_csv_header(self):
        with open(self.CSV_DIR, mode='w', newline='', encoding='utf-8') as file:
            writer = csv.writer(file)
            writer.writerow(['资源名称', '资源图片', '资源介绍', '资源分类', '下载地址', '更多字段'])  # header row

    # Synchronously append one data row.
    def write_csv_row(self, row):
        with open(self.CSV_DIR, mode='a', newline='', encoding='utf-8') as file:  # 'a' mode appends
            writer = csv.writer(file)
            writer.writerow(row)

    # Asynchronously download one image into IMAGE_DIR.
    async def download_image(self, session, image_url):
        image_name = image_url.split('/')[-1]
        save_path = os.path.join(self.IMAGE_DIR, image_name)

        try:
            async with session.get(image_url) as response:
                if response.status == 200:
                    async with aiofiles.open(save_path, 'wb') as f:
                        content = await response.read()
                        await f.write(content)
                else:
                    raise Exception(f"Error downloading image from {image_url}: HTTP status {response.status}")
        except aiohttp.ClientError as e:
            # Chain the original error instead of discarding its traceback.
            raise Exception(e) from e

    # Asynchronously process one list page: one detail task per entry.
    async def fetch_list(self, session, url):
        html = await fetch_html(session, url)
        lists = html.xpath('/html/body/div[6]/div/div[1]/div[3]/div')
        tasks = []
        for i in lists:
            detail_url = "https://www.crsky.com/" + i.xpath('h3/a/@href')[0]
            image_url = i.xpath("div/p/a/img/@src")[0]
            tasks.append(asyncio.create_task(self.fetch_detail(session, detail_url, image_url)))

        # return_exceptions=True lets every sibling task finish even when one
        # fails; the first captured failure is then re-raised unchanged so
        # its type and traceback survive (instead of being re-wrapped).
        results = await asyncio.gather(*tasks, return_exceptions=True)
        for result in results:
            if isinstance(result, Exception):
                raise result

    # Asynchronously process one detail page and persist its data.
    async def fetch_detail(self, session, url, image_url):
        html = await fetch_html(session, url)
        title = html.xpath('/html/body/div[5]/div[1]/div[1]/div[1]/h1/text()')[0]
        content = html.xpath('string(//*[@id="rom_des"])').replace('\n', ' ').replace('\r', ' ').strip()
        download_url = html.xpath('//ul[@class="clearfix Adown_v_pt active"]/li/a/@onclick')
        if len(download_url) != 0:
            download_url = extract_url(download_url[0])

        # Skip entries with no extractable download link (empty list or
        # no regex match both end up falsy here).
        if not download_url:
            return
        # NOTE(review): plain string concatenation — assumes PREFIX ends
        # with a path separator; confirm against the params passed in.
        image_name = self.PREFIX + normalize_url(image_url).split('/')[-1]
        mores = {}

        more_html = html.xpath('/html/body/div[5]/div[1]/div[1]/ul')
        for i in more_html:
            # NOTE(review): the '//em[...]' paths below search the whole
            # document rather than being relative to `i`; if several <ul>s
            # matched, every pass rebuilds `mores` with the same
            # document-wide values. Confirm against the live page markup.
            mores = {
                "软件等级:": 15,  # hard-coded rating placeholder
                "软件大小": i.xpath('//em[@itemprop="fileSize"]/text()')[0],
                "运行环境": i.xpath('//em[@itemprop="operatingSystem"]/text()')[0],
                "授权方式": i.xpath('//em[@itemprop="license"]/text()')[0],
                "支持语音": i.xpath('//em[@itemprop="inLanguage"]/text()')[0],
                "更新时间": i.xpath('//em[@itemprop="dateModified"]/text()')[0],
            }
        await self.download_image(session, normalize_url(image_url))
        data = [title, image_name, content, self.CATEOGRY, download_url, json.dumps(mores)]
        self.write_csv_row(data)


# Entry point: build the scraper and fan out one task per list page.
async def main(window, params: dict):
    """Scrape crsky.com list pages concurrently.

    *params* must provide 'image_path', 'csv_name', 'category',
    'site_path' and 'page_number' (int or numeric string).

    Exceptions from page tasks propagate unchanged — they are all
    ``Exception`` subclasses, so existing ``except Exception`` callers
    still work, and the original tracebacks are preserved (the previous
    ``raise Exception(e)`` re-wrap destroyed them).
    """
    crsky = Crsky(window, params)
    async with aiohttp.ClientSession() as session:
        # NOTE(review): range(1, n) stops at page n-1 — confirm whether
        # the final page is intentionally excluded.
        tasks = [
            asyncio.create_task(crsky.fetch_list(session, f"{params.get('site_path')}{i}.html"))
            for i in range(1, int(params.get('page_number')))
        ]
        await asyncio.gather(*tasks)
