import asyncio
import csv
import os
import sqlite3
import sys
from datetime import datetime
from typing import Dict, List, Any, Optional, Callable
from aiohttp import web
from pydantic import BaseModel, validator, Field, field_validator
import aiofiles
import io
from ..core.database.connection import db_manager
from .utils.task_manager import task_manager

# File-extension whitelists.
# NOTE(review): neither set is referenced anywhere in this chunk — presumably
# used by other routes in this module; confirm before removing.
IMAGE_EXTENSIONS = {'.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp'}
TEXT_EXTENSIONS = {'.txt'}
# --- Request data models (pydantic validation) ---
class ImportCSVRequest(BaseModel):
    """Base payload for a CSV import request."""
    filePath: str  # path of the CSV file to import
    taskId: str  # client-supplied id used for progress tracking

class ImportTagsCSVRequest(ImportCSVRequest):
    """Import payload for the tags table; adds the target tag group."""
    name: str  # tag group name (tag_groups.name)
    group: str  # tag group key (tag_groups.group_name)

class ExportCharacterQuery(BaseModel):
    """Query parameters accepted by the character-export endpoint."""
    rating_min: Optional[float] = Field(None, ge=0, le=10)
    rating_max: Optional[float] = Field(None, ge=0, le=10)
    name: Optional[str] = None
    cn_name: Optional[str] = None
    tag: Optional[str] = None
    description: Optional[str] = None
    favorite: Optional[bool] = None
    disliked: Optional[bool] = None
    sort_field: str = 'name'
    sort_order: str = 'ASC'
    taskId: str

    @field_validator('favorite', 'disliked', mode='before')
    def convert_favorite(cls, v):
        """Normalize raw query values before bool coercion:
        '' -> None, 0 -> False, 1 -> True; anything else passes through."""
        for raw, normalized in (('', None), (0, False), (1, True)):
            if v == raw:
                return normalized
        return v


class ExportPromptItemsQuery(BaseModel):
    """Query parameters accepted by the prompt-items export endpoint."""
    min_rating: Optional[float] = Field(None, ge=0, le=10)
    max_rating: Optional[float] = Field(None, ge=0, le=10)
    class_type: Optional[str] = None
    name: Optional[str] = None
    customize_name: Optional[str] = None
    tags: Optional[str] = None
    favorite: Optional[bool] = None
    disliked: Optional[bool] = None
    id: Optional[str] = None
    sort_field: str = 'created_at'
    sort_order: str = 'DESC'
    taskId: str

    @field_validator('favorite', 'disliked', mode='before')
    def convert_favorite(cls, v):
        """Normalize raw query values before bool coercion:
        '' -> None, 0 -> False, 1 -> True; anything else passes through."""
        for raw, normalized in (('', None), (0, False), (1, True)):
            if v == raw:
                return normalized
        return v


class ExportTranslateQuery(BaseModel):
    """Query parameters accepted by the translate-table export endpoint."""
    hit_count_min: Optional[int] = Field(None, ge=0)  # lower bound on hit_count
    hit_count_max: Optional[int] = Field(None, ge=0)  # upper bound on hit_count
    en: Optional[str] = None  # filter on the en column — TODO confirm match semantics
    cn: Optional[str] = None  # filter on the cn column — TODO confirm match semantics
    sort_field: str = 'en'  # column to sort by
    sort_order: str = 'ASC'  # 'ASC' or 'DESC'
    taskId: str  # progress-tracking task id


class ExportTagsQuery(BaseModel):
    """Query parameters accepted by the tags export endpoint."""
    group: Optional[str] = None  # tag group filter
    name: Optional[str] = None  # name filter
    tag: Optional[str] = None  # tag filter
    description: Optional[str] = None  # description filter
    taskId: str  # progress-tracking task id


class TaskProgressQuery(BaseModel):
    """Query parameters for polling a task's progress."""
    taskId: str  # id of the task to look up

class CSVImporter:
    """Bulk CSV-to-SQLite importer with batching, throttled progress
    reporting and cooperative yielding to the asyncio event loop.

    NOTE(review): progress bookkeeping lives in class attributes, so two
    concurrent imports would clobber each other's state — confirm that
    imports are serialized by the callers.
    """

    last_reported_progress = 0   # last progress value sent to a callback
    last_report_time = 0         # timestamp (ms) of the last progress report
    estimated_total_lines = 0    # estimated data-row count of the current file
    yield_interval = 100         # yield to the event loop every N records
    progress_throttle = 500      # minimum interval (ms) between progress reports

    @staticmethod
    async def import_flexible_csv(
            file_path: str,
            table_name: str,
            field_mapping: Dict[str, str],
            required_fields: Optional[List[str]] = None,
            default_values: Optional[Dict[str, Any]] = None,
            batch_size: int = 5000,
            progress_callback: Optional[Callable[[int], None]] = None,
            progress_start_callback: Optional[Callable[[int], None]] = None,
            encoding: str = 'utf-8'
    ):
        """Import the CSV at *file_path* into *table_name*.

        Args:
            file_path: path of the CSV file (must exist).
            table_name: destination SQLite table.
            field_mapping: CSV column name -> DB column name.
            required_fields: DB columns that must be present after mapping.
            default_values: DB column -> value used when the CSV lacks it.
            batch_size: rows accumulated before each bulk insert.
            progress_callback: called with the current row index.
            progress_start_callback: called once with the estimated row total.
            encoding: text encoding used to read the file.

        Returns:
            dict with ``success``, ``inserted``, ``total`` and ``duration``.

        Raises:
            ValueError: if the file is missing or the import fails.
        """
        # None sentinels instead of mutable default arguments ([]/{} defaults
        # are shared across calls).
        if required_fields is None:
            required_fields = []
        if default_values is None:
            default_values = {}

        # Reset shared progress state for this run.
        CSVImporter.last_reported_progress = 0
        CSVImporter.last_report_time = 0
        CSVImporter.estimated_total_lines = 0

        if not os.path.exists(file_path):
            raise ValueError(f"文件不存在: {file_path}")

        # Estimate the row total up front so callers can report ratios.
        CSVImporter.estimated_total_lines = await CSVImporter.estimate_line_count(file_path, encoding)
        if progress_start_callback and CSVImporter.estimated_total_lines:
            progress_start_callback(CSVImporter.estimated_total_lines)

        return await CSVImporter.parse_csv_with_newlines(
            file_path,
            table_name,
            field_mapping,
            required_fields,
            default_values,
            batch_size,
            progress_callback,
            encoding
        )

    @staticmethod
    async def parse_csv_with_newlines(
            file_path: str,
            table_name: str,
            field_mapping: Dict[str, str],
            required_fields: List[str],
            default_values: Dict[str, Any],
            batch_size: int,
            progress_callback: Optional[Callable[[int], None]] = None,
            encoding: str = 'utf-8'
    ):
        """Parse the CSV and insert its rows in batches.

        The whole file is read into memory and fed to ``csv.DictReader`` so
        quoted fields containing newlines are handled correctly.
        """
        def read_file_sync():
            # errors='replace' keeps the import going on stray bad bytes.
            with open(file_path, 'r', encoding=encoding, errors='replace') as f:
                return f.read()

        # File I/O runs in a worker thread so the event loop stays responsive.
        content = await asyncio.to_thread(read_file_sync)

        csv_reader = csv.DictReader(io.StringIO(content))
        records = list(csv_reader)

        inserted_count = 0
        batch: List[Dict[str, Any]] = []
        start_time = datetime.now().timestamp() * 1000
        last_progress_report = start_time

        try:
            for i, record in enumerate(records):
                mapped_record = CSVImporter.map_record(
                    record,
                    field_mapping,
                    required_fields,
                    default_values
                )

                batch.append(mapped_record)
                now = datetime.now().timestamp() * 1000

                # Throttled progress report.
                if progress_callback and now - last_progress_report > CSVImporter.progress_throttle:
                    progress_callback(i)
                    last_progress_report = now

                # Flush a full batch.
                if len(batch) >= batch_size:
                    inserted_count += await CSVImporter.insert_batch(table_name, batch)
                    batch = []
                    if progress_callback:
                        progress_callback(i)

                # Periodically yield so other coroutines can run.
                if i % CSVImporter.yield_interval == 0:
                    await asyncio.sleep(0)

            # Flush the final partial batch.
            if batch:
                inserted_count += await CSVImporter.insert_batch(table_name, batch)

            # Final progress report.
            if progress_callback:
                progress_callback(len(records))

            duration = (datetime.now().timestamp() * 1000 - start_time) / 1000

            return {
                "success": True,
                "inserted": inserted_count,
                "total": len(records),
                "duration": duration
            }
        except Exception as e:
            # Chain the cause so the original traceback is preserved.
            raise ValueError(f"CSV导入失败: {str(e)}") from e

    @staticmethod
    async def estimate_line_count(file_path: str, encoding: str = 'utf-8') -> int:
        """Return the number of data rows (total lines minus the header)."""
        try:
            def count_lines_sync():
                with open(file_path, 'r', encoding=encoding, errors='replace') as f:
                    line_count = 0
                    for _ in f:
                        line_count += 1
                    # Subtract the header row; never go negative on empty files.
                    return max(0, line_count - 1)

            return await asyncio.to_thread(count_lines_sync)
        except Exception as e:
            raise ValueError(f"估算行数失败: {str(e)}") from e

    @staticmethod
    def map_record(
            record: Dict[str, Any],
            field_mapping: Dict[str, str],
            required_fields: List[str],
            default_values: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Map one CSV row to DB columns, applying defaults and timestamps.

        Raises:
            ValueError: if a required column is still missing after mapping.
        """
        mapped_record: Dict[str, Any] = {}

        for csv_field, db_field in field_mapping.items():
            # 'id' is special: only kept when the CSV carries a non-empty
            # value; otherwise the DB generates it.
            if db_field == 'id':
                if csv_field in record and record[csv_field] is not None:
                    id_value = str(record[csv_field]).strip()
                    if id_value:
                        mapped_record[db_field] = id_value
                continue

            if csv_field in record:
                mapped_record[db_field] = CSVImporter.transform_value(db_field, record[csv_field])

        # Apply defaults for columns absent from the CSV (id excluded — the
        # DB supplies its default).
        for db_field, default_value in default_values.items():
            if db_field != 'id' and db_field not in mapped_record:
                mapped_record[db_field] = default_value

        # Validate required columns.
        for field in required_fields:
            if field not in mapped_record:
                raise ValueError(f"缺少必填字段: {field}")

        # Stamp creation/update times on every imported row.
        now = datetime.now().isoformat()
        mapped_record['created_at'] = now
        mapped_record['updated_at'] = now

        return mapped_record

    @staticmethod
    def transform_value(field: str, value: Any) -> Any:
        """Coerce a raw CSV value to the type expected by *field*."""
        # Boolean-ish columns are stored as 0/1 integers.
        if field in ['favorite', 'disliked']:
            return CSVImporter.parse_boolean(value)

        # rating: lenient float parse, falling back to 0.0.
        if field == 'rating':
            try:
                return float(value) if value else 0.0
            except (ValueError, TypeError):
                return 0.0
        # Everything else is stored as text; None becomes the empty string.
        return str(value) if value is not None else ''

    @staticmethod
    def parse_boolean(value: Any) -> int:
        """Parse a truthy string/bool into the 0/1 form stored in SQLite."""
        if isinstance(value, bool):
            return 1 if value else 0

        str_val = str(value).lower().strip()
        return 1 if str_val in ['true', 'yes', '1', 'y', 't'] else 0

    @staticmethod
    async def insert_batch(table_name: str, batch: List[Dict[str, Any]]) -> int:
        """Bulk INSERT OR REPLACE *batch* into *table_name* in one transaction.

        Returns the number of affected rows; raises ValueError on failure.
        NOTE: table_name/column names are interpolated into the SQL — they
        come from hard-coded call sites in this module, never from users.
        """
        if not batch:
            return 0

        columns = list(batch[0].keys())
        column_names = ', '.join(columns)
        total_changes = 0

        # SQLite's default host-parameter limit is 999; cap rows per statement.
        max_per_batch = max(1, 999 // len(columns))

        with db_manager.get_connection() as conn:
            try:
                conn.execute('BEGIN TRANSACTION')

                for i in range(0, len(batch), max_per_batch):
                    chunk = batch[i:i + max_per_batch]

                    # Multi-row VALUES (...), (...), ... statement.
                    row_placeholders = ', '.join(['(' + ', '.join(['?'] * len(columns)) + ')'] * len(chunk))
                    sql = f'INSERT OR REPLACE INTO {table_name} ({column_names}) VALUES {row_placeholders}'

                    # Flatten the chunk's values in column order.
                    values = []
                    for record in chunk:
                        values.extend([record[col] for col in columns])

                    cursor = conn.execute(sql, values)
                    total_changes += cursor.rowcount

                conn.execute('COMMIT')
                return total_changes
            except Exception as e:
                # Roll back on the SAME connection that holds the open
                # transaction (the original code rolled back on a freshly
                # opened connection, which could never undo this one).
                try:
                    conn.execute('ROLLBACK')
                except Exception as rollback_err:
                    print(f"回滚失败: {str(rollback_err)}")
                raise ValueError(f"批量插入失败: {str(e)}") from e



# --- Route handlers ---
async def handle_import_character_csv(request: web.Request) -> web.Response:
    """POST handler: import an uploaded CSV into the ``character`` table.

    Expects multipart form data with a ``file`` field (the CSV) and a
    ``taskId`` string used for progress tracking. Responds immediately and
    performs the import in a background asyncio task.
    """
    temp_file_path = ""
    task_id = None
    # Candidate encodings, tried in priority order. NOTE: 'ansi' is only a
    # registered codec alias on Windows; elsewhere decode() raises
    # LookupError for it (handled in the detection loop below).
    COMMON_ENCODINGS = ['utf-8', 'utf-8-sig', 'gbk', 'gb2312', 'ansi']

    try:
        # Parse the multipart form body.
        data = await request.post()
        print("开始处理角色表CSV导入请求")

        file_field = data.get('file')
        if not file_field:
            print("错误：未提供CSV文件")
            return web.json_response({
                "success": False,
                "error": "未提供CSV文件"
            }, status=400)

        task_id = data.get('taskId')
        if not task_id or not isinstance(task_id, str):
            print(f"错误：无效的taskId，值为: {task_id}")
            return web.json_response({
                "success": False,
                "error": "无效的taskId"
            }, status=400)
        print(f"获取到有效的taskId: {task_id}")

        # Unique per-request temp file to avoid collisions between uploads.
        temp_dir = os.path.join(os.path.dirname(__file__), 'temp')
        temp_file_path = os.path.join(
            temp_dir, f"character_{task_id}_{os.urandom(8).hex()}.csv")
        print(f"创建临时文件路径: {temp_file_path}")

        # request.app.loop is deprecated in aiohttp 3.x; use the running loop.
        loop = asyncio.get_running_loop()
        await loop.run_in_executor(
            None,
            lambda: os.makedirs(temp_dir, exist_ok=True, mode=0o755)
        )
        print("临时目录准备就绪")

        # Read the uploaded bytes (kept in memory for encoding detection).
        csv_data = file_field.file.read()
        if not csv_data:
            print(f"错误：CSV文件为空 (taskId: {task_id})")
            return web.json_response({
                "success": False,
                "error": "无效的CSV数据（空文件）"
            }, status=400)
        print(f"成功读取CSV数据，大小: {len(csv_data)} bytes (taskId: {task_id})")

        def write_temp_file() -> None:
            # Context manager guarantees the handle is closed (the original
            # left an anonymous file object to be closed by refcounting).
            with open(temp_file_path, 'wb') as f:
                f.write(csv_data)

        await loop.run_in_executor(None, write_temp_file)
        print(f"CSV数据已保存到临时文件 (taskId: {task_id})")

        # Detect the file encoding by trial decoding.
        file_encoding = None
        for encoding in COMMON_ENCODINGS:
            try:
                csv_data.decode(encoding)
                file_encoding = encoding
                break
            except (UnicodeDecodeError, LookupError):
                # LookupError: codec name unknown on this platform (e.g.
                # 'ansi' outside Windows). The original caught only
                # UnicodeDecodeError, so that case crashed the handler.
                continue

        if not file_encoding:
            # sys is already imported at module level.
            file_encoding = sys.getdefaultencoding()
            print(f"所有常见编码测试失败，使用系统默认编码: {file_encoding} (taskId: {task_id})")
        else:
            print(f"检测到文件编码: {file_encoding} (taskId: {task_id})")

        # CSV column -> character table column mapping.
        field_mapping = {
            'name': 'name',
            'cn_name': 'cn_name',
            'tag': 'tag',
            'description': 'description',
            'rating': 'rating',
            'favorite': 'favorite',
            'disliked': 'disliked'
        }

        required_fields = ['name', 'tag']

        # Defaults applied when a column is absent from the CSV.
        default_values = {
            'rating': 0.0,
            'favorite': 0,
            'disliked': 0
        }

        def progress_callback(progress: int) -> None:
            # Forward row-level progress to the shared task manager.
            task_manager.update_progress(task_id, progress)

        def progress_create_callback(total: int) -> None:
            # Register the task once the total row count has been estimated.
            task_manager.create_task(task_id, total)

        async def process_import():
            """Background import: run the CSV import, then clean up the temp file."""
            try:
                if not os.path.exists(temp_file_path):
                    error_msg = f"文件不存在: {temp_file_path}"
                    print(f"错误 (taskId: {task_id}): {error_msg}")
                    raise ValueError(error_msg)

                print(f"开始调用CSVImporter导入角色数据 (taskId: {task_id})")
                await CSVImporter.import_flexible_csv(
                    temp_file_path,
                    'character',
                    field_mapping,
                    required_fields,
                    default_values,
                    5000,
                    progress_callback,
                    progress_create_callback,
                    encoding=file_encoding
                )

                print(f"角色表导入完成 (taskId: {task_id})")
                task_manager.complete_task(task_id)

            except Exception as e:
                error_msg = f"导入过程发生错误: {str(e)}"
                print(f"错误 (taskId: {task_id}): {error_msg}")
                # Best-effort cancellation; never mask the original error.
                try:
                    task_manager.cancel_task(task_id)
                    print(f"已取消任务 (taskId: {task_id})")
                except Exception as cancel_err:
                    print(f"取消任务失败 (taskId: {task_id}): {str(cancel_err)}")
                raise e
            finally:
                # Best-effort cleanup; errno 2 (ENOENT) means already gone.
                if temp_file_path and os.path.exists(temp_file_path):
                    try:
                        await loop.run_in_executor(
                            None,
                            lambda: os.unlink(temp_file_path)
                        )
                        print(f"临时文件已清理: {temp_file_path} (taskId: {task_id})")
                    except OSError as cleanup_error:
                        if cleanup_error.errno != 2:
                            print(f"临时文件清理错误 (taskId: {task_id}): {str(cleanup_error)}")

        # Fire-and-forget background task.
        # NOTE(review): the Task reference is not retained anywhere; asyncio
        # keeps only weak references, so a pending task can be garbage-
        # collected — consider storing it (e.g. in app state). TODO confirm.
        asyncio.create_task(process_import())
        print(f"已创建异步任务处理角色表导入 (taskId: {task_id})")

        return web.json_response({
            "success": True,
            "message": "角色表导入任务已开始",
            "taskId": task_id
        })

    except ValueError as e:
        print(f"值错误: {str(e)}, taskId: {task_id or '未设置'}")
        return web.json_response({
            "success": False,
            "error": str(e)
        }, status=400)
    except Exception as e:
        print(f"发生异常: {str(e)}, taskId: {task_id or '未设置'}")
        return web.json_response({
            "success": False,
            "error": str(e)
        }, status=500)


async def handle_import_prompt_items_csv(request: web.Request) -> web.Response:
    """POST handler: import an uploaded CSV into the ``prompt_items`` table.

    Expects multipart form data with a ``file`` field and a ``taskId``;
    responds immediately and runs the import as a background asyncio task.
    """
    temp_file_path = ""
    # Candidate encodings, tried in priority order ('ansi' only resolves on
    # Windows; elsewhere decode() raises LookupError — handled below).
    COMMON_ENCODINGS = ['utf-8', 'utf-8-sig', 'gbk', 'gb2312', 'ansi']

    try:
        # Parse the multipart form body.
        data = await request.post()

        file_field = data.get('file')
        if not file_field:
            return web.json_response({
                "success": False,
                "error": "未提供CSV文件"
            }, status=400)

        task_id = data.get('taskId')
        if not task_id or not isinstance(task_id, str):
            return web.json_response({
                "success": False,
                "error": "无效的taskId"
            }, status=400)

        # Unique per-request temp file to avoid collisions between uploads.
        temp_dir = os.path.join(os.path.dirname(__file__), 'temp')
        temp_file_path = os.path.join(
            temp_dir, f"prompt_{task_id}_{os.urandom(8).hex()}.csv")

        # request.app.loop is deprecated in aiohttp 3.x; use the running loop.
        loop = asyncio.get_running_loop()
        await loop.run_in_executor(
            None,
            lambda: os.makedirs(temp_dir, exist_ok=True, mode=0o755)
        )

        # Read the uploaded bytes (kept in memory for encoding detection).
        csv_data = file_field.file.read()
        if not csv_data:
            return web.json_response({
                "success": False,
                "error": "无效的CSV数据（空文件）"
            }, status=400)

        def write_temp_file() -> None:
            # Context manager guarantees the handle is closed.
            with open(temp_file_path, 'wb') as f:
                f.write(csv_data)

        await loop.run_in_executor(None, write_temp_file)

        # Detect the file encoding by trial decoding.
        file_encoding = None
        for encoding in COMMON_ENCODINGS:
            try:
                csv_data.decode(encoding)
                file_encoding = encoding
                break
            except (UnicodeDecodeError, LookupError):
                # LookupError: codec unknown on this platform (e.g. 'ansi'
                # outside Windows); the original caught only
                # UnicodeDecodeError and crashed here.
                continue

        if not file_encoding:
            # sys is already imported at module level.
            file_encoding = sys.getdefaultencoding()

        # CSV column -> prompt_items table column mapping.
        field_mapping = {
            'id': 'id',
            'class_type': 'class_type',
            'name': 'name',
            'customize_name': 'customize_name',
            'image': 'image',
            'tags': 'tags',
            'description': 'description',
            'rating': 'rating',
            'favorite': 'favorite',
            'disliked': 'disliked'
        }

        required_fields = ['tags']

        # Defaults applied when a column is absent from the CSV.
        default_values = {
            'rating': 0.0,
            'favorite': 0,
            'disliked': 0,
            'class_type': 'collect',
            'name': '',
            'tags': ''
        }

        def progress_callback(progress: int) -> None:
            task_manager.update_progress(task_id, progress)

        def progress_create_callback(total: int) -> None:
            task_manager.create_task(task_id, total)

        async def process_import():
            """Background import: run the CSV import, then clean up the temp file."""
            try:
                if not os.path.exists(temp_file_path):
                    raise ValueError(f"文件不存在: {temp_file_path}")

                await CSVImporter.import_flexible_csv(
                    temp_file_path,
                    'prompt_items',
                    field_mapping,
                    required_fields,
                    default_values,
                    5000,
                    progress_callback,
                    progress_create_callback,
                    encoding=file_encoding
                )

                task_manager.complete_task(task_id)

            except Exception as e:
                # Best-effort cancellation; never mask the original error.
                try:
                    task_manager.cancel_task(task_id)
                except Exception:
                    # Narrowed from a bare `except:` so KeyboardInterrupt /
                    # SystemExit are no longer swallowed.
                    pass
                raise e
            finally:
                # Best-effort cleanup; a missing file is fine.
                if temp_file_path and os.path.exists(temp_file_path):
                    try:
                        await loop.run_in_executor(
                            None,
                            lambda: os.unlink(temp_file_path)
                        )
                    except OSError:
                        pass

        # NOTE(review): the Task reference is not retained; asyncio keeps only
        # weak refs, so a pending task can be garbage-collected — consider
        # storing it in app state. TODO confirm.
        asyncio.create_task(process_import())

        return web.json_response({
            "success": True,
            "message": "prompt_items表导入任务已开始",
            "taskId": task_id
        })

    except ValueError as e:
        return web.json_response({
            "success": False,
            "error": str(e)
        }, status=400)
    except Exception as e:
        return web.json_response({
            "success": False,
            "error": str(e)
        }, status=500)


async def handle_import_tags_csv(request: web.Request) -> web.Response:
    """POST handler: import an uploaded CSV into the ``tags`` table.

    Besides ``file`` and ``taskId`` the form must carry ``group`` and
    ``name`` identifying the tag group; that group row is created (or
    fetched) first and its id is attached to every imported tag. The import
    itself runs as a background asyncio task.
    """
    temp_file_path = ""
    # Candidate encodings, tried in priority order ('ansi' only resolves on
    # Windows; elsewhere decode() raises LookupError — handled below).
    COMMON_ENCODINGS = ['utf-8', 'utf-8-sig', 'gbk', 'gb2312', 'ansi']

    try:
        # Parse the multipart form body.
        data = await request.post()
        print("成功接收标签CSV的FormData数据")

        file_field = data.get('file')
        if not file_field:
            print("错误：未提供标签CSV文件")
            return web.json_response({
                "success": False,
                "error": "未提供标签CSV文件"
            }, status=400)
        print("成功获取标签CSV文件字段")

        task_id = data.get('taskId')
        group_name = data.get('group')
        name = data.get('name')

        if not task_id or not isinstance(task_id, str):
            print(f"错误：无效的taskId，值为: {task_id}")
            return web.json_response({
                "success": False,
                "error": "无效的taskId"
            }, status=400)

        if not group_name or not isinstance(group_name, str):
            print(f"错误：无效的group参数，值为: {group_name}")
            return web.json_response({
                "success": False,
                "error": "无效的group参数"
            }, status=400)

        if not name or not isinstance(name, str):
            print(f"错误：无效的name参数，值为: {name}")
            return web.json_response({
                "success": False,
                "error": "无效的name参数"
            }, status=400)

        print(f"成功获取参数 - taskId: {task_id}, group: {group_name}, name: {name}")

        # Unique per-request temp file to avoid collisions between uploads.
        temp_dir = os.path.join(os.path.dirname(__file__), 'temp')
        temp_file_path = os.path.join(
            temp_dir, f"tags_{task_id}_{os.urandom(8).hex()}.csv")
        print(f"创建临时文件路径: {temp_file_path}")

        # request.app.loop is deprecated in aiohttp 3.x; use the running loop.
        loop = asyncio.get_running_loop()
        await loop.run_in_executor(
            None,
            lambda: os.makedirs(temp_dir, exist_ok=True, mode=0o755)
        )
        print("临时目录准备就绪")

        # Read the uploaded bytes (kept in memory for encoding detection).
        csv_data = file_field.file.read()
        if not csv_data:
            print("错误：无效的标签CSV数据（空文件）")
            return web.json_response({
                "success": False,
                "error": "无效的CSV数据"
            }, status=400)
        print(f"成功读取标签CSV数据，大小: {len(csv_data)} bytes")

        def write_temp_file() -> None:
            # Context manager guarantees the handle is closed.
            with open(temp_file_path, 'wb') as f:
                f.write(csv_data)

        await loop.run_in_executor(None, write_temp_file)
        print(f"标签CSV数据已保存到临时文件")

        # Detect the file encoding by trial decoding.
        file_encoding = None
        for encoding in COMMON_ENCODINGS:
            try:
                csv_data.decode(encoding)
                file_encoding = encoding
                print(f"成功通过编码测试: {encoding}")
                break
            except (UnicodeDecodeError, LookupError):
                # LookupError: codec unknown on this platform (e.g. 'ansi'
                # outside Windows); the original caught only
                # UnicodeDecodeError and crashed here.
                continue

        if not file_encoding:
            # sys is already imported at module level.
            file_encoding = sys.getdefaultencoding()
            print(f"所有常见编码测试失败，使用系统默认编码: {file_encoding}")

        # Ensure the tag group exists and fetch its id before importing rows.
        tag_group_id = None
        try:
            with db_manager.get_connection() as conn:
                try:
                    conn.execute('BEGIN TRANSACTION')

                    # Create the group if it does not exist yet.
                    conn.execute(
                        """INSERT OR IGNORE INTO tag_groups (group_name, name)
                           VALUES (?, ?)""",
                        [group_name, name]
                    )

                    group_cursor = conn.execute(
                        """SELECT id FROM tag_groups
                           WHERE group_name = ? AND name = ?""",
                        [group_name, name]
                    )
                    group_result = group_cursor.fetchone()

                    if not group_result:
                        raise ValueError('分类创建或获取失败')

                    tag_group_id = group_result[0]
                    conn.execute('COMMIT')
                except Exception:
                    # Roll back on the SAME connection that opened the
                    # transaction (the original rolled back on a freshly
                    # opened connection, which does nothing to this one).
                    try:
                        conn.execute('ROLLBACK')
                    except sqlite3.OperationalError:
                        pass  # no transaction active
                    raise
                print(f"成功获取或创建标签组，ID: {tag_group_id}")

        except sqlite3.IntegrityError as e:
            if 'UNIQUE' in str(e):
                return web.json_response({
                    "error": "标签已存在",
                    "taskId": task_id
                }, status=409)
            raise

        # CSV column -> tags table column mapping.
        field_mapping = {
            'value': 'tag',
            'label': 'tag_des'
        }

        required_fields = ['tag', 'tag_des']

        # Every imported tag is attached to the group resolved above.
        default_values = {
            'tag_group_id': tag_group_id
        }

        def progress_callback(progress: int) -> None:
            print(f"更新标签导入进度 - taskId: {task_id}, 进度: {progress}")
            task_manager.update_progress(task_id, progress)

        def progress_create_callback(total: int) -> None:
            print(f"创建标签导入任务 - taskId: {task_id}, 总进度: {total}")
            task_manager.create_task(task_id, total)

        async def process_import():
            """Background import: run the CSV import, then clean up the temp file."""
            try:
                print(f"开始处理标签导入任务 - taskId: {task_id}")
                if not os.path.exists(temp_file_path):
                    print(f"警告：process_import中文件不存在 - {temp_file_path}")
                    raise ValueError(f"文件不存在: {temp_file_path}")

                print(f"调用CSVImporter.import_flexible_csv (编码: {file_encoding}) - taskId: {task_id}")
                await CSVImporter.import_flexible_csv(
                    temp_file_path,
                    'tags',
                    field_mapping,
                    required_fields,
                    default_values,
                    5000,
                    progress_callback,
                    progress_create_callback,
                    encoding=file_encoding
                )

                print(f"标签导入完成，标记任务为完成 - taskId: {task_id}")
                task_manager.complete_task(task_id)

            except Exception as e:
                print(f"标签导入过程中发生错误 - taskId: {task_id}, 错误: {str(e)}")
                print(f"尝试取消任务 - taskId: {task_id}")
                # Best-effort cancellation; never mask the original error.
                try:
                    task_manager.cancel_task(task_id)
                except Exception as cancel_err:
                    print(f"取消任务时发生错误 - taskId: {task_id}, 错误: {str(cancel_err)}")
                raise e
            finally:
                # Best-effort cleanup; errno 2 (ENOENT) means already gone.
                if temp_file_path and os.path.exists(temp_file_path):
                    try:
                        await loop.run_in_executor(
                            None,
                            lambda: os.unlink(temp_file_path)
                        )
                        print(f"标签临时文件已清理 - {temp_file_path}")
                    except Exception as cleanup_error:
                        if isinstance(cleanup_error, OSError) and cleanup_error.errno != 2:
                            print(f"标签临时文件清理错误: {str(cleanup_error)}")

        # NOTE(review): the Task reference is not retained; asyncio keeps only
        # weak refs, so a pending task can be garbage-collected — consider
        # storing it in app state. TODO confirm.
        print(f"创建异步任务处理标签导入 - taskId: {task_id}")
        asyncio.create_task(process_import())

        return web.json_response({
            "success": True,
            "message": "tags表导入任务已开始",
            "taskId": task_id,
            "tagGroupId": tag_group_id
        })

    except ValueError as e:
        print(f"值错误: {str(e)}, taskId: {task_id if 'task_id' in locals() else '未设置'}")
        return web.json_response({
            "success": False,
            "error": str(e)
        }, status=400)
    except Exception as e:
        print(f"发生异常: {str(e)}, taskId: {task_id if 'task_id' in locals() else '未设置'}")
        return web.json_response({
            "success": False,
            "error": str(e)
        }, status=500)


async def handle_import_translate_csv(request: web.Request) -> web.Response:
    """POST handler: import an uploaded CSV into the ``translate`` table.

    Unlike the other import handlers, progress is reported to the task
    manager as a 0-100 percentage rather than a raw row count.
    """
    temp_file_path = ""
    # Candidate encodings, tried in priority order. NOTE: 'ansi' is only a
    # registered codec alias on Windows; elsewhere decode() raises
    # LookupError for it (handled in the detection loop below).
    COMMON_ENCODINGS = ['utf-8', 'ansi', 'gbk', 'gb2312', 'utf-8-sig']

    try:
        # Parse the multipart form body.
        data = await request.post()
        print("成功接收FormData数据")

        file_field = data.get('file')
        if not file_field:
            print("错误：未提供CSV文件")
            return web.json_response({
                "success": False,
                "error": "未提供CSV文件"
            }, status=400)
        print("成功获取CSV文件字段")

        task_id = data.get('taskId')
        if not task_id or not isinstance(task_id, str):
            print(f"错误：无效的taskId，值为: {task_id}")
            return web.json_response({
                "success": False,
                "error": "无效的taskId"
            }, status=400)
        print(f"成功获取taskId: {task_id}")

        # Unique per-request temp file to avoid collisions between uploads.
        temp_dir = os.path.join(os.path.dirname(__file__), 'temp')
        temp_file_path = os.path.join(
            temp_dir, f"translate_{task_id}_{os.urandom(8).hex()}.csv")
        print(f"创建临时文件路径: {temp_file_path}")

        # request.app.loop is deprecated in aiohttp 3.x; use the running loop.
        loop = asyncio.get_running_loop()
        await loop.run_in_executor(
            None,
            lambda: os.makedirs(temp_dir, exist_ok=True, mode=0o755)
        )
        print("临时目录准备就绪")

        # Read the uploaded bytes (kept in memory for encoding detection).
        csv_data = file_field.file.read()
        if not csv_data:
            print("错误：无效的CSV数据（空文件）")
            return web.json_response({
                "success": False,
                "error": "无效的CSV数据"
            }, status=400)
        print(f"成功读取CSV数据，大小: {len(csv_data)} bytes")

        def write_temp_file() -> None:
            # Preserve the raw bytes; context manager closes the handle.
            with open(temp_file_path, 'wb') as f:
                f.write(csv_data)

        await loop.run_in_executor(None, write_temp_file)
        print(f"CSV数据已保存到临时文件")

        # Detect the encoding by trial decoding (no third-party detector).
        file_encoding = None
        for encoding in COMMON_ENCODINGS:
            try:
                csv_data.decode(encoding)
                file_encoding = encoding
                print(f"成功通过编码测试: {encoding}")
                break
            except (UnicodeDecodeError, LookupError):
                # LookupError: the original caught only UnicodeDecodeError,
                # so the unknown 'ansi' codec crashed this handler on
                # non-Windows platforms for any non-UTF-8 upload.
                continue

        # Fall back to the interpreter default if nothing matched.
        if not file_encoding:
            file_encoding = sys.getdefaultencoding()
            print(f"所有常见编码测试失败，使用系统默认编码: {file_encoding}")

        # CSV column -> translate table column mapping.
        field_mapping = {
            'en': 'en',
            'cn': 'cn'
        }

        required_fields = ['en', 'cn']

        # Defaults applied when a column is absent from the CSV.
        default_values = {
            'hit_count': 0
        }

        # Progress is normalized: the task is registered with a total of 100
        # and updates are percentages of the estimated row count.
        total_progress = 0  # estimated total rows, set by the start callback

        def progress_callback(progress: int) -> None:
            # Convert the raw row index into a 0-100 percentage.
            if total_progress <= 0:
                percent = 0
            else:
                percent = min(int(progress / total_progress * 100), 100)
            print(f"更新任务进度 - taskId: {task_id}, 原始进度: {progress}/{total_progress}, 百分比: {percent}%")
            task_manager.update_progress(task_id, percent)

        def progress_create_callback(total: int) -> None:
            nonlocal total_progress
            total_progress = total
            print(f"创建新任务 - taskId: {task_id}, 总进度: {total}")
            task_manager.create_task(task_id, 100)  # task total fixed at 100%

        async def process_import():
            """Background import: run the CSV import, then clean up the temp file."""
            try:
                print(f"开始处理导入任务 - taskId: {task_id}")
                print(f"使用编码: {file_encoding} 解析CSV文件")

                # Diagnostic only — the importer re-checks and raises itself.
                if not os.path.exists(temp_file_path):
                    print(f"警告：process_import中文件不存在 - {temp_file_path}")
                else:
                    print(f"process_import中文件存在 - {temp_file_path}")

                print(f"调用CSVImporter.import_flexible_csv (编码: {file_encoding}) - taskId: {task_id}")
                await CSVImporter.import_flexible_csv(
                    temp_file_path,
                    'translate',
                    field_mapping,
                    required_fields,
                    default_values,
                    5000,
                    progress_callback,
                    progress_create_callback,
                    encoding=file_encoding
                )

                print(f"导入完成，标记任务为完成 - taskId: {task_id}")
                task_manager.complete_task(task_id)

            except Exception as e:
                print(f"导入过程中发生错误 - taskId: {task_id}, 错误: {str(e)}")
                print(f"尝试取消任务 - taskId: {task_id}")
                # Best-effort cancellation; never mask the original error.
                try:
                    task_manager.cancel_task(task_id)
                except Exception as cancel_err:
                    print(f"取消任务时发生错误 - taskId: {task_id}, 错误: {str(cancel_err)}")
                raise e
            finally:
                # Best-effort cleanup; errno 2 (ENOENT) means already gone.
                if temp_file_path and os.path.exists(temp_file_path):
                    try:
                        await loop.run_in_executor(
                            None,
                            lambda: os.unlink(temp_file_path)
                        )
                        print(f"临时文件已清理 - {temp_file_path}")
                    except Exception as cleanup_error:
                        if isinstance(cleanup_error, OSError) and cleanup_error.errno != 2:
                            print(f"临时CSV文件清理错误: {str(cleanup_error)}")

        # NOTE(review): the Task reference is not retained; asyncio keeps only
        # weak refs, so a pending task can be garbage-collected — consider
        # storing it in app state. TODO confirm.
        print(f"创建异步任务处理导入 - taskId: {task_id}")
        asyncio.create_task(process_import())

        return web.json_response({
            "success": True,
            "message": "翻译表导入任务已开始",
            "taskId": task_id
        })

    except ValueError as e:
        print(f"值错误: {str(e)}, taskId: {task_id if 'task_id' in locals() else '未设置'}")
        return web.json_response({
            "success": False,
            "error": str(e)
        }, status=400)
    except Exception as e:
        print(f"发生异常: {str(e)}, taskId: {task_id if 'task_id' in locals() else '未设置'}")
        return web.json_response({
            "success": False,
            "error": str(e)
        }, status=500)


async def handle_export_character_stream(request: web.Request) -> web.Response:
    """Stream the `character` table as a CSV file download.

    Query parameters are validated through ExportCharacterQuery.  Rows are
    fetched in pages of 1000 and written to the response incrementally so
    large result sets never live fully in memory; progress is published via
    task_manager under the caller-supplied taskId.

    Returns 404 when no rows match, 400 on invalid parameters, 500 on
    failures that occur before the stream is opened; failures after the
    stream has started are reported in-band on the stream itself.
    """
    try:
        # Parse and validate query parameters.
        params = ExportCharacterQuery(**request.query)
        task_id = params.taskId

        # Whitelist ORDER BY pieces: they are interpolated into the SQL text
        # and therefore must never come straight from user input.
        allowed_fields = ['name', 'cn_name', 'tag', 'created_at', 'updated_at', 'rating', 'favorite']
        order_by = params.sort_field if params.sort_field in allowed_fields else 'name'
        order_dir = params.sort_order.upper() if params.sort_order.upper() in ['ASC', 'DESC'] else 'ASC'

        # Build a fully parameterized WHERE clause.
        where = []
        query_params = []

        if params.name:
            where.append("name LIKE ?")
            query_params.append(f'%{params.name}%')

        if params.cn_name:
            where.append("cn_name LIKE ?")
            query_params.append(f'%{params.cn_name}%')

        if params.tag:
            where.append("tag = ?")
            query_params.append(params.tag)

        if params.description:
            where.append("description LIKE ?")
            query_params.append(f'%{params.description}%')

        if params.favorite is not None:
            where.append("favorite = ?")
            query_params.append(1 if params.favorite else 0)

        if params.disliked is not None:
            where.append("disliked = ?")
            query_params.append(1 if params.disliked else 0)

        if params.rating_min is not None:
            where.append("rating >= ?")
            query_params.append(params.rating_min)

        if params.rating_max is not None:
            where.append("rating <= ?")
            query_params.append(params.rating_max)

        where_clause = f"WHERE {' AND '.join(where)}" if where else ""

        # Base SELECT shared by the data query (count reuses the WHERE clause).
        base_sql = f"""
            SELECT
                name, cn_name, tag,
                description, rating, favorite, disliked,
                created_at, updated_at
            FROM character
            {where_clause}
            ORDER BY {order_by} {order_dir}
        """

        # Total row count drives both the 404 check and progress reporting.
        count_sql = f"SELECT COUNT(*) AS total FROM character {where_clause}"
        with db_manager.get_connection() as conn:
            count_cursor = conn.execute(count_sql, query_params)
            total = count_cursor.fetchone()[0] or 0

        if total == 0:
            return web.json_response({"error": "No data to export"}, status=404)

        # Prepare the streaming response.  BUGFIX: the generated filename was
        # previously dropped and a literal placeholder sent in the header.
        response = web.StreamResponse()
        filename = f"characters_export_{int(datetime.now().timestamp())}.csv"
        response.headers["Content-Type"] = "text/csv; charset=utf-8"
        response.headers["Content-Disposition"] = f'attachment; filename="{filename}"'
        await response.prepare(request)

        # Register the export task for progress polling.
        task_manager.create_task(task_id, total)

        fieldnames = ['name', 'cn_name', 'tag', 'description', 'rating',
                      'favorite', 'disliked', 'created_at', 'updated_at']

        def _iso(value) -> str:
            """Normalize a stored timestamp string; '' for missing or invalid."""
            if not value:
                return ''
            try:
                return datetime.fromisoformat(value).isoformat()
            except ValueError:
                # Tolerate malformed stored dates instead of aborting the export.
                return ''

        try:
            from io import StringIO

            # Emit the CSV header first.
            header_buffer = StringIO()
            csv_writer = csv.DictWriter(header_buffer, fieldnames=fieldnames)
            csv_writer.writeheader()
            await response.write(header_buffer.getvalue().encode('utf-8'))

            # Stream data page by page.
            page_size = 1000
            page = 1
            processed = 0

            while processed < total:
                offset = (page - 1) * page_size
                data_sql = f"{base_sql} LIMIT ? OFFSET ?"
                data_params = query_params + [page_size, offset]

                with db_manager.get_connection() as conn:
                    conn.row_factory = sqlite3.Row
                    cursor = conn.execute(data_sql, data_params)
                    data = [dict(row) for row in cursor.fetchall()]

                if not data:
                    # Rows may have been deleted mid-export; avoid looping forever.
                    break

                # Serialize the current page into one buffer, then write once.
                row_buffer = StringIO()
                row_writer = csv.DictWriter(row_buffer, fieldnames=fieldnames)

                for row in data:
                    row_writer.writerow({
                        'name': row['name'],
                        'cn_name': row['cn_name'],
                        'tag': row['tag'],
                        'description': row['description'],
                        'rating': row['rating'],
                        'favorite': 'Yes' if row['favorite'] else 'No',
                        'disliked': 'Yes' if row['disliked'] else 'No',
                        'created_at': _iso(row['created_at']),
                        'updated_at': _iso(row['updated_at']),
                    })

                await response.write(row_buffer.getvalue().encode('utf-8'))

                processed += len(data)
                page += 1
                task_manager.update_progress(task_id, processed)

                # Yield to the event loop between pages.
                await asyncio.sleep(0)

            task_manager.complete_task(task_id)
            await response.write_eof()  # cleanly terminate the stream
            return response

        except Exception as e:
            task_manager.cancel_task(task_id)
            # The stream has already started; report the error in-band.
            await response.write(f"Error: {str(e)}".encode('utf-8'))
            await response.write_eof()
            return response

    except ValueError as e:
        return web.json_response({"error": str(e)}, status=400)
    except Exception as e:
        # Errors raised before the response was prepared can still go out as JSON.
        return web.json_response({"error": str(e)}, status=500)

async def handle_export_prompt_items_stream(request: web.Request) -> web.Response:
    """Stream the `prompt_items` table as a CSV file download.

    Mirrors handle_export_character_stream: validated query filters, paged
    reads of 1000 rows over a single reused connection, incremental writes,
    and progress updates through task_manager keyed by the caller's taskId.

    Returns 404 when no rows match, 400 on invalid parameters, 500 on
    failures before the stream is opened; later failures are reported
    in-band on the stream.
    """
    try:
        # Parse and validate query parameters.
        params = ExportPromptItemsQuery(**request.query)
        task_id = params.taskId

        # Whitelist sort column/direction (interpolated into SQL text).
        allowed_fields = {'created_at', 'updated_at', 'name', 'customize_name', 'rating'}
        order_by = params.sort_field if params.sort_field in allowed_fields else 'created_at'

        allowed_dirs = {'ASC', 'DESC'}
        order_dir = params.sort_order.upper() if params.sort_order.upper() in allowed_dirs else 'DESC'

        # Build a fully parameterized WHERE clause.
        where = []
        query_params = []

        if params.class_type:
            where.append("class_type = ?")
            query_params.append(params.class_type)

        if params.id is not None and params.id != '':
            where.append("id = ?")
            query_params.append(params.id)

        if params.name:
            where.append("name LIKE ?")
            query_params.append(f'%{params.name}%')

        if params.customize_name:
            where.append("customize_name LIKE ?")
            query_params.append(f'%{params.customize_name}%')

        if params.tags:
            where.append("tags LIKE ?")
            query_params.append(f'%{params.tags}%')

        if params.min_rating is not None:
            where.append("rating >= ?")
            query_params.append(params.min_rating)

        if params.max_rating is not None:
            where.append("rating <= ?")
            query_params.append(params.max_rating)

        if params.favorite is not None:
            where.append("favorite = ?")
            query_params.append(1 if params.favorite else 0)

        if params.disliked is not None:
            where.append("disliked = ?")
            query_params.append(1 if params.disliked else 0)

        where_clause = f"WHERE {' AND '.join(where)}" if where else ""

        # Base SELECT shared with the paged data query.
        base_sql = f"""
            SELECT
                id, class_type, name, customize_name,
                tags, rating, favorite, disliked, created_at
            FROM prompt_items
            {where_clause}
            ORDER BY {order_by} {order_dir}
        """

        # Total row count drives both the 404 check and progress reporting.
        count_sql = f"SELECT COUNT(*) AS total FROM prompt_items {where_clause}"
        with db_manager.get_connection() as conn:
            count_cursor = conn.execute(count_sql, query_params)
            total = count_cursor.fetchone()[0] or 0

        if total == 0:
            return web.json_response({"error": "No data to export"}, status=404)

        # Prepare the streaming response.  BUGFIX: the generated filename was
        # previously dropped and a literal placeholder sent in the header.
        response = web.StreamResponse()
        filename = f"prompt_items_export_{int(datetime.now().timestamp())}.csv"
        response.headers["Content-Type"] = "text/csv; charset=utf-8"
        response.headers["Content-Disposition"] = f'attachment; filename="{filename}"'
        await response.prepare(request)

        # Register the export task for progress polling.
        task_manager.create_task(task_id, total)

        fieldnames = ['id', 'class_type', 'name', 'customize_name', 'tags',
                      'rating', 'favorite', 'disliked', 'created_at']

        # Emit the CSV header first.
        buffer = io.StringIO()
        csv_writer = csv.DictWriter(buffer, fieldnames=fieldnames)
        csv_writer.writeheader()
        await response.write(buffer.getvalue().encode('utf-8'))

        # Stream data page by page.
        page_size = 1000
        page = 1
        processed = 0

        try:
            # Reuse one connection for all pages to cut connection overhead.
            with db_manager.get_connection() as conn:
                conn.row_factory = sqlite3.Row

                while processed < total:
                    offset = (page - 1) * page_size
                    data_sql = f"{base_sql} LIMIT ? OFFSET ?"
                    data_params = query_params + [page_size, offset]

                    cursor = conn.execute(data_sql, data_params)
                    data = [dict(row) for row in cursor.fetchall()]
                    if not data:
                        # Rows may have been deleted mid-export; avoid looping forever.
                        break

                    # Serialize the whole page into one buffer, then write once
                    # (previously a buffer+writer was allocated per row).
                    row_buffer = io.StringIO()
                    row_writer = csv.DictWriter(row_buffer, fieldnames=fieldnames)
                    for row in data:
                        try:
                            created_at = datetime.fromisoformat(row['created_at']).isoformat() if row[
                                'created_at'] else ''
                        except ValueError:
                            created_at = ''  # tolerate malformed stored dates

                        row_writer.writerow({
                            'id': row['id'],
                            'class_type': row['class_type'],
                            'name': row['name'] or '',  # map NULL to empty string
                            'customize_name': row['customize_name'] or '',
                            'tags': row['tags'] or '',
                            'rating': row['rating'] or '',
                            'favorite': 'Yes' if row['favorite'] else 'No',
                            'disliked': 'Yes' if row['disliked'] else 'No',
                            'created_at': created_at
                        })

                    await response.write(row_buffer.getvalue().encode('utf-8'))

                    processed += len(data)
                    page += 1
                    task_manager.update_progress(task_id, processed)

                    # Yield to the event loop between pages.
                    await asyncio.sleep(0)

            task_manager.complete_task(task_id)
            await response.write_eof()
            return response

        except Exception as e:
            task_manager.cancel_task(task_id)
            request.app.logger.error(f"Export failed: {str(e)}", exc_info=True)
            # BUGFIX: the stream is already open, so re-raising and letting the
            # outer handler build a JSON response could never be delivered.
            # Report the failure in-band like the sibling export handlers.
            await response.write(f"Error: {str(e)}".encode('utf-8'))
            await response.write_eof()
            return response

    except ValueError as e:
        return web.json_response({"error": str(e)}, status=400)
    except Exception as e:
        # Log unexpected pre-stream failures with a traceback.
        request.app.logger.error(f"Unexpected error: {str(e)}", exc_info=True)
        return web.json_response({"error": "Internal server error"}, status=500)

async def handle_export_translate_stream(request: web.Request) -> web.Response:
    """Stream the `translate` table as a CSV file download.

    Same paging/streaming scheme as the other export handlers: validated
    filters, 1000-row pages, incremental writes, and task_manager progress
    reporting keyed by the caller-supplied taskId.

    Returns 404 when no rows match, 400 on invalid parameters, 500 on
    failures before the stream is opened; later failures are reported
    in-band on the stream.
    """
    try:
        # Parse and validate query parameters.
        params = ExportTranslateQuery(**request.query)
        task_id = params.taskId

        # Whitelist ORDER BY pieces (they are interpolated into the SQL text).
        allowed_fields = ['en', 'cn', 'hit_count', 'created_at', 'updated_at']
        order_by = params.sort_field if params.sort_field in allowed_fields else 'en'
        order_dir = params.sort_order.upper() if params.sort_order.upper() in ['ASC', 'DESC'] else 'ASC'

        # Build a fully parameterized WHERE clause.
        where = []
        query_params = []

        if params.en:
            where.append("en LIKE ?")
            query_params.append(f'%{params.en}%')

        if params.cn:
            where.append("cn LIKE ?")
            query_params.append(f'%{params.cn}%')

        if params.hit_count_min is not None:
            where.append("hit_count >= ?")
            query_params.append(params.hit_count_min)

        if params.hit_count_max is not None:
            where.append("hit_count <= ?")
            query_params.append(params.hit_count_max)

        where_clause = f"WHERE {' AND '.join(where)}" if where else ""

        # Base SELECT shared with the paged data query.
        base_sql = f"""
            SELECT
                en, cn, hit_count,
                created_at, updated_at
            FROM translate
            {where_clause}
            ORDER BY {order_by} {order_dir}
        """

        # Total row count drives both the 404 check and progress reporting.
        count_sql = f"SELECT COUNT(*) AS total FROM translate {where_clause}"
        with db_manager.get_connection() as conn:
            count_cursor = conn.execute(count_sql, query_params)
            total = count_cursor.fetchone()[0] or 0

        if total == 0:
            return web.json_response({"error": "没有可导出的数据"}, status=404)

        # Prepare the streaming response; after prepare() no other response
        # type can be returned.  BUGFIX: the generated filename was previously
        # dropped and a literal placeholder sent in the header.
        response = web.StreamResponse()
        filename = f"translates_export_{int(datetime.now().timestamp())}.csv"
        response.headers["Content-Type"] = "text/csv; charset=utf-8"
        response.headers["Content-Disposition"] = f'attachment; filename="{filename}"'
        await response.prepare(request)

        # Register the export task for progress polling.
        task_manager.create_task(task_id, total)

        fieldnames = ['en', 'cn', 'hit_count', 'created_at', 'updated_at']

        def _iso(value) -> str:
            """Normalize a stored timestamp string; '' for missing or invalid."""
            if not value:
                return ''
            try:
                return datetime.fromisoformat(value).isoformat()
            except ValueError:
                # Tolerate malformed stored dates instead of aborting the export.
                return ''

        try:
            from io import StringIO

            # Emit the CSV header first.
            header_buffer = StringIO()
            csv_writer = csv.DictWriter(header_buffer, fieldnames=fieldnames)
            csv_writer.writeheader()
            await response.write(header_buffer.getvalue().encode('utf-8'))

            # Stream data page by page.
            page_size = 1000
            page = 1
            processed = 0

            while processed < total:
                offset = (page - 1) * page_size
                data_sql = f"{base_sql} LIMIT ? OFFSET ?"
                data_params = query_params + [page_size, offset]

                with db_manager.get_connection() as conn:
                    conn.row_factory = sqlite3.Row
                    cursor = conn.execute(data_sql, data_params)
                    data = [dict(row) for row in cursor.fetchall()]

                if not data:
                    # Rows may have been deleted mid-export; avoid looping forever.
                    break

                # Serialize the current page into one buffer, then write once.
                row_buffer = StringIO()
                row_writer = csv.DictWriter(row_buffer, fieldnames=fieldnames)

                for row in data:
                    row_writer.writerow({
                        'en': row['en'],
                        'cn': row['cn'],
                        'hit_count': row['hit_count'],
                        'created_at': _iso(row['created_at']),
                        'updated_at': _iso(row['updated_at']),
                    })

                await response.write(row_buffer.getvalue().encode('utf-8'))

                processed += len(data)
                page += 1
                task_manager.update_progress(task_id, processed)

                # Yield to the event loop between pages.
                await asyncio.sleep(0)

            task_manager.complete_task(task_id)
            await response.write_eof()  # cleanly terminate the stream
            return response

        except Exception as e:
            task_manager.cancel_task(task_id)
            # The stream has already started; report the error in-band.
            await response.write(f"Error: {str(e)}".encode('utf-8'))
            await response.write_eof()
            return response

    except ValueError as e:
        return web.json_response({"error": str(e)}, status=400)
    except Exception as e:
        # Errors raised before the response was prepared can still go out as JSON.
        return web.json_response({"error": str(e)}, status=500)

async def handle_export_tags_stream(request: web.Request) -> web.Response:
    """Stream joined `tags` + `tag_groups` rows as a CSV file download.

    Database work runs in the default thread-pool executor so the blocking
    sqlite calls never stall the event loop.  Output is paged (1000 rows per
    page) with progress published through task_manager under the supplied
    taskId.

    Returns 404 when no rows match, 400 on invalid parameters, 500 on
    failures before the stream is opened; later failures are reported
    in-band on the stream.
    """
    try:
        # Parse and validate query parameters.
        params = ExportTagsQuery(**request.query)
        task_id = params.taskId

        if not task_id:
            return web.json_response({"error": "taskId is required"}, status=400)

        # Build parameterized filters.
        where = []
        query_params = []

        if params.group:
            where.append("tg.group_name = ?")
            query_params.append(params.group)

        if params.name:
            where.append("tg.name = ?")
            query_params.append(params.name)

        # tag / description filters combine with OR: either match qualifies.
        or_conditions = []
        if params.tag:
            or_conditions.append("t.tag LIKE ?")
            query_params.append(f'%{params.tag}%')

        if params.description:
            or_conditions.append("t.tag_des LIKE ?")
            query_params.append(f'%{params.description}%')

        if or_conditions:
            where.append(f"({' OR '.join(or_conditions)})")

        # Base SELECT shared by count and data queries.
        base_sql = """
            SELECT
                tg.group_name AS "group",
                tg.name,
                t.tag AS value,
                t.tag_des AS label
            FROM tags t
            JOIN tag_groups tg ON t.tag_group_id = tg.id
        """

        if where:
            base_sql += f" WHERE {' AND '.join(where)}"

        def _sync_db_query(sql: str, sql_params: List) -> List[Dict]:
            """Blocking query helper executed inside the thread pool."""
            with db_manager.get_connection() as conn:
                conn.row_factory = sqlite3.Row
                cursor = conn.execute(sql, sql_params)
                return [dict(row) for row in cursor.fetchall()]

        async def run_db_query(sql: str, sql_params: List) -> List[Dict]:
            """Run the blocking query in the executor, off the event loop."""
            loop = asyncio.get_event_loop()
            return await loop.run_in_executor(
                None,
                lambda: _sync_db_query(sql, sql_params)
            )

        # Total row count drives both the 404 check and progress reporting.
        count_sql = f"SELECT COUNT(*) AS total FROM ({base_sql}) AS subquery"
        count_result = await run_db_query(count_sql, query_params)
        total = count_result[0]['total'] if count_result else 0

        if total == 0:
            return web.json_response({"error": "No data to export"}, status=404)

        # Prepare the streaming response.  BUGFIX: the generated filename was
        # previously dropped and a literal placeholder sent in the header.
        response = web.StreamResponse()
        filename = f"tags_export_{int(datetime.now().timestamp())}.csv"
        response.headers["Content-Type"] = "text/csv; charset=utf-8"
        response.headers["Content-Disposition"] = f'attachment; filename="{filename}"'
        await response.prepare(request)

        # Register the export task for progress polling.
        task_manager.create_task(task_id, total)

        # Emit the CSV header first.
        fieldnames = ['group', 'name', 'value', 'label']
        header_buffer = io.StringIO()
        csv_writer = csv.DictWriter(header_buffer, fieldnames=fieldnames)
        csv_writer.writeheader()
        await response.write(header_buffer.getvalue().encode('utf-8'))

        # Stream data page by page.
        page_size = 1000
        page = 1
        processed = 0

        try:
            while processed < total:
                offset = (page - 1) * page_size
                data_sql = f"{base_sql} LIMIT ? OFFSET ?"
                data_params = query_params + [page_size, offset]

                # Fetch the current page off the event loop.
                data = await run_db_query(data_sql, data_params)
                if not data:
                    break  # rows may have disappeared mid-export

                # Serialize the page into one buffer, then write once.
                row_buffer = io.StringIO()
                row_writer = csv.DictWriter(row_buffer, fieldnames=fieldnames)
                for row in data:
                    # Coerce each field to str; NULL columns become '' instead
                    # of the literal string "None".
                    row_writer.writerow({
                        'group': str(row.get('group') or ''),
                        'name': str(row.get('name') or ''),
                        'value': str(row.get('value') or ''),
                        'label': str(row.get('label') or '')
                    })

                await response.write(row_buffer.getvalue().encode('utf-8'))

                # Advance paging state and publish progress.
                processed += len(data)
                page += 1
                task_manager.update_progress(task_id, processed)

                # Yield briefly to other tasks on the loop.
                await asyncio.sleep(0.01)

            # Finish the task and close the stream cleanly.
            task_manager.complete_task(task_id)
            await response.write_eof()
            return response

        except Exception as e:
            # BUGFIX: every other call site passes cancel_task a single
            # argument; the extra reason argument here was inconsistent.
            task_manager.cancel_task(task_id)
            # The stream is already open, so re-raising (and letting the outer
            # handler build a JSON response) could never be delivered; report
            # the failure in-band like the sibling export handlers.
            await response.write(f"Error: {str(e)}".encode('utf-8'))
            await response.write_eof()
            return response

    except ValueError as e:
        return web.json_response({"error": str(e)}, status=400)
    except Exception as e:
        # Log unexpected pre-stream failures with a traceback.
        request.app.logger.error(f"Export tags error: {str(e)}", exc_info=True)
        return web.json_response({"error": "Internal server error"}, status=500)

async def handle_get_task_progress(request: web.Request) -> web.Response:
    """Report the progress of a long-running task named in the URL path."""
    try:
        tid = request.match_info['taskId']

        # Guard against an empty path segment.
        if not tid:
            return web.json_response({"error": "taskId不能为空"}, status=400)

        return web.json_response({
            "taskId": tid,
            "progress": task_manager.get_progress(tid),
        })
    except Exception as exc:
        return web.json_response({"error": str(exc)}, status=500)




# Route table for the import/export API (mounted by the enclosing app).
routes = [
    # Import character CSV
    web.post("/import/character-csv", handle_import_character_csv),
    # Export character CSV
    web.get("/sql/export_character_stream", handle_export_character_stream),
    # Export prompt_items CSV
    web.get("/sql/export_prompt_items_stream", handle_export_prompt_items_stream),
    # Import prompt_items CSV
    web.post("/import/prompt-items-csv", handle_import_prompt_items_csv),
    # Import translation CSV
    web.post("/import/translate-csv", handle_import_translate_csv),
    # Export translation CSV
    web.get("/sql/export_translate_stream", handle_export_translate_stream),
    # Export tags CSV
    web.get("/sql/export_tags_stream", handle_export_tags_stream),
    # Import tags CSV
    web.post("/import/tags-csv", handle_import_tags_csv),
    # Poll progress of a running import/export task
    web.get("/progress/{taskId}", handle_get_task_progress),
]
