from datetime import datetime
import json
from src.label.label_models import HiDolphinQaData
import chardet
import pandas as pd
from sqlalchemy.orm import Session
from sqlalchemy.dialects.postgresql import insert
import os
from src.label import label_logger as logging
from src.config.database import SessionPgTest
from src.label.label_schemas import HiDolphinQaDataWDTO


def safe_parse_list(value, default=None):
    """Parse *value* as JSON when it is a non-blank string.

    Returns *default* (or ``[]`` when *default* is None) for blank
    strings, non-string inputs, and invalid JSON.
    """
    fallback = [] if default is None else default
    if not isinstance(value, str) or not value.strip():
        return fallback
    try:
        return json.loads(value)
    except (json.JSONDecodeError, TypeError):
        return fallback


def safe_float(value, default=0.0):
    """Coerce *value* to float, falling back to *default*.

    Numbers are converted directly; non-blank strings are parsed;
    everything else (None, blank strings, unparseable text) yields
    *default*.
    """
    if isinstance(value, (int, float)):
        return float(value)
    if isinstance(value, str) and value.strip():
        try:
            return float(value)
        except ValueError:
            return default
    return default


def safe_int(value, default=0):
    """Return ``int(value)``, or *default* when conversion is impossible."""
    try:
        result = int(value)
    except (TypeError, ValueError):
        result = default
    return result


def safe_datetime(value, default=None):
    """Convert *value* to a datetime, or return *default*.

    Accepts datetime instances (returned unchanged) and strings in
    ISO-8601 form (a trailing ``Z`` is read as UTC) or
    ``YYYY-MM-DD HH:MM:SS`` form.  Anything else yields *default*.
    """
    if isinstance(value, datetime):
        return value
    if not isinstance(value, str):
        return default
    # Try the common formats in order; first successful parse wins.
    parsers = (
        lambda s: datetime.fromisoformat(s.replace("Z", "+00:00")),
        lambda s: datetime.strptime(s, "%Y-%m-%d %H:%M:%S"),
    )
    for parse in parsers:
        try:
            return parse(value)
        except ValueError:
            continue
    return default


def _opt_str(value):
    """Return str(value), or None when the value is missing/NaN."""
    return str(value) if pd.notna(value) else None


def _opt_bool(value):
    """Return bool(value), treating missing/NaN as False.

    Bare ``bool(row.get(...))`` would be True for NaN (NaN is truthy),
    silently marking missing flags as set.
    """
    return bool(value) if pd.notna(value) else False


def _row_to_orm(row):
    """Validate one CSV row through the DTO and convert it to an ORM object.

    Raises on rows the DTO rejects; the caller logs and skips those rows.
    """
    dto = HiDolphinQaDataWDTO(
        id=str(row.get('id')),
        user_id=str(row.get('userId')),
        project_id=str(row.get('projectId')),
        timestamp=safe_datetime(row.get('timestamp')),
        tags=safe_parse_list(row.get('tags')),  # JSON string -> list
        bookmarked=_opt_bool(row.get('bookmarked')),
        name=_opt_str(row.get('name')),
        release=_opt_str(row.get('release')),
        version=_opt_str(row.get('version')),
        session_id=_opt_str(row.get('sessionId')),
        public=_opt_bool(row.get('public')),
        input=_opt_str(row.get('input')),
        output=_opt_str(row.get('output')),
        metadata=_opt_str(row.get('metadata')),
        latency=safe_float(row.get('latency')),
        usage=_opt_str(row.get('usage')),
        input_cost=safe_float(row.get('inputCost')),
        output_cost=safe_float(row.get('outputCost')),
        total_cost=safe_float(row.get('totalCost')),
        level=_opt_str(row.get('level')),
        # safe_int: a NaN/missing count defaults to 0 instead of raising —
        # bare int(...) would drop the whole row on a single NaN cell.
        error_count=safe_int(row.get('errorCount')),
        warning_count=safe_int(row.get('warningCount')),
        debug_count=safe_int(row.get('debugCount')),
        observation_count=safe_int(row.get('observationCount')),
        input_tokens=safe_int(row.get('inputTokens')),
        output_tokens=safe_int(row.get('outputTokens')),
        total_tokens=safe_int(row.get('totalTokens')),
        default_count=safe_int(row.get('defaultCount')),
    )
    return convert_to_hi_dolphin_qa_data(dto.model_dump())


def load_csv(file_path, chunk_size=10000, max_rows=1000000):
    """
    Stream-read a CSV file in chunks and bulk-insert rows into the database.

    Rows that fail DTO validation are logged and skipped; inserts use
    ON CONFLICT DO NOTHING on ``id`` so re-runs are idempotent.

    :param file_path: path to the input file; only ``.csv`` is processed,
        any other extension is skipped with no effect
    :param chunk_size: number of rows to read per chunk
    :param max_rows: maximum total number of rows to process
    """
    file_ext = os.path.splitext(file_path)[1].lower()
    logging.info(f"开始处理文件：{file_path}，类型：{file_ext}")

    # Only CSV input is supported; bail out before touching the DB so
    # nothing needs cleaning up for unsupported files.
    if file_ext != '.csv':
        return

    # Detect the file encoding once from a sample of the raw bytes.
    with open(file_path, 'rb') as f:
        detected = chardet.detect(f.read(10000))
    encoding = detected.get('encoding') or 'gbk'  # chardet may report None
    logging.info(f"使用编码格式：{encoding} 读取 CSV 文件")

    reader = pd.read_csv(file_path, encoding=encoding, chunksize=chunk_size, on_bad_lines='skip', low_memory=False)

    db: Session = SessionPgTest()
    total_processed = 0  # rows accepted so far, across all chunks
    try:
        for chunk in reader:
            logging.info(f"正在处理 {chunk.shape[0]} 行数据...")
            db_objects = []
            for _, row in chunk.iterrows():
                if total_processed >= max_rows:
                    logging.info(f"已达到最大行数限制：{max_rows}，停止处理。")
                    break
                try:
                    db_objects.append(_row_to_orm(row))
                    total_processed += 1
                except Exception as e:
                    logging.error(f"跳过错误行：{row.to_dict()}，错误：{e}")
                    continue

            # Bulk-insert the chunk, ignoring rows whose id already exists.
            if db_objects:
                stmt = insert(HiDolphinQaData).on_conflict_do_nothing(index_elements=['id'])
                db.execute(stmt, [obj.to_dict() for obj in db_objects])
                db.commit()
                logging.info(f"已批量插入（忽略冲突）{len(db_objects)} 条记录")

            if total_processed >= max_rows:
                break
    finally:
        # Always release the session, even when parsing or the DB fails.
        db.close()


def convert_to_hi_dolphin_qa_data(data: dict) -> HiDolphinQaData:
    """Map a dumped DTO dict onto a HiDolphinQaData ORM object.

    ``data`` is the result of ``HiDolphinQaDataWDTO.model_dump()`` (the
    previous ``HiDolphinQaDataWDTO`` annotation was wrong — the body uses
    ``data.get``, a dict method).  The ``input``/``output`` fields hold
    JSON strings; ``sys.query`` and ``answer`` are pulled out of them and
    become None when the payload is missing, malformed, or lacks the key.

    :param data: dict produced by the DTO's ``model_dump()``
    :return: unsaved HiDolphinQaData instance
    """
    def _parse_json(text):
        # Tolerate missing or malformed JSON payloads.
        if not text:
            return {}
        try:
            return json.loads(text)
        except json.JSONDecodeError:
            return {}

    query = _parse_json(data.get("input")).get("sys.query")
    answer = _parse_json(data.get("output")).get("answer")

    return HiDolphinQaData(
        id=data.get("id"),
        user_id=data.get("user_id"),
        project_id=data.get("project_id"),
        query=query,    # extracted from the input JSON
        answer=answer,  # extracted from the output JSON
        input_tokens=data.get("input_tokens"),
        output_tokens=data.get("output_tokens"),
        total_tokens=data.get("total_tokens"),
        timestamp=data.get("timestamp"),
        resolved=0,  # newly imported rows start unresolved
    )


if __name__ == '__main__':
    # NOTE(review): load_csv only processes files with a '.csv' extension,
    # so this hard-coded .xlsx path is skipped silently — confirm the
    # intended input file.
    source_path = 'D:\\2-work\\25-创意组\\标签用户体系\\标签体系原始数据\\1740108405166-lf-traces-export-cm70928mw01wqad06w925cusk.xlsx'
    load_csv(source_path)
