import uuid
from datetime import datetime
from time import sleep
from src.label import label_logger as logging
import time

from src.label.label_models import VvvApiCallRecord
import chardet
import pandas as pd
from sqlalchemy.orm import Session
from sqlalchemy.dialects.postgresql import insert

from src.config.database import SessionPgTest
from src.label.label_schemas import VvvApiCallRecordCreate


def safe_int(value, default=0):
    """Coerce *value* to ``int``, returning *default* when conversion fails.

    :param value: any value accepted by ``int()``; ``None`` or non-numeric
        strings fall back to *default*
    :param default: value returned when ``int(value)`` raises
    """
    try:
        result = int(value)
    except (TypeError, ValueError):
        result = default
    return result


def safe_datetime(value, default=None):
    """Parse *value* as a ``YYYYMMDD`` date, returning *default* on failure.

    The original implementation ignored *default* and let parse errors
    propagate, which contradicts the "safe_" contract used by ``safe_int``.

    :param value: anything whose ``str()`` form is a ``YYYYMMDD`` date
        (e.g. ``"20240101"`` or the int ``20240101``)
    :param default: value returned when parsing fails (defaults to ``None``)
    """
    try:
        return datetime.strptime(str(value), "%Y%m%d")
    except (TypeError, ValueError):
        return default


def load_csv(file_path, chunk_size=10000, max_rows=1000000):
    """Stream a CSV file into the database in chunks.

    :param file_path: path to the CSV file
    :param chunk_size: number of rows to read per chunk
    :param max_rows: maximum number of rows to process; ``None`` means
        process all remaining rows (the original compared against ``None``
        directly, which raised ``TypeError``)
    """
    db: Session = SessionPgTest()
    try:
        # 1. Auto-detect the file encoding from the first 10 KB.
        #    chardet may report {'encoding': None}; `or` guarantees a
        #    usable fallback where dict.get's default would not.
        with open(file_path, 'rb') as f:
            result = chardet.detect(f.read(10000))
        encoding = result.get('encoding') or 'utf-8'
        logging.info(f"使用编码格式：{encoding} 读取文件")

        # 2. Stream the CSV in chunks of at most `chunk_size` rows.
        total_processed = 0  # total rows processed so far
        circle_count = 0  # processing round counter

        for chunk in pd.read_csv(file_path, encoding=encoding, chunksize=chunk_size, on_bad_lines='skip',
                                 low_memory=False):
            circle_count += 1
            logging.info(f"正在执行第 {circle_count} 轮次，本轮将读取 {chunk.shape[0]} 行数据...")
            db_objects = []
            start_time = time.time()
            for _, row in chunk.iterrows():
                # None means "no limit" (see docstring), so only enforce when set.
                if max_rows is not None and total_processed >= max_rows:
                    logging.info(f"已达到最大行数限制：{max_rows}，停止处理。")
                    break

                try:
                    data = VvvApiCallRecordCreate(
                        id=str(uuid.uuid4()),
                        company_type=str(row.get('company_type')) if pd.notna(row.get('company_type')) else None,
                        user_id=str(row.get('usr_id')) if pd.notna(row.get('usr_id')) else None,
                        access_date=safe_datetime(row.get('access_date')) if pd.notna(
                            row.get('access_date')) else None,
                        access_times=safe_int(row.get('access_times')) if pd.notna(row.get('access_times')) else 0,
                        api_name=str(row.get('api_name')) if pd.notna(row.get('api_name')) else None,
                        resolved=0
                    )
                    data = data.model_dump()
                    # Convert the validated payload to an ORM object.
                    db_obj = convert_to_vvv_api_call_record(data)
                    db_objects.append(db_obj)
                    total_processed += 1

                except Exception as e:
                    # Best-effort load: skip bad rows but keep the batch going.
                    logging.error(f"跳过错误行：{row.to_dict()}，错误：{e}")
                    continue
            csv_read_and_convert_time = time.time()
            logging.info(f"第 {circle_count} 轮次数据已提取和转换完成，耗时{csv_read_and_convert_time - start_time:.2f}秒")
            # 3. Bulk-insert this round's rows in batches.
            if db_objects:
                # stmt = insert(VvvApiCallRecord).on_conflict_do_nothing(index_elements=['id'])
                stmt = insert(VvvApiCallRecord)
                BATCH_SIZE = 10000  # rows per commit
                commit_count = 0  # commits performed this round
                for i in range(0, len(db_objects), BATCH_SIZE):
                    commit_start_time = time.time()
                    commit_count += 1
                    batch = db_objects[i:i + BATCH_SIZE]
                    try:
                        db.execute(stmt, [obj.to_dict() for obj in batch])
                        db.commit()
                        commit_end_time = time.time()
                        logging.info(
                            f"已提交第 {circle_count} 轮次，第{commit_count} 批次数据，共提交 {len(batch)} 条记录，耗时{commit_end_time - commit_start_time:.2f}秒")
                    except Exception as e:
                        db.rollback()
                        logging.error(f"提交失败，错误：{e}")
                        sleep(1)  # back off briefly before the next batch

            # Log the round summary BEFORE breaking so the final round is
            # reported too (the original skipped it when max_rows was hit).
            end_time = time.time()
            logging.info(
                f"==================本轮耗时 {end_time - start_time:.2f} 秒，目前已处理 {total_processed} 行数据==================")
            if max_rows is not None and total_processed >= max_rows:
                break
    finally:
        # Always release the session, even when pandas/DB calls raise.
        db.close()


def convert_to_vvv_api_call_record(data: dict) -> VvvApiCallRecord:
    """Build a ``VvvApiCallRecord`` ORM object from a plain dict.

    The caller passes ``VvvApiCallRecordCreate(...).model_dump()``, i.e. a
    ``dict`` — the previous ``VvvApiCallRecordCreate`` annotation was wrong
    and did not match the ``.get`` access pattern used below.

    :param data: mapping with keys ``id``, ``company_type``, ``user_id``,
        ``access_date``, ``access_times``, ``api_name``
    :return: unsaved ORM instance with ``resolved`` forced to 0
    """
    return VvvApiCallRecord(
        id=data.get('id'),
        company_type=data.get('company_type'),
        user_id=data.get('user_id'),
        access_date=data.get('access_date'),
        access_times=data.get('access_times'),
        api_name=data.get('api_name'),
        resolved=0
    )


if __name__ == '__main__':
    # Script entry point: load one exported CSV of 2025 API access records
    # (Windows path, hence the escaped backslashes).
    input_file = 'D:\\2-work\\25-创意组\\标签用户体系\\标签体系原始数据\\segmentation\\船视宝2025年用户接口访问记录\\part2\\船视宝2025年用户接口访问记录-part-2-part-2.csv'
    load_csv(input_file)
