import io
import csv
import time
import pandas as pd
import asyncpg
from typing import Dict, List, Optional,Union,Literal
# import warnings
import logging

logger = logging.getLogger("pgtool")


async def write_topgsql_async(
    df: pd.DataFrame,           # data to write
    schema: str,                # target schema name
    table: str,                 # target table name
    conn: asyncpg.Connection,   # open asyncpg connection
    create: bool = False,       # create the table if it does not exist
    update: bool = False,       # on PK conflict, update rows instead of ignoring them
    add_columns: bool = False,  # add DataFrame-only columns to the table
    only_column: bool = False,  # keep only columns already in the table (wins over add_columns)
    write: Optional[Literal['auto', 'copy', 'execute', 'executemany', 'executemany_num']] = 'auto',
):
    """
    Asynchronously write a DataFrame into a PostgreSQL table with several
    advanced options (table creation, column alignment, upsert, multiple
    write strategies).

    Note: this function uses asyncpg, so connection parameters must be
    asyncpg-compatible (e.g. 'database' instead of 'dbname').

    Params
        :df (pd.DataFrame): data to write.
        :schema (str): database schema name.
        :table (str): table name.
        :conn (asyncpg.Connection): connection object.
        :create (bool, optional): create the table when it does not exist. Default False.
        :update (bool, optional): on primary-key conflict, update the row
            (DO UPDATE); otherwise the conflicting row is skipped (DO NOTHING).
            Default False.
        :add_columns (bool, optional): add DataFrame columns missing from the
            table. Default False.
        :only_column (bool, optional): keep only the columns that already exist
            in the table (takes precedence over add_columns). Default False.
        :write: write strategy.
            'copy'            - COPY through a temp table; best for large data.
            'execute'         - row-by-row INSERT; small data.
            'executemany'     - batched INSERT; affected row count is not
                                available (asyncpg limitation), returns None
                                when the table has a primary key.
            'executemany_num' - batched INSERT through a temp table so the
                                affected row count can be reported.
            'auto'            - picks 'copy' or 'executemany_num' based on
                                rows*cols.
        :return: number of affected/attempted rows, or None when the mode
            cannot determine it ('executemany' with a primary key) or when
            df is empty (nothing is written).
        :raises ValueError: table missing with create=False; DataFrame columns
            not in the table with add_columns=False; primary-key columns
            missing from the DataFrame; duplicate primary-key rows; invalid
            write mode.
    """
    start_time = time.time()
    if df.empty:
        logger.warning("DataFrame为空,跳过写入")
        return
    # NaN -> None, otherwise asyncpg cannot encode the values.
    df = df.where(pd.notnull(df), None)
    logger.info(f"✅ 开始写入数据到 {schema}.{table}")

    # Rows affected; stays None when the chosen mode cannot report a count.
    # (Previously this was left unbound on several paths, making the final
    # `return count` raise NameError.)
    count = None

    async with conn.transaction():
        logger.debug(f"获取表 {schema}.{table}列和字段类型")
        # Fetch the target table structure.
        db_columns_info = await conn.fetch("""
            SELECT column_name, data_type
            FROM information_schema.columns
            WHERE table_schema = $1 AND table_name = $2
            ORDER BY ordinal_position;
        """, schema, table)

        db_columns = [row['column_name'] for row in db_columns_info]
        db_column_types = {row['column_name']: row['data_type'] for row in db_columns_info}
        table_exists = len(db_columns) > 0

        # pandas dtype -> PostgreSQL type; anything unmapped falls back to TEXT.
        dtype_mapping = {
            'int64': 'BIGINT',
            'int32': 'INTEGER',
            'float64': 'DOUBLE PRECISION',
            'float32': 'REAL',
            'bool': 'BOOLEAN',
            'object': 'varchar',
            'datetime64[ns]': 'TIMESTAMP',
            'timedelta64[ns]': 'INTERVAL'
        }

        if not table_exists:
            logger.debug(f"表 {schema}.{table} 不存在")
            if create:
                logger.debug(f"开始创建表 {schema}.{table}")
                cols_def = []
                for col in df.columns:
                    pd_type = str(df[col].dtype)
                    pg_type = dtype_mapping.get(pd_type, 'TEXT')
                    logger.debug(f"{col}: {pd_type} -> {pg_type}")
                    # Quote the column name to keep identifiers safe.
                    cols_def.append(f'"{col}" {pg_type}')
                full_table = f'"{schema}"."{table}"'
                create_sql = f"CREATE TABLE {full_table} ({', '.join(cols_def)})"
                await conn.execute(create_sql)
                db_columns = list(df.columns)
                # Record the types the table was actually created with
                # (previously everything was recorded as TEXT).
                db_column_types = {
                    col: dtype_mapping.get(str(df[col].dtype), 'TEXT')
                    for col in df.columns
                }
            else:
                raise ValueError(f"""Table "{schema}"."{table}" does not exist and create=False""")

        # Column alignment between the DataFrame and the table.
        df_cols = list(df.columns)
        if only_column:
            logger.debug(f"仅保存表 {schema}.{table} 已存在的列")
            common_cols = [c for c in df_cols if c in db_columns]
            df = df[common_cols]
            df_cols = common_cols
        elif add_columns:
            new_cols = [c for c in df_cols if c not in db_columns]
            full_table = f'"{schema}"."{table}"'
            for col in new_cols:
                logger.debug(f"添加表 {schema}.{table} 缺少的列{col}")
                pd_type = str(df[col].dtype)
                pg_type = dtype_mapping.get(pd_type, 'TEXT')
                await conn.execute(f'ALTER TABLE {full_table} ADD COLUMN "{col}" {pg_type}')
            db_columns.extend(new_cols)
            db_column_types.update({col: dtype_mapping.get(str(df[col].dtype), 'TEXT') for col in new_cols})
        else:
            extra = set(df_cols) - set(db_columns)
            if extra:
                raise ValueError(f"列 {extra} 不在目标表中，且 add_columns=False")

        # Fetch the primary-key columns.
        logger.debug(f"获取表 {schema}.{table} 的主键字段")
        pk_rows = await conn.fetch("""
            SELECT kcu.column_name
            FROM information_schema.table_constraints tc
            JOIN information_schema.key_column_usage kcu
              ON tc.constraint_name = kcu.constraint_name
             AND tc.table_schema = kcu.table_schema
            WHERE tc.constraint_type = 'PRIMARY KEY'
              AND tc.table_schema = $1
              AND tc.table_name = $2
            ORDER BY kcu.ordinal_position;
        """, schema, table)
        pk_cols = [row['column_name'] for row in pk_rows]

        if pk_cols:
            logger.debug(f"表 {schema}.{table} 的主键字段为 {pk_cols}")
            if not set(pk_cols).issubset(set(df.columns)):
                raise ValueError(f"❌ 主键列{pk_cols}并未全部存在于DataFrame中")
            if df[pk_cols].duplicated().any():
                raise ValueError("❌ DataFrame在主键字段上包含重复行")
        else:
            logger.debug(f"⚠️ 表 {schema}.{table} 没有主键字段")

        nrows, ncols = df.shape

        # Auto mode: COPY for large payloads, executemany_num otherwise
        # (65535 is asyncpg's bind-parameter limit per statement).
        if write == "auto":
            if nrows * ncols > 65535:
                write = "copy"
            else:
                write = "executemany_num"

        if write == "copy":
            logger.info("使用 COPY 模式写入数据")
            # Stage rows in a structure-only temp table, then merge.
            temp_table = f"_temp_{table}_{pd.Timestamp.now().strftime('%Y%m%d%H%M%S%f')}"
            await conn.execute(f'CREATE TEMP TABLE "{temp_table}" AS SELECT * FROM "{schema}"."{table}" WHERE 1=0 ')

            # Serialize the frame to tab-separated CSV in memory and COPY it in.
            with io.BytesIO() as buffer:
                logger.debug(f"开始准备临时表数据....")
                df_subset = df[df_cols].copy()

                csv_content = df_subset.to_csv(
                    index=False,
                    header=False,
                    sep='\t',
                    na_rep=r'\N',  # must match the COPY null marker below
                    quoting=csv.QUOTE_MINIMAL
                )
                buffer.write(csv_content.encode('utf-8'))
                buffer.seek(0)

                logger.debug(f"开始写入数据到临时表{temp_table}")
                await conn.copy_to_table(
                    table_name=temp_table,
                    source=buffer,
                    columns=df_cols,
                    format='csv',
                    delimiter='\t',
                    null=r'\N'
                )

            temp_count = await conn.fetchval(f'SELECT COUNT(*) FROM "{temp_table}"')
            logger.info(f"临时表 {temp_table} 中有 {temp_count} 行数据")

            # Merge the staged rows into the target table.
            quoted_df_cols = [f'"{c}"' for c in df_cols]
            quoted_pk_cols = [f'"{c}"' for c in pk_cols]

            if not pk_cols:
                logger.debug(f"表 {schema}.{table} 无主键字段，直接写入全量数据")
                result = await conn.execute(f'INSERT INTO "{schema}"."{table}" SELECT * FROM "{temp_table}"')
            else:
                if update:
                    logger.debug(f"主键冲突时update数据到 {schema}.{table}")
                    update_set = ', '.join([f'"{col}" = EXCLUDED."{col}"' for col in df_cols if col not in pk_cols])
                    conflict_action = f"DO UPDATE SET {update_set}"
                else:
                    logger.debug(f"主键冲突时ignore写入到 {schema}.{table}")
                    conflict_action = "DO NOTHING"

                upsert_sql = f"""
                    INSERT INTO "{schema}"."{table}" ({', '.join(quoted_df_cols)})
                    SELECT {', '.join(quoted_df_cols)} FROM "{temp_table}"
                    ON CONFLICT ({', '.join(quoted_pk_cols)}) {conflict_action}
                """
                result = await conn.execute(upsert_sql)
            # Status tag looks like "INSERT 0 <n>"; the last token is the count.
            count = int(result.split()[-1])
            logger.info(f"✅ {count} 行已更新")

        elif write == "executemany":
            # Batched INSERT / UPSERT via executemany.
            logger.info("使用普通 INSERT 模式（executemany）写入数据")

            quoted_df_cols = [f'"{c}"' for c in df_cols]
            placeholders = ', '.join([f'${i+1}' for i in range(len(df_cols))])

            if not pk_cols:
                # No primary key: plain insert of every row.
                logger.debug(f"表 {schema}.{table} 无主键字段，直接写入全量数据")
                insert_sql = f'INSERT INTO "{schema}"."{table}" ({", ".join(quoted_df_cols)}) VALUES ({placeholders})'
                values_list = [tuple(row[c] for c in df_cols) for _, row in df.iterrows()]
                result = await conn.executemany(insert_sql, values_list)
                total_inserted = len(values_list)
                count = total_inserted  # no conflicts possible, so this is exact
                logger.info(f"✅ 插入 {total_inserted} 行到 {schema}.{table} {result}")
            else:
                # Primary key present: resolve conflicts (DO NOTHING or DO UPDATE).
                if update:
                    logger.debug(f"主键冲突时update数据到 {schema}.{table}")
                    update_set = ', '.join([f'"{col}" = EXCLUDED."{col}"' for col in df_cols if col not in pk_cols])
                    conflict_action = f"DO UPDATE SET {update_set}"
                else:
                    logger.debug(f"主键冲突时ignore写入到 {schema}.{table}")
                    conflict_action = "DO NOTHING"

                upsert_sql = f"""
                    INSERT INTO "{schema}"."{table}" ({", ".join(quoted_df_cols)})
                    VALUES ({placeholders})
                    ON CONFLICT ({", ".join([f'"{c}"' for c in pk_cols])}) {conflict_action}
                """
                values_list = [tuple(row[c] for c in df_cols) for _, row in df.iterrows()]
                # asyncpg's executemany does not report affected rows, so no
                # exact count is available in this mode.
                result = await conn.executemany(upsert_sql, values_list)
                count = None
                total_upserted = len(values_list)  # attempted rows only; conflicts may reduce the real count
                logger.info(f"✅ Upsert 完成，尝试写入 {total_upserted} 行（实际影响行数可能因冲突而不同）{result}")

        elif write == "execute":
            # Row-by-row INSERT / UPSERT.
            logger.info("使用普通 INSERT 模式(execute)写入数据")

            quoted_df_cols = [f'"{c}"' for c in df_cols]
            placeholders = ', '.join([f'${i+1}' for i in range(len(df_cols))])

            if not pk_cols:
                # No primary key: plain insert of every row.
                logger.debug(f"表 {schema}.{table} 无主键字段，直接写入全量数据")
                insert_sql = f'INSERT INTO "{schema}"."{table}" ({", ".join(quoted_df_cols)}) VALUES ({placeholders})'
                total_inserted = 0
                for _, row in df.iterrows():
                    values = tuple(row[c] for c in df_cols)
                    await conn.execute(insert_sql, *values)
                    total_inserted += 1
                count = total_inserted
                logger.info(f"✅ 插入 {total_inserted} 行到 {schema}.{table}")
            else:
                # Primary key present: resolve conflicts (DO NOTHING or DO UPDATE).
                if update:
                    logger.debug(f"主键冲突时update数据到 {schema}.{table}")
                    update_set = ', '.join([f'"{col}" = EXCLUDED."{col}"' for col in df_cols if col not in pk_cols])
                    conflict_action = f"DO UPDATE SET {update_set}"
                else:
                    logger.debug(f"主键冲突时ignore写入到 {schema}.{table}")
                    conflict_action = "DO NOTHING"

                upsert_sql = f"""
                    INSERT INTO "{schema}"."{table}" ({", ".join(quoted_df_cols)})
                    VALUES ({placeholders})
                    ON CONFLICT ({", ".join([f'"{c}"' for c in pk_cols])}) {conflict_action}
                """
                count = 0
                for _, row in df.iterrows():
                    values = tuple(row[c] for c in df_cols)
                    result = await conn.execute(upsert_sql, *values)
                    # asyncpg returns a tag like "INSERT 0 1" or "UPDATE 1";
                    # accumulate the per-statement count (the previous code
                    # overwrote the accumulator and doubled the last value).
                    try:
                        count += int(result.split()[-1])
                    except (ValueError, IndexError):
                        pass  # unparseable tag, skip counting this row
                logger.info(f"✅ Upsert 完成，影响约 {count} 行")
        elif write == 'executemany_num':
            logger.info("使用普通 INSERT 模式（executemany临时表）写入数据")
            # Stage rows in a temp table so the merge can report a row count.
            logger.debug(f"开始建临时表 {schema}.{table} ")
            temp_table = f"_temp_{table}_{pd.Timestamp.now().strftime('%Y%m%d%H%M%S%f')}"
            await conn.execute(f'CREATE TEMP TABLE "{temp_table}" (LIKE "{schema}"."{table}" INCLUDING ALL)')

            logger.debug(f"开始写入数据到临时表 {temp_table}")
            records = list(df.itertuples(index=False, name=None))
            placeholders = ', '.join(['$' + str(i + 1) for i in range(len(df_cols))])
            col_names = ", ".join(f'"{c}"' for c in df_cols)
            insert_temp = f'INSERT INTO "{temp_table}" ({col_names}) VALUES ({placeholders})'
            await conn.executemany(insert_temp, records)

            # Merge the staged rows into the target table.
            quoted_df_cols = [f'"{c}"' for c in df_cols]
            quoted_pk_cols = [f'"{c}"' for c in pk_cols]
            if not pk_cols:
                logger.debug(f"表 {schema}.{table} 无主键字段，直接写入全量数据")
                result = await conn.execute(f'INSERT INTO "{schema}"."{table}" SELECT * FROM "{temp_table}"')
                logger.info(f"✅ 插入 {result} 行到 {schema}.{table}")
            else:
                if update:
                    logger.debug(f"主键冲突时update数据到 {schema}.{table}")
                    update_set = ', '.join([f'"{col}" = EXCLUDED."{col}"' for col in df_cols if col not in pk_cols])
                    conflict_action = f"DO UPDATE SET {update_set}"
                else:
                    logger.debug(f"主键冲突时ignore写入到 {schema}.{table}")
                    conflict_action = "DO NOTHING"

                upsert_sql = f"""
                    INSERT INTO "{schema}"."{table}" ({', '.join(quoted_df_cols)})
                    SELECT {', '.join(quoted_df_cols)} FROM "{temp_table}"
                    ON CONFLICT ({', '.join(quoted_pk_cols)}) {conflict_action}
                """
                result = await conn.execute(upsert_sql)
            # Status tag looks like "INSERT 0 <n>"; the last token is the count.
            count = int(result.split()[-1])
            logger.info(f"✅ {count} 行已更新")
        else:
            raise ValueError(f"无效的写入模式：{write}")

    end_time = time.time()
    logger.info(f"✅ 数据写入{schema}.{table}完成,总耗时 {end_time - start_time:.2f} 秒")
    return count