import io
import csv
import time
import pandas as pd
import asyncpg
from typing import Dict, List, Optional, Union, Literal, Any
from sqlalchemy.ext.asyncio import AsyncSession
import logging

logger = logging.getLogger("pgtool")


async def write_topgsql_async2(
    df: pd.DataFrame,
    schema: str,
    table: str,
    conn: Union[asyncpg.Connection, AsyncSession, Any],
    create: bool = False,
    update: bool = False,
    add_columns: bool = False,
    only_column: bool = False,
    write: Optional[Literal['auto', 'copy', 'execute', 'executemany', 'executemany_num']] = 'auto',
):
    """
    Asynchronously write a DataFrame to a PostgreSQL table with several
    advanced options. Accepts either a raw asyncpg.Connection or a
    SQLAlchemy AsyncSession.

    :param df: Data to write.
    :param schema: Database schema name.
    :param table: Table name.
    :param conn: Connection object (asyncpg.Connection or SQLAlchemy AsyncSession).
    :param create: Create the table if it does not exist.
    :param update: On primary-key conflict, update existing rows (upsert).
    :param add_columns: Add DataFrame columns that are missing from the table.
    :param only_column: Keep only DataFrame columns that already exist in the table.
    :param write: Write mode; 'auto' picks 'copy' or 'executemany_num' by data size.
    :return: Number of affected rows (0 when the DataFrame is empty).
    """
    start_time = time.time()

    # Adapt whatever connection object we were given to a raw asyncpg connection.
    actual_conn = await _get_asyncpg_connection(conn)

    if df.empty:
        logger.warning("DataFrame为空,跳过写入")
        return 0

    # Normalize NaN/NaT to None so asyncpg maps them to SQL NULL.
    df = df.where(pd.notnull(df), None)
    logger.info(f"✅ 开始写入数据到 {schema}.{table}")

    # Run the whole write inside a single transaction.
    async with actual_conn.transaction():
        affected = await _write_data(
            df=df,
            schema=schema,
            table=table,
            conn=actual_conn,
            create=create,
            update=update,
            add_columns=add_columns,
            only_column=only_column,
            write=write,
            start_time=start_time
        )

    # BUG FIX: this timing log was previously unreachable — it sat after a
    # `return` statement inside the `async with` block.
    end_time = time.time()
    logger.info(f"数据写入耗时：{end_time - start_time}秒")
    return affected

async def _get_asyncpg_connection(conn: Any) -> asyncpg.Connection:
    """Unwrap various connection objects down to a raw asyncpg connection."""
    # Already a raw asyncpg connection — nothing to unwrap.
    if isinstance(conn, asyncpg.Connection):
        return conn

    if isinstance(conn, AsyncSession):
        # SQLAlchemy AsyncSession: try the modern API first, then the legacy one.
        try:
            # SQLAlchemy 2.0+ style
            wrapped = await conn.connection()
            return wrapped.driver_connection
        except (AttributeError, TypeError):
            try:
                # SQLAlchemy 1.4+ style
                wrapped = conn.sync_session.connection()
                return wrapped.connection.driver_connection
            except AttributeError:
                raise ValueError("无法从 AsyncSession 获取 asyncpg 连接")

    # Duck-typed fallback for other wrapper objects.
    if hasattr(conn, 'driver_connection') and hasattr(conn.driver_connection, 'execute'):
        # SQLAlchemy async connection object
        return conn.driver_connection
    if hasattr(conn, 'get_raw_connection'):
        # Other async connection wrappers
        raw = await conn.get_raw_connection()
        return getattr(raw, 'driver_connection', raw)

    raise ValueError(f"不支持的连接类型: {type(conn)}")


async def _write_data(
    df: pd.DataFrame,
    schema: str,
    table: str,
    conn: asyncpg.Connection,
    create: bool,
    update: bool,
    add_columns: bool,
    only_column: bool,
    write: str,
    start_time: float
) -> int:
    """
    Core write routine: align DataFrame columns with the target table
    (optionally creating the table or adding missing columns), validate
    primary-key constraints, then dispatch to the selected write strategy.

    :param df: Data to write (non-empty; NaN already normalized to None).
    :param schema: Target schema name.
    :param table: Target table name.
    :param conn: Raw asyncpg connection (caller manages the transaction).
    :param create: Create the table if it does not exist.
    :param update: Upsert (update on PK conflict) instead of skipping rows.
    :param add_columns: Add DataFrame-only columns to the table.
    :param only_column: Drop DataFrame columns not present in the table.
    :param write: 'auto', 'copy', 'execute', 'executemany' or 'executemany_num'.
    :param start_time: Timestamp some strategies use for their timing log.
    :return: Number of affected rows.
    :raises ValueError: Missing table with create=False, unknown columns with
        add_columns=False, primary-key problems, or an invalid write mode.
    """
    logger.debug(f"获取表 {schema}.{table}列和字段类型")
    db_columns_info = await conn.fetch("""
        SELECT column_name, data_type
        FROM information_schema.columns
        WHERE table_schema = $1 AND table_name = $2
        ORDER BY ordinal_position;
    """, schema, table)

    db_columns = [row['column_name'] for row in db_columns_info]
    db_column_types = {row['column_name']: row['data_type'] for row in db_columns_info}
    table_exists = len(db_columns) > 0

    # pandas dtype -> PostgreSQL column type (anything unmapped becomes TEXT).
    dtype_mapping = {
        'int64': 'BIGINT',
        'int32': 'INTEGER',
        'float64': 'DOUBLE PRECISION',
        'float32': 'REAL',
        'bool': 'BOOLEAN',
        'object': 'varchar',
        'datetime64[ns]': 'TIMESTAMP',
        'timedelta64[ns]': 'INTERVAL'
    }

    if not table_exists:
        logger.debug(f"表 {schema}.{table} 不存在")
        if not create:
            raise ValueError(f"""Table "{schema}"."{table}" does not exist and create=False""")
        logger.debug(f"开始创建表 {schema}.{table}")
        cols_def = []
        created_types = {}
        for col in df.columns:
            pd_type = str(df[col].dtype)
            pg_type = dtype_mapping.get(pd_type, 'TEXT')
            logger.debug(f"{col}: {pd_type} -> {pg_type}")
            cols_def.append(f'"{col}" {pg_type}')
            created_types[col] = pg_type
        full_table = f'"{schema}"."{table}"'
        create_sql = f"CREATE TABLE {full_table} ({', '.join(cols_def)})"
        await conn.execute(create_sql)
        db_columns = list(df.columns)
        # BUG FIX: previously every newly created column was recorded as
        # 'TEXT' here, contradicting the types actually used in CREATE TABLE.
        db_column_types = created_types

    # Column alignment between the DataFrame and the table.
    df_cols = list(df.columns)
    if only_column:
        logger.debug(f"仅保存表 {schema}.{table} 已存在的列")
        common_cols = [c for c in df_cols if c in db_columns]
        df = df[common_cols]
        df_cols = common_cols
    elif add_columns:
        new_cols = [c for c in df_cols if c not in db_columns]
        full_table = f'"{schema}"."{table}"'
        for col in new_cols:
            logger.debug(f"添加表 {schema}.{table} 缺少的列{col}")
            pd_type = str(df[col].dtype)
            pg_type = dtype_mapping.get(pd_type, 'TEXT')
            await conn.execute(f'ALTER TABLE {full_table} ADD COLUMN "{col}" {pg_type}')
        db_columns.extend(new_cols)
        db_column_types.update({col: dtype_mapping.get(str(df[col].dtype), 'TEXT') for col in new_cols})
    else:
        extra = set(df_cols) - set(db_columns)
        if extra:
            raise ValueError(f"列 {extra} 不在目标表中，且 add_columns=False")

    # Discover primary-key columns; they drive ON CONFLICT handling below.
    logger.debug(f"获取表 {schema}.{table} 的主键字段")
    pk_rows = await conn.fetch("""
        SELECT kcu.column_name
        FROM information_schema.table_constraints tc
        JOIN information_schema.key_column_usage kcu
            ON tc.constraint_name = kcu.constraint_name
            AND tc.table_schema = kcu.table_schema
        WHERE tc.constraint_type = 'PRIMARY KEY'
            AND tc.table_schema = $1
            AND tc.table_name = $2
        ORDER BY kcu.ordinal_position;
    """, schema, table)
    pk_cols = [row['column_name'] for row in pk_rows]

    if pk_cols:
        logger.debug(f"表 {schema}.{table} 的主键字段为 {pk_cols}")
        if not set(pk_cols).issubset(set(df.columns)):
            raise ValueError(f"❌ 主键列{pk_cols}并未全部存在于DataFrame中")
        # Duplicate keys inside one batch would make ON CONFLICT ambiguous.
        if df[pk_cols].duplicated().any():
            raise ValueError("❌ DataFrame在主键字段上包含重复行")
    else:
        logger.debug(f"⚠️ 表 {schema}.{table} 没有主键字段")

    # Pick a write strategy: COPY for large payloads, executemany otherwise.
    nrows, ncols = df.shape
    if write == "auto":
        write = "copy" if nrows * ncols > 65535 else "executemany_num"

    # Dispatch to the concrete writer.
    if write == "copy":
        return await _write_copy(df, schema, table, conn, df_cols, pk_cols, update)
    elif write == "executemany":
        return await _write_executemany(df, schema, table, conn, df_cols, pk_cols, update)
    elif write == "execute":
        return await _write_execute(df, schema, table, conn, df_cols, pk_cols, update)
    elif write == "executemany_num":
        return await _write_executemany_num(df, schema, table, conn, df_cols, pk_cols, update, start_time)
    else:
        raise ValueError(f"无效的写入模式：{write}")

async def _write_copy(df, schema, table, conn, df_cols, pk_cols, update):
    """Bulk-load rows via PostgreSQL COPY into a staging temp table, then merge."""
    logger.info("使用 COPY 模式写入数据")
    # Empty staging clone of the target table (unique name per call).
    staging = f"_temp_{table}_{pd.Timestamp.now().strftime('%Y%m%d%H%M%S%f')}"
    await conn.execute(f'CREATE TEMP TABLE "{staging}" AS SELECT * FROM "{schema}"."{table}" WHERE 1=0 ')

    logger.debug("开始准备临时表数据....")
    # Render the selected columns as tab-separated CSV with \N for NULLs,
    # matching the COPY options below.
    payload = df[df_cols].to_csv(
        index=False,
        header=False,
        sep='\t',
        na_rep=r'\N',
        quoting=csv.QUOTE_MINIMAL
    ).encode('utf-8')

    with io.BytesIO(payload) as stream:
        logger.debug(f"开始写入数据到临时表{staging}")
        await conn.copy_to_table(
            table_name=staging,
            source=stream,
            columns=df_cols,
            format='csv',
            delimiter='\t',
            null=r'\N'
        )

    staged_rows = await conn.fetchval(f'SELECT COUNT(*) FROM "{staging}"')
    logger.info(f"临时表 {staging} 中有 {staged_rows} 行数据")

    col_list = ', '.join(f'"{c}"' for c in df_cols)
    pk_list = ', '.join(f'"{c}"' for c in pk_cols)

    if not pk_cols:
        # No primary key: plain append of everything staged.
        logger.debug(f"表 {schema}.{table} 无主键字段，直接写入全量数据")
        status = await conn.execute(f'INSERT INTO "{schema}"."{table}" SELECT * FROM "{staging}"')
    else:
        if update:
            assignments = ', '.join(f'"{c}" = EXCLUDED."{c}"' for c in df_cols if c not in pk_cols)
            conflict_action = f"DO UPDATE SET {assignments}"
        else:
            conflict_action = "DO NOTHING"

        merge_sql = f"""
            INSERT INTO "{schema}"."{table}" ({col_list})
            SELECT {col_list} FROM "{staging}"
            ON CONFLICT ({pk_list}) {conflict_action}
        """
        status = await conn.execute(merge_sql)

    # asyncpg returns a status tag like "INSERT 0 <n>"; take the row count.
    affected = int(status.split()[-1])
    logger.info(f"✅ {affected} 行已更新")
    return affected


async def _write_executemany(df, schema, table, conn, df_cols, pk_cols, update):
    """Insert all rows in one asyncpg executemany batch, with optional upsert."""
    logger.info("使用普通 INSERT 模式（executemany）写入数据")
    col_list = ', '.join(f'"{c}"' for c in df_cols)
    params = ', '.join(f'${i}' for i in range(1, len(df_cols) + 1))
    # Materialize the parameter tuples once, in df_cols order.
    rows = [tuple(r[c] for c in df_cols) for _, r in df.iterrows()]

    if not pk_cols:
        # No primary key: plain INSERT of every row.
        await conn.executemany(
            f'INSERT INTO "{schema}"."{table}" ({col_list}) VALUES ({params})',
            rows
        )
        logger.info(f"✅ 插入 {len(rows)} 行到 {schema}.{table}")
        return len(rows)

    if update:
        assignments = ', '.join(f'"{c}" = EXCLUDED."{c}"' for c in df_cols if c not in pk_cols)
        conflict_action = f"DO UPDATE SET {assignments}"
    else:
        conflict_action = "DO NOTHING"

    pk_list = ", ".join(f'"{c}"' for c in pk_cols)
    upsert_sql = f"""
            INSERT INTO "{schema}"."{table}" ({col_list})
            VALUES ({params})
            ON CONFLICT ({pk_list}) {conflict_action}
        """
    await conn.executemany(upsert_sql, rows)
    logger.info(f"✅ Upsert 完成，尝试写入 {len(rows)} 行")
    return len(rows)  # approximate: executemany reports no per-row status

async def _write_execute(df, schema, table, conn, df_cols, pk_cols, update):
    """Insert rows one statement at a time via asyncpg execute, optional upsert."""
    logger.info("使用普通 INSERT 模式(execute)写入数据")
    col_list = ', '.join(f'"{c}"' for c in df_cols)
    params = ', '.join(f'${i}' for i in range(1, len(df_cols) + 1))

    written = 0
    if not pk_cols:
        # No primary key: plain row-by-row INSERT.
        insert_sql = f'INSERT INTO "{schema}"."{table}" ({col_list}) VALUES ({params})'
        for _, r in df.iterrows():
            await conn.execute(insert_sql, *(r[c] for c in df_cols))
            written += 1
        logger.info(f"✅ 插入 {written} 行到 {schema}.{table}")
        return written

    if update:
        assignments = ', '.join(f'"{c}" = EXCLUDED."{c}"' for c in df_cols if c not in pk_cols)
        conflict_action = f"DO UPDATE SET {assignments}"
    else:
        conflict_action = "DO NOTHING"

    pk_list = ", ".join(f'"{c}"' for c in pk_cols)
    upsert_sql = f"""
            INSERT INTO "{schema}"."{table}" ({col_list})
            VALUES ({params})
            ON CONFLICT ({pk_list}) {conflict_action}
        """
    for _, r in df.iterrows():
        status = await conn.execute(upsert_sql, *(r[c] for c in df_cols))
        # Status tag looks like "INSERT 0 <n>"; count 1 when unparsable.
        try:
            written += int(status.split()[-1])
        except (ValueError, IndexError):
            written += 1
    logger.info(f"✅ Upsert 完成，影响约 {written} 行")
    return written


async def _write_executemany_num(df, schema, table, conn, df_cols, pk_cols, update, start_time):
    """Stage rows into a temp table via executemany, then merge into the target."""
    logger.info("使用普通 INSERT 模式（executemany临时表）写入数据")
    # Structural clone of the target table (unique name per call).
    staging = f"_temp_{table}_{pd.Timestamp.now().strftime('%Y%m%d%H%M%S%f')}"
    await conn.execute(f'CREATE TEMP TABLE "{staging}" (LIKE "{schema}"."{table}" INCLUDING ALL)')

    params = ', '.join(f'${i}' for i in range(1, len(df_cols) + 1))
    col_list = ", ".join(f'"{c}"' for c in df_cols)
    # Batch-insert raw row tuples into the staging table.
    await conn.executemany(
        f'INSERT INTO "{staging}" ({col_list}) VALUES ({params})',
        list(df.itertuples(index=False, name=None))
    )

    if not pk_cols:
        # No primary key: plain append of everything staged.
        status = await conn.execute(f'INSERT INTO "{schema}"."{table}" SELECT * FROM "{staging}"')
    else:
        if update:
            assignments = ', '.join(f'"{c}" = EXCLUDED."{c}"' for c in df_cols if c not in pk_cols)
            conflict_action = f"DO UPDATE SET {assignments}"
        else:
            conflict_action = "DO NOTHING"

        pk_list = ', '.join(f'"{c}"' for c in pk_cols)
        merge_sql = f"""
            INSERT INTO "{schema}"."{table}" ({col_list})
            SELECT {col_list} FROM "{staging}"
            ON CONFLICT ({pk_list}) {conflict_action}
        """
        status = await conn.execute(merge_sql)

    # Status tag looks like "INSERT 0 <n>"; take the row count.
    affected = int(status.split()[-1])
    logger.info(f"✅ {affected} 行已更新")

    logger.info(f"✅ 数据写入{schema}.{table}完成,总耗时 {time.time() - start_time:.2f} 秒")
    return affected