import pandas as pd
import psycopg2
from psycopg2 import sql
from io import StringIO
import logging
import argparse
import os
import json
import time
import sys

# Logging configuration: write to both a UTF-8 log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(
            r"C:\Users\17610\Desktop\python脚本\log\csv_import.log",
            encoding='utf-8'
        ),
        logging.StreamHandler(),
    ]
)

# Log directory, used to store the failed-rows data file.
LOG_DIR = os.path.dirname(r"C:\Users\17610\Desktop\python脚本\log\csv_import.log")
# Path of the CSV file that collects rows which failed to import.
FAILED_DATA_FILE = os.path.join(LOG_DIR, "failed_import_data.csv")

# Database connection settings.
# NOTE(review): credentials are hard-coded here; consider moving them to
# environment variables or a config file outside version control.
DB_CONFIG = {
    "host": "172.31.169.55",
    "port": "5432",
    "dbname": "postgres",
    "user": "postgres",
    "password": "123456",
    "options": "-c client_encoding=utf8",
    "sslmode": "disable"
}

# Path of the JSON checkpoint file used for resume support.
STATE_FILE = r"C:\Users\17610\Desktop\python脚本\log\import_state.json"
# Maximum number of database write attempts per chunk.
MAX_RETRIES = 3
# Delay between retries, in seconds.
RETRY_DELAY = 5

def load_state():
    """Load the persisted import checkpoint, or return a fresh default state.

    Falls back to the default state when the file is missing or unreadable.
    """
    default_state = {
        "current_chunk": 0,
        "processed_rows": 0,
        "failed_chunks": [],
        "csv_path": "",
        "table_name": "",
        "header": []  # CSV header captured when the import started
    }
    if not os.path.exists(STATE_FILE):
        return default_state
    try:
        with open(STATE_FILE, 'r') as fp:
            return json.load(fp)
    except Exception as exc:
        logging.error(f"加载状态文件失败: {str(exc)}")
        return default_state

def save_state(state):
    """Persist the import checkpoint dict as pretty-printed JSON."""
    try:
        with open(STATE_FILE, 'w') as fp:
            fp.write(json.dumps(state, indent=2))
    except Exception as exc:
        logging.error(f"保存状态文件失败: {str(exc)}")

def clear_state():
    """Delete the checkpoint file if it exists; errors are logged, not raised."""
    if not os.path.exists(STATE_FILE):
        return
    try:
        os.remove(STATE_FILE)
    except Exception as exc:
        logging.error(f"清除状态文件失败: {str(exc)}")
    else:
        logging.info("状态文件已清除")

def fix_mojibake(text):
    """Attempt to repair mojibake by round-tripping through likely encodings.

    Tries each encode/decode pair in priority order and returns the first
    result that differs from the input. Non-strings, empty and
    whitespace-only strings, and unrepairable text are returned unchanged.
    """
    if not isinstance(text, str) or not text.strip():
        return text

    # (label, encode-as, decode-as) tried in priority order.
    attempts = (
        ("latin1→utf-8", 'latin1', 'utf-8'),
        ("gbk→utf-8", 'gbk', 'utf-8'),
        ("utf-8→gbk", 'utf-8', 'gbk'),
    )

    for label, src_enc, dst_enc in attempts:
        try:
            candidate = text.encode(src_enc).decode(dst_enc)
        except Exception as exc:
            logging.debug(f"[{label}]修复失败: {text[:20]}, 错误: {str(exc)}")
            continue
        if candidate != text:
            logging.debug(f"[{label}]修复成功: 原[{text[:20]}] → 新[{candidate[:20]}]")
            return candidate

    return text

def process_chunk(chunk):
    """Normalize a raw CSV chunk: repair mojibake and clean placeholder values.

    All cells are coerced to str first, then:
      * the 'albe0316' column (when present) gets mojibake repair,
      * runs of whitespace collapse to a single space,
      * double quotes are stripped from every cell,
      * common textual null markers are replaced with real None values.

    Returns the cleaned DataFrame (modified in place and returned).
    """
    chunk = chunk.astype(str)

    if 'albe0316' in chunk.columns:
        chunk['albe0316'] = chunk['albe0316'].apply(fix_mojibake)
        logging.debug("修复albe0316列: %s", chunk['albe0316'].head(2).tolist())

    # Collapse consecutive whitespace into a single space.
    regex_patterns = {
        r'\s+': ' '
    }
    chunk.replace(regex_patterns, regex=True, inplace=True)

    # NOTE: the '""' / '""""' keys can never match after the quote-stripping
    # replace below removes every '"'; kept for parity with historical data.
    string_mappings = {
        'null': None,
        '<br>': '',
        '\\N': None,
        'NaN': None,
        'nan': None,
        'None': None,
        '""': None,
        '""""': None
    }
    chunk.replace('"', '', regex=True, inplace=True)
    chunk.replace(string_mappings, regex=False, inplace=True)

    # Preview is built outside the f-string: a backslash inside an f-string
    # expression (sep='\t') is a SyntaxError on Python < 3.12.
    preview = chunk.head(3).to_csv(sep='\t')
    logging.debug("修复后数据:\n%s", preview)

    return chunk

def write_to_postgres(conn, chunk, full_table_name):
    """Bulk-load a DataFrame chunk into PostgreSQL using COPY FROM STDIN.

    Accepts an optionally schema-qualified table name ('schema.table');
    unqualified names default to the 'public' schema. Commits on success
    and returns True; rolls back and returns False on any error.
    """
    try:
        with conn.cursor() as cursor:
            # Surface the session encoding to help diagnose mojibake issues.
            cursor.execute("SHOW client_encoding;")
            encoding = cursor.fetchone()[0]
            logging.info(f"当前连接编码: {encoding}")

            # Serialize the chunk as tab-separated text, \N for NULLs.
            payload = StringIO()
            chunk.to_csv(payload, index=False, header=False, sep='\t', na_rep='\\N')
            payload.seek(0)

            # Resolve schema and bare table name.
            if '.' in full_table_name:
                schema, table = full_table_name.split('.')
            else:
                schema, table = 'public', full_table_name

            copy_stmt = sql.SQL("""
                COPY {}.{} FROM STDIN 
                WITH (
                    FORMAT CSV, 
                    DELIMITER E'\t', 
                    NULL '\\N',
                    QUOTE '"',
                    ESCAPE '\\'
                )
                """).format(sql.Identifier(schema), sql.Identifier(table))
            cursor.copy_expert(copy_stmt, payload)
        conn.commit()
        return True
    except Exception as exc:
        logging.error(f"写入数据库失败: {str(exc)}")
        conn.rollback()
        return False

def calculate_real_rows(csv_path):
    """Count the non-empty data rows of a CSV and capture its header.

    The first line is consumed as the header; every following non-blank
    line counts as one data row. Returns (row_count, header_columns).
    """
    with open(csv_path, 'r', encoding='utf-8', errors='ignore') as fh:
        first_line = next(fh, '')
        column_names = first_line.strip().split(',')
        data_rows = sum(1 for row in fh if row.strip())
    return data_rows, column_names

def write_failed_data(chunk, header, chunk_idx, is_remaining=False):
    """Append a failed chunk (or the leftover rows) to the failed-data CSV.

    Args:
        chunk: DataFrame holding the rows that could not be imported.
        header: list of column names, written once when the file is created.
        chunk_idx: index of the failed chunk (used in the log label).
        is_remaining: True when dumping the remaining unprocessed rows after
            an interruption rather than a single failed chunk.

    Errors are logged, never raised, so a reporting failure cannot abort
    the surrounding import loop.
    """
    try:
        # Make sure the target directory exists.
        os.makedirs(LOG_DIR, exist_ok=True)

        # Create the file with a header on first use, append afterwards.
        first_write = not os.path.exists(FAILED_DATA_FILE)
        mode = 'w' if first_write else 'a'

        # Label used for logging only. (The previous implementation also
        # injected this as a temporary '数据来源' column — mutating the
        # caller's DataFrame — only to exclude it again when writing, so
        # that dead mutation is dropped here.)
        source = "剩余未处理数据" if is_remaining else f"失败块{chunk_idx}"

        with open(FAILED_DATA_FILE, mode, encoding='utf-8', newline='') as f:
            if first_write and header:
                f.write(','.join(header) + '\n')
            # The open handle already encodes UTF-8; passing encoding= to
            # to_csv alongside a handle is ignored/deprecated by pandas.
            chunk.to_csv(f, index=False, header=False)

        logging.info(f"{source}已写入: {FAILED_DATA_FILE}")
    except Exception as e:
        logging.error(f"写入失败数据文件失败: {str(e)}")

def read_remaining_data(csv_path, start_row, total_rows, header):
    """Read the data rows that have not yet been imported.

    Args:
        csv_path: path to the source CSV (its first line is the header).
        start_row: number of data rows already processed.
        total_rows: total number of data rows in the file.
        header: original column names applied to the returned frame.

    Returns:
        DataFrame with the remaining rows (empty when nothing is left or
        on read error).
    """
    try:
        remaining_rows = total_rows - start_row
        if remaining_rows <= 0:
            return pd.DataFrame()

        # Skip the header line plus the already-processed data rows.
        # BUGFIX: the previous skiprows=range(1, start_row + 1) kept line 0
        # and — because names= implies header=None — re-read the header text
        # as a data row while dropping the last real row.
        return pd.read_csv(
            csv_path,
            skiprows=start_row + 1,  # 1 header line + start_row data rows
            nrows=remaining_rows,
            dtype=str,
            low_memory=False,
            names=header  # reapply the original header
        )
    except Exception as e:
        logging.error(f"读取剩余数据失败: {str(e)}")
        return pd.DataFrame()

def list_tables_in_schema(conn, schema_name):
    """Return the alphabetically sorted table names of the given schema.

    Queries information_schema.tables; returns an empty list on any error.
    """
    try:
        with conn.cursor() as cursor:
            cursor.execute("""
                SELECT table_name 
                FROM information_schema.tables 
                WHERE table_schema = %s 
                ORDER BY table_name;
            """, (schema_name,))
            return [record[0] for record in cursor.fetchall()]
    except Exception as exc:
        logging.error(f"获取表列表失败: {str(exc)}")
        return []

def main(csv_path, table_name, schema_name="public"):
    """Entry point: chunked CSV import into PostgreSQL, or table lookup.

    When both csv_path and table_name are given, imports the CSV in chunks
    with per-chunk retries, checkpoint/resume via the state file, and
    failed-row capture. When either is missing, only lists the tables in
    schema_name and returns.
    """
    # Table-lookup mode (triggered when csv_path or table_name is missing).
    if not csv_path or not table_name:
        try:
            conn = psycopg2.connect(**DB_CONFIG)
            logging.info(f"数据库连接成功，正在查找模式 '{schema_name}' 下的表...")
            
            tables = list_tables_in_schema(conn, schema_name)
            
            if tables:
                logging.info(f"在模式 '{schema_name}' 下找到 {len(tables)} 个表:")
                for table in tables:
                    logging.info(f"  - {table}")
            else:
                logging.info(f"在模式 '{schema_name}' 下未找到表")
                
        except Exception as e:
            logging.error(f"表查找失败: {str(e)}")
        finally:
            # 'conn' may be unbound here if connect() itself raised.
            if 'conn' in locals() and conn:
                conn.close()
                logging.info("数据库连接已关闭")
        return
    
    # Import mode (chunked import with resume support).
    state = load_state()
    last_exception = None
    
    # Count the real data rows and capture the CSV header.
    total_rows, header = calculate_real_rows(csv_path)
    state["header"] = header
    logging.info(f"实际有效行数: {total_rows}")
    
    # Detect an unfinished checkpoint belonging to a *different* CSV.
    if state["csv_path"] and state["csv_path"] != csv_path:
        logging.warning(f"检测到未完成的不同CSV任务({state['csv_path']})")
        if input("是否清除状态并开始新任务? (y/n): ").lower() == 'y':
            clear_state()
            state = load_state()
        else:
            logging.info("退出程序")
            return
    
    # Qualify the table name with the schema (schema.table) if needed.
    full_table_name = table_name
    if schema_name and '.' not in table_name:
        full_table_name = f"{schema_name}.{table_name}"
    
    # Persist the task description into the checkpoint state.
    state.update({
        "csv_path": csv_path,
        "table_name": full_table_name,
        "failed_chunks": state.get("failed_chunks", []),
        "header": header
    })
    save_state(state)
    
    # Open the database connection.
    conn = None
    try:
        conn = psycopg2.connect(**DB_CONFIG)
        logging.info("数据库连接成功")
    except Exception as e:
        last_exception = e
        logging.error(f"连接失败: {str(e)}")
        return
    
    try:
        processed_rows = state.get("processed_rows", 0)
        current_chunk = state.get("current_chunk", 0)
        
        logging.info(f"开始处理: 总行数={total_rows}, 已处理={processed_rows}, 当前块={current_chunk}")
        
        # Work out how many rows are left and size the chunks accordingly.
        remaining_rows = total_rows - processed_rows
        if remaining_rows <= 0:
            logging.info("导入完成，无剩余数据")
            clear_state()
            return
        
        chunksize = min(20000, remaining_rows)
        total_chunks = (remaining_rows + chunksize - 1) // chunksize
        logging.info(f"动态块大小: {chunksize}, 总块数: {total_chunks}")
        
        # Open the chunked CSV reader, skipping already-imported rows.
        skiprows = 1 + processed_rows  # 1 = header line; processed_rows = data rows already done
        reader = pd.read_csv(
            csv_path,
            chunksize=chunksize,
            skiprows=range(1, skiprows) if skiprows > 1 else None,
            dtype=str,
            low_memory=False
        )
        
        # Process each chunk in turn; chunk numbering resumes from the
        # checkpoint's current_chunk.
        for chunk_idx, chunk in enumerate(reader, current_chunk + 1):
            if len(chunk) == 0:
                logging.info(f"跳过空块 {chunk_idx}")
                state["current_chunk"] = chunk_idx
                save_state(state)
                continue
            
            state["current_chunk"] = chunk_idx
            save_state(state)
            
            # Clean the chunk; on failure record it and keep going.
            try:
                processed = process_chunk(chunk)
            except Exception as e:
                last_exception = e
                logging.error(f"处理块{chunk_idx}失败: {str(e)}")
                state["failed_chunks"].append({
                    "chunk": chunk_idx,
                    "start_row": processed_rows + 1,
                    "end_row": processed_rows + len(chunk),
                    "error": str(e),
                    "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
                })
                write_failed_data(chunk, header, chunk_idx)
                save_state(state)
                # Failed rows still advance the counter so the import can
                # move past them; they remain only in the failed-data file.
                processed_rows += len(chunk)
                continue
            
            # Write to the database with bounded retries.
            success = False
            for attempt in range(1, MAX_RETRIES + 1):
                try:
                    if write_to_postgres(conn, processed, full_table_name):
                        success = True
                        break
                    logging.warning(f"块{chunk_idx}写入失败，重试{attempt}/{MAX_RETRIES}")
                    time.sleep(RETRY_DELAY)
                except Exception as e:
                    last_exception = e
                    logging.error(f"写入尝试{attempt}失败: {str(e)}")
                    time.sleep(RETRY_DELAY)
            
            if success:
                processed_rows += len(chunk)
                state["processed_rows"] = processed_rows
                save_state(state)
                
                # Progress report.
                progress = min(100, round((processed_rows / total_rows) * 100, 1))
                logging.info(
                    f"进度: {progress}% | "
                    f"块: {chunk_idx}/{total_chunks} | "
                    f"行: {processed_rows}/{total_rows} | "
                    f"当前块大小: {len(chunk)}"
                )
            else:
                last_exception = Exception("多次重试失败")
                logging.error(f"块{chunk_idx}重试{MAX_RETRIES}次后失败")
                state["failed_chunks"].append({
                    "chunk": chunk_idx,
                    "start_row": processed_rows + 1,
                    "end_row": processed_rows + len(chunk),
                    "error": "多次重试后失败",
                    "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
                })
                write_failed_data(chunk, header, chunk_idx)
                save_state(state)
                processed_rows += len(chunk)
        
        # Final status handling.
        if processed_rows >= total_rows:
            if state["failed_chunks"]:
                success_rows = processed_rows - sum(fc['end_row'] - fc['start_row'] + 1 for fc in state["failed_chunks"])
                logging.warning(f"处理完成! 成功行数: {success_rows}/{total_rows}")
                logging.warning(f"失败块数: {len(state['failed_chunks'])}")
                logging.info(f"失败数据已保存至: {FAILED_DATA_FILE}")
            else:
                logging.info(f"处理完成! 共处理{processed_rows}行")
            clear_state()
        else:
            # Interrupted: dump the rows that were never attempted.
            remaining_data = read_remaining_data(csv_path, processed_rows, total_rows, header)
            if not remaining_data.empty:
                logging.info(f"检测到{len(remaining_data)}行剩余未处理数据")
                write_failed_data(remaining_data, header, "剩余数据", is_remaining=True)
            
            error_msg = f"处理中断! 成功行数: {processed_rows}/{total_rows}"
            if last_exception:
                error_msg += f", 原因: {str(last_exception)}"
            logging.warning(error_msg)
            if last_exception:
                logging.debug("异常详情:", exc_info=last_exception)
            # NOTE(review): the resume-chunk number logged here (+1) differs
            # from the one in the except-branch below — confirm which is intended.
            logging.info(f"状态已保存，下次从块{state['current_chunk'] + 1}继续")
            logging.info(f"失败数据已保存至: {FAILED_DATA_FILE}")
    
    except Exception as e:
        last_exception = e
        logging.error(f"处理失败: {str(e)}", exc_info=True)
        # Dump the unattempted rows after an unexpected error.
        # NOTE(review): if the exception fired before processed_rows was
        # assigned (first statement of the try), this would raise NameError.
        remaining_data = read_remaining_data(csv_path, processed_rows, total_rows, header)
        if not remaining_data.empty:
            logging.info(f"检测到{len(remaining_data)}行剩余未处理数据")
            write_failed_data(remaining_data, header, "剩余数据", is_remaining=True)
        logging.info(f"状态已保存，下次从块{state['current_chunk']}继续")
        logging.info(f"失败数据已保存至: {FAILED_DATA_FILE}")
    finally:
        if conn:
            conn.close()
            logging.info("数据库连接已关闭")
        if processed_rows < total_rows and last_exception:
            logging.warning(f"程序终止! 未完成导入，原因: {last_exception}")
        elif state["failed_chunks"]:
            logging.info(f"失败数据已保存至: {FAILED_DATA_FILE}")

if __name__ == "__main__":
    # CLI: two optional positionals select the mode — import when both are
    # given, table lookup for --schema otherwise.
    parser = argparse.ArgumentParser(description='CSV导入PostgreSQL（支持模式指定+表查找+断点续传）')
    parser.add_argument('csv_path', nargs='?', help='CSV文件路径（导入模式）')
    parser.add_argument('table_name', nargs='?', help='目标表名（导入模式）')
    parser.add_argument('--schema', default='public', help='模式名称（导入时自动拼接表名，查找表时指定模式）')
    
    args = parser.parse_args()
    
    # Dispatch: import mode needs both positionals; otherwise list tables.
    if args.csv_path and args.table_name:
        main(args.csv_path, args.table_name, args.schema)
    else:
        main(None, None, args.schema)