import pandas as pd
from sqlalchemy import create_engine, text, inspect
import re
import logging
import numpy as np
from datetime import datetime

# Logging configuration: INFO level, written to both a log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('policy_import.log'),
        logging.StreamHandler()
    ]
)

# PostgreSQL connection settings.
# NOTE(review): credentials are hard-coded; consider loading from environment
# variables or a config file before deploying.
DB_CONFIG = {
    'host': 'localhost',
    'port': '5432',
    'database': 'Policy_DB',
    'user': 'postgres',
    'password': 'postgres'
}

# Mapping from the Excel file's Chinese column headers to SQL column names.
# Keys are the exact header strings expected in the spreadsheet.
COLUMN_MAPPING = {
    '政策ID': 'policy_id',
    '标题': 'title',
    '成文日期': 'write_time',
    '发文日期': 'publish_time',
    '实施日期': 'start_time',
    '废止日期': 'end_time',
    '是否有效': 'validity',
    '文件类型': 'file_type',
    '发文单位': 'department',
    '文号': 'document_number',
    '正文': 'main_text',
    '是否是惠企政策': 'is_company_policy',
    '是否是跨境政策': 'is_crossborder_policy',
    '政策级别': 'level',
    '政策类型': 'type',
    '一级地址': 'address_1',
    '二级地址': 'address_2',
    '三级地址': 'address_3',
    '四级地址': 'address_4',
    '五级地址': 'address_5',
    '六级地址': 'address_6'
}

def clean_and_validate_data(df):
    """Clean and validate the policy DataFrame in place.

    - Coerces date columns to datetime, logging how many rows fail to parse.
    - Normalizes boolean columns ('是'/'否' and native bools) to bool,
      treating missing values as False.
    - Strips whitespace from text columns; empty strings become missing.
    - Fills missing address columns with empty strings.

    Args:
        df: DataFrame whose columns have already been renamed via
            COLUMN_MAPPING.

    Returns:
        The same (mutated) DataFrame.
    """
    logger = logging.getLogger('clean_data')

    # Date columns. Bug fix: these previously used '*_date' names that never
    # matched the '*_time' names produced by COLUMN_MAPPING, so the date
    # columns were silently left unconverted.
    date_cols = ['write_time', 'publish_time', 'start_time', 'end_time']
    for col in date_cols:
        if col in df.columns:
            # Capture which rows had a raw value BEFORE coercion; the old
            # check compared the post-conversion column against itself and
            # was therefore always empty.
            had_value = df[col].notna()
            df[col] = pd.to_datetime(df[col], errors='coerce')
            failed = had_value & df[col].isna()  # had a value but failed to parse
            if failed.any():
                logger.warning(f"日期格式错误在列 {col}，行数: {int(failed.sum())}")

    # Boolean columns. Bug fix: previously 'if_*' names that never matched
    # the 'is_*' names produced by COLUMN_MAPPING.
    bool_cols = ['is_company_policy', 'is_crossborder_policy', 'validity']
    for col in bool_cols:
        if col in df.columns:
            # Normalize the Chinese yes/no markers and native bools; anything
            # else becomes NaN and is treated as False.
            df[col] = df[col].map({'是': True, '否': False, True: True, False: False})
            df[col] = df[col].fillna(False).astype(bool)

    # Text columns: strip whitespace; empty strings become missing values.
    # Bug fix: replace('', None) invokes pandas' value=None pad-fill
    # behaviour (forward-filling from the previous row); use np.nan.
    text_cols = ['title', 'department', 'document_number', 'main_text']
    for col in text_cols:
        if col in df.columns:
            df[col] = df[col].str.strip().replace('', np.nan)

    # Address columns: missing values become empty strings.
    address_cols = [f'address_{i}' for i in range(1, 7)]
    for col in address_cols:
        if col in df.columns:
            df[col] = df[col].fillna('')

    return df

def process_labels(df, label_column, table_name, id_column='policy_id'):
    """Explode comma-separated 'N-M' label strings into one row per label.

    Each valid label of the form '<level1>-<level2>' yields a record
    {id_column, <table_name>_1, <table_name>_2}; malformed labels are
    collected separately and logged.

    Args:
        df: source DataFrame containing id_column and label_column.
        label_column: column holding the comma-separated label string.
        table_name: prefix for the two output level columns.
        id_column: name of the policy id column (default 'policy_id').

    Returns:
        (result_df, error_records) where result_df has columns
        [id_column, f"{table_name}_1", f"{table_name}_2"] and
        error_records is a list of dicts describing rejected labels.
    """
    logger = logging.getLogger('process_labels')
    label_pattern = re.compile(r'^\d+-\d+$')
    col_level1 = f"{table_name}_1"
    col_level2 = f"{table_name}_2"
    ok_rows = []
    bad_rows = []

    for policy_id, raw_value in zip(df[id_column], df[label_column]):
        label_text = str(raw_value).strip()
        # Skip rows with no usable label content.
        if not label_text or label_text.lower() in ('nan', 'null', ''):
            continue

        for piece in label_text.split(','):
            piece = piece.strip()
            if not label_pattern.match(piece):
                bad_rows.append({
                    id_column: policy_id,
                    'label': piece,
                    'reason': '格式错误'
                })
                logger.warning(f"无效标签格式: 政策ID {policy_id}, 标签 '{piece}'")
                continue
            # Pattern guarantees exactly two dash-separated integer parts.
            level1_str, level2_str = piece.split('-')
            ok_rows.append({
                id_column: policy_id,
                col_level1: int(level1_str),
                col_level2: int(level2_str)
            })

    if ok_rows:
        result_df = pd.DataFrame(ok_rows)
    else:
        # Preserve the expected schema even when nothing validated.
        result_df = pd.DataFrame(columns=[id_column, col_level1, col_level2])

    logger.info(f"处理标签列 '{label_column}': 有效标签 {len(ok_rows)}条, 错误标签 {len(bad_rows)}条")

    return result_df, bad_rows

def identify_long_fields(df, table_name, engine):
    """Find values in df that exceed the VARCHAR(n) limits of a DB table.

    Inspects the live table schema and, for every length-bounded VARCHAR
    column that also exists in df, reports each row whose string value is
    longer than the declared limit.

    Args:
        df: DataFrame about to be imported; must contain 'policy_id'.
        table_name: target table whose schema is inspected.
        engine: SQLAlchemy engine for the target database.

    Returns:
        List of dicts (policy_id, column, max_length, actual_length,
        truncated_value). Empty list when nothing is too long OR when
        inspection fails (failures are logged, not raised).
    """
    logger = logging.getLogger('identify_long_fields')
    long_fields = []

    try:
        # Read the target table's column definitions.
        inspector = inspect(engine)
        columns = inspector.get_columns(table_name)

        # Map column name -> declared max length for bounded VARCHARs only.
        # Bug fix: VARCHAR without a declared length has length=None, and
        # int(None) raised TypeError, aborting the whole check.
        varchar_columns = {
            col['name']: int(col['type'].length)
            for col in columns
            if 'VARCHAR' in str(col['type']) and col['type'].length is not None
        }

        for col, max_length in varchar_columns.items():
            if col in df.columns and df[col].dtype == 'object':
                # Non-string/missing cells yield NaN lengths; treat as not-too-long.
                too_long = df[col].str.len().gt(max_length).fillna(False)
                if too_long.any():
                    for _, row in df.loc[too_long, ['policy_id', col]].iterrows():
                        value = row[col]
                        long_fields.append({
                            'policy_id': row['policy_id'],
                            'column': col,
                            'max_length': max_length,
                            'actual_length': len(value),
                            # Keep the log payload small: first 50 chars only.
                            'truncated_value': value[:50] + '...' if len(value) > 50 else value
                        })

        return long_fields

    except Exception as e:
        # Best-effort check: never let inspection problems block the caller.
        logger.error(f"识别超长字段时出错: {str(e)}")
        return []

def import_to_db(df, table_name, engine, id_column='policy_id'):
    """Append df to a PostgreSQL table, skipping rows whose id already exists.

    Refuses to import (returning failure) if any value would overflow a
    VARCHAR column of the target table.

    Args:
        df: DataFrame to import; must contain id_column.
        table_name: destination table name.
        engine: SQLAlchemy engine.
        id_column: primary-key column used for duplicate filtering.

    Returns:
        (success: bool, rows_imported: int)
    """
    logger = logging.getLogger('import_data')
    try:
        # Abort early if any value exceeds a VARCHAR limit.
        long_fields = identify_long_fields(df, table_name, engine)
        if long_fields:
            logger.error(f"发现 {len(long_fields)} 个超长字段，无法导入到 {table_name}")
            for field in long_fields[:10]:  # cap log output at the first 10
                logger.error(f"政策ID: {field['policy_id']}, 字段: {field['column']}, "
                             f"最大长度: {field['max_length']}, 实际长度: {field['actual_length']}, "
                             f"前50个字符: {field['truncated_value']}")
            return False, 0

        if id_column not in df.columns:
            logger.error(f"DataFrame 中未找到指定的ID列 '{id_column}'")
            return False, 0

        # Step 1: fetch ids already present in the target table.
        # text() wrapper keeps this compatible with SQLAlchemy 2.x.
        existing_ids_query = text(f"SELECT {id_column} FROM {table_name}")
        try:
            existing_ids_df = pd.read_sql(existing_ids_query, engine)
            # A set gives O(1) membership tests for the isin filter below.
            existing_ids = set(existing_ids_df[id_column].tolist())
            logger.info(f"数据库 {table_name} 中已存在 {len(existing_ids)} 个 {id_column}")
        except Exception as e:
            # The table may not exist yet; treat that as "no existing ids".
            logger.warning(f"查询 {table_name} 中现有ID时出错 (表可能不存在): {e}")
            existing_ids = set()

        # Step 2: drop rows whose id already exists in the table.
        original_count = len(df)
        df_to_import = df[~df[id_column].isin(existing_ids)].copy()
        filtered_count = len(df_to_import)
        skipped_count = original_count - filtered_count

        if skipped_count > 0:
            logger.info(f"从 {original_count} 条记录中过滤掉 {skipped_count} 条已存在的记录，实际将导入 {filtered_count} 条记录到 {table_name}")

        if filtered_count == 0:
            logger.info(f"没有新的记录需要导入到 {table_name}")
            return True, 0  # success, but nothing to do

        # Step 3: append the filtered rows (duplicates already removed, so
        # plain 'append' is safe here).
        rows_imported = df_to_import.to_sql(
            table_name,
            engine,
            index=False,
            if_exists='append',
            method='multi',
            chunksize=1000
        )
        # Bug fix: to_sql returns None on some pandas/driver combinations,
        # which previously surfaced as "成功导入 None 行" and a None count.
        if rows_imported is None:
            rows_imported = filtered_count
        logger.info(f"成功导入 {rows_imported} 行到 {table_name}")
        return True, rows_imported

    except Exception as e:
        logger.error(f"导入到 {table_name} 失败: {str(e)}")
        return False, 0

def main(excel_file_path):
    """Import one Excel policy file into the database.

    Pipeline: connect to PostgreSQL, read the spreadsheet, rename columns
    per COLUMN_MAPPING, clean/validate the data, import the policy content
    table, then the GB and ZX label tables derived from the label columns.

    Args:
        excel_file_path: path to the source .xlsx file.

    Returns:
        Summary dict with keys 'policy_records', 'gb_labels', 'zx_labels',
        'duration' on success; None on any failure (details in the log).
    """
    logger = logging.getLogger('main')
    start_time = datetime.now()
    logger.info(f"开始导入政策数据: {excel_file_path}")

    try:
        # Build the engine and verify connectivity before reading anything.
        db_url = f"postgresql://{DB_CONFIG['user']}:{DB_CONFIG['password']}@{DB_CONFIG['host']}:{DB_CONFIG['port']}/{DB_CONFIG['database']}"
        engine = create_engine(db_url)
        with engine.connect() as conn:
            conn.execute(text("SELECT 1"))
        logger.info("数据库连接测试成功")

        # Step 1: read the Excel file. Force id/document-number columns to
        # str so values like leading-zero ids are not mangled into numbers.
        logger.info("读取Excel文件...")
        df = pd.read_excel(
            excel_file_path,
            engine='openpyxl',
            dtype={col: str for col in ['政策ID', '文号']}
        )
        logger.info(f"成功读取Excel文件, 包含 {len(df)} 条政策记录")

        # Step 2: rename columns to their SQL names.
        logger.info("重命名列以匹配数据库...")
        df.rename(columns=COLUMN_MAPPING, inplace=True)
        # Fix: route through the configured logger instead of a bare print()
        # so this information also reaches the log file.
        logger.info(f"已更新的excel所有列：{list(df.columns)}")

        # Step 3: clean and validate the data.
        logger.info("清洗和验证数据...")
        df = clean_and_validate_data(df)

        # Step 4: import the policy content table. A missing mapped column
        # here raises KeyError and is reported via the outer handler.
        logger.info("导入政策内容表...")
        content_cols = list(COLUMN_MAPPING.values())
        content_df = df[content_cols].copy()
        success, count = import_to_db(content_df, 'policy_content_table', engine)

        if not success:
            logger.error("政策内容表导入失败，终止执行")
            return

        # Step 5: process and import the GB (national standard) label table.
        logger.info("处理国标标签...")
        gb_label_df, gb_errors = process_labels(
            df,
            label_column='国标二级产业类型',
            table_name='gb_industry',
            id_column='policy_id'
        )

        if not gb_label_df.empty:
            gb_success, gb_count = import_to_db(
                gb_label_df,
                'policy_gb_label_table',
                engine
            )
            if gb_success:
                logger.info(f"成功导入 {gb_count} 条国标标签记录")
        else:
            logger.info("没有有效的国标标签需要导入")

        # Step 6: process and import the ZX (strategic emerging) label table.
        logger.info("处理战新标签...")
        zx_label_df, zx_errors = process_labels(
            df,
            label_column='战新二级产业类型',
            table_name='zx_policy',
            id_column='policy_id'
        )

        if not zx_label_df.empty:
            zx_success, zx_count = import_to_db(
                zx_label_df,
                'policy_zx_label_table',
                engine
            )
            if zx_success:
                logger.info(f"成功导入 {zx_count} 条战新标签记录")
        else:
            logger.info("没有有效的战新标签需要导入")

        # Step 7: import summary report.
        duration = datetime.now() - start_time
        logger.info(f"导入完成! 总耗时: {duration}")
        logger.info(f"政策记录导入: {count}条")
        logger.info(f"国标标签导入: {len(gb_label_df)}条")
        logger.info(f"战新标签导入: {len(zx_label_df)}条")

        return {
            'policy_records': count,
            'gb_labels': len(gb_label_df),
            'zx_labels': len(zx_label_df),
            'duration': str(duration)
        }

    except Exception as e:
        # Top-level boundary: log full traceback, signal failure to caller.
        logger.exception(f"处理过程中发生未预期错误: {str(e)}")
        return None

if __name__ == "__main__":
    # Input spreadsheets: cleaned policy-label sheets numbered 1 through 11.
    excel_files = [
        f"cleaned_output_政策打标签分表{sheet_no}.xlsx" for sheet_no in range(1, 12)
    ]

    # Import each file in turn and print a short per-file summary.
    for input_file in excel_files:
        summary = main(input_file)

        if summary:
            print("\n导入结果摘要:")
            print(f"- 政策记录: {summary['policy_records']}条")
            print(f"- 国标标签: {summary['gb_labels']}条")
            print(f"- 战新标签: {summary['zx_labels']}条")
            print(f"- 总耗时: {summary['duration']}")
        else:
            print("导入失败，请查看日志文件获取详细信息")