import numpy as np
import pandas as pd
import sqlite3
import os
import re
import logging
from typing import Dict, List, Tuple, Optional
from en_to_cn_map import country_map


def translate_ref_area(df: pd.DataFrame, column_name: str = 'ref_area', 
    create_new_column: bool = True) -> pd.DataFrame:
    """
    Translate country codes in `column_name` into country names via `country_map`.

    Codes with no entry in `country_map` are kept unchanged.

    Args:
        df: input pandas DataFrame
        column_name: name of the column to translate, defaults to 'ref_area'
        create_new_column: where to store the translation
            True: write results into a new 'country_name' column
            False: overwrite the source column in place

    Returns:
        pd.DataFrame: a copy of `df` with the translation applied (the input
        DataFrame itself is never modified). Returned unchanged if the column
        is missing.
    """
    # Bail out early when the requested column does not exist.
    if column_name not in df.columns:
        print(f"警告：DataFrame中不存在列 '{column_name}'")
        return df

    # Work on a copy so the caller's DataFrame is untouched.
    result = df.copy()

    # Map code -> name; fall back to the original value for unknown codes.
    translated = result[column_name].map(country_map).fillna(result[column_name])
    target = 'country_name' if create_new_column else column_name
    result[target] = translated

    return result


def process_column_names(df: pd.DataFrame, keep_original: bool = False) -> Tuple[pd.DataFrame, Dict[str, str]]:
    """
    Normalize DataFrame column names: lower-case, strip special characters,
    and de-duplicate repeated names.

    Args:
        df: input pandas DataFrame
        keep_original: when True, keep the original columns and add the
            normalized names as extra columns (debugging aid)

    Returns:
        tuple: (processed DataFrame, mapping of new column name -> original name)
    """
    original_columns = df.columns.tolist()
    new_columns = []
    column_mapping = {}  # new name -> original name
    column_count = {}    # occurrences of each cleaned base name
    used_names = set()   # every name assigned so far (collision guard)

    print("开始处理列名...")
    print(f"原始列名: {original_columns}")

    for i, col in enumerate(original_columns):
        lower_col = col.lower()

        # Keep only letters, digits and underscores.
        cleaned_col = re.sub(r'[^a-z0-9_]', '_', lower_col)
        cleaned_col = re.sub(r'_+', '_', cleaned_col)  # collapse runs of '_'
        cleaned_col = cleaned_col.strip('_')  # drop leading/trailing '_'

        # Empty after cleaning -> positional fallback name.
        if not cleaned_col:
            cleaned_col = f'column_{i+1}'

        # De-duplicate repeated base names with an occurrence counter.
        if cleaned_col in column_count:
            column_count[cleaned_col] += 1
            new_col_name = f"{cleaned_col}_{column_count[cleaned_col]}"
        else:
            column_count[cleaned_col] = 1
            new_col_name = cleaned_col

        # Fixed: a generated name could still collide with a name produced
        # earlier from a *different* original column (e.g. 'A', 'a', 'a_2'
        # previously produced 'a', 'a_2', 'a_2'). Bump the counter until the
        # name is genuinely unique.
        while new_col_name in used_names:
            column_count[cleaned_col] += 1
            new_col_name = f"{cleaned_col}_{column_count[cleaned_col]}"
        used_names.add(new_col_name)

        new_columns.append(new_col_name)
        column_mapping[new_col_name] = col

        # Report only names that actually changed.
        if new_col_name != col.lower():
            print(f"列名转换: '{col}' -> '{new_col_name}'")

    if keep_original:
        # Keep original columns and append the normalized ones.
        df_processed = df.copy()
        for new_col, orig_col in zip(new_columns, original_columns):
            df_processed[new_col] = df[orig_col]
    else:
        # Rename columns in place on a copy.
        df_processed = df.copy()
        df_processed.columns = new_columns

    print(f"处理后的列名: {new_columns}")
    print(f"列名映射: {column_mapping}")

    return df_processed, column_mapping


class CSVToDBImporter:
    """
    CSV-to-database import tool.

    Normalizes column names, skips single-value columns, infers SQL column
    types, and bulk-inserts the rows. Supports SQLite, MySQL and PostgreSQL.
    """

    def __init__(self, db_type='mysql', db_config=None):
        """
        Initialize database connection settings (no connection is opened here).

        Args:
            db_type: database backend ('sqlite', 'mysql', 'postgresql')
            db_config: connection parameter dict (host/user/password/database/...)
        """
        self.db_type = db_type
        self.db_config = db_config or {}
        self.connection = None

        # Configure logging for this importer.
        logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
        self.logger = logging.getLogger(__name__)

    def connect_to_db(self):
        """Open a connection to the configured database; re-raises on failure."""
        try:
            if self.db_type == 'sqlite':
                db_path = self.db_config.get('database', 'data.db')
                self.connection = sqlite3.connect(db_path)
                self.logger.info(f"连接到SQLite数据库: {db_path}")

            elif self.db_type == 'mysql':
                # Imported lazily so the driver is only required when used.
                import pymysql
                self.connection = pymysql.connect(
                    host=self.db_config.get('host', 'localhost'),
                    user=self.db_config.get('user', 'root'),
                    password=self.db_config.get('password', ''),
                    database=self.db_config.get('database', 'test'),
                    charset='utf8mb4'
                )
                self.logger.info("连接到MySQL数据库")

            elif self.db_type == 'postgresql':
                # Imported lazily so the driver is only required when used.
                import psycopg2
                self.connection = psycopg2.connect(
                    host=self.db_config.get('host', 'localhost'),
                    user=self.db_config.get('user', 'postgres'),
                    password=self.db_config.get('password', ''),
                    database=self.db_config.get('database', 'test'),
                    port=self.db_config.get('port', 5432)
                )
                self.logger.info("连接到PostgreSQL数据库")

            else:
                raise ValueError(f"不支持的数据库类型: {self.db_type}")

        except Exception as e:
            self.logger.error(f"数据库连接失败: {e}")
            raise

    def close_connection(self):
        """Close the database connection, if one is open."""
        if self.connection:
            self.connection.close()
            # Drop the reference so a closed connection is never reused.
            self.connection = None
            self.logger.info("数据库连接已关闭")

    def analyze_columns(self, df: pd.DataFrame) -> Tuple[List[str], Dict[str, str]]:
        """
        Analyze the DataFrame's columns: drop single-value columns and
        produce unique, sanitized column names.

        Args:
            df: pandas DataFrame

        Returns:
            tuple: (list of valid column names, mapping new name -> original name)
        """
        original_columns = df.columns.tolist()
        valid_columns = []
        column_mapping = {}  # new name -> original name
        column_count = {}    # occurrences of each sanitized base name
        used_names = set()   # every name assigned so far (collision guard)

        self.logger.info("开始分析列...")

        for i, col in enumerate(original_columns):
            # Columns with at most one distinct value carry no information.
            unique_values = df[col].nunique()
            if unique_values <= 1:
                self.logger.info(f"列 '{col}' 只有一个唯一值，跳过创建")
                continue

            # De-duplicate repeated base names with an occurrence counter.
            base_col_name = self.sanitize_column_name(col)
            if base_col_name in column_count:
                column_count[base_col_name] += 1
                new_col_name = f"{base_col_name}_{column_count[base_col_name]}"
            else:
                column_count[base_col_name] = 1
                new_col_name = base_col_name

            # Fixed: a generated name could still collide with a name produced
            # earlier from a different original column (e.g. 'a', 'a', 'a_2').
            while new_col_name in used_names:
                column_count[base_col_name] += 1
                new_col_name = f"{base_col_name}_{column_count[base_col_name]}"
            used_names.add(new_col_name)

            valid_columns.append(new_col_name)
            column_mapping[new_col_name] = col

            if new_col_name != col:
                self.logger.info(f"列名映射: '{col}' -> '{new_col_name}'")

        self.logger.info(f"分析完成，有效列数: {len(valid_columns)}")
        return valid_columns, column_mapping

    def sanitize_column_name(self, column_name: str) -> str:
        """
        Sanitize a column name: strip special characters, lower-case it.

        Args:
            column_name: original column name

        Returns:
            str: sanitized column name ('column' if nothing remains)
        """
        # Keep only letters, digits and underscores.
        cleaned = re.sub(r'[^a-zA-Z0-9_]', '_', str(column_name))
        # Collapse runs of underscores.
        cleaned = re.sub(r'_+', '_', cleaned)
        # Drop leading/trailing underscores.
        cleaned = cleaned.strip('_')
        # Lower-case.
        cleaned = cleaned.lower()

        # Fallback when nothing survives cleaning.
        if not cleaned:
            cleaned = 'column'

        return cleaned

    def create_table_sql(self, table_name: str, df: pd.DataFrame, valid_columns: List[str]) -> str:
        """
        Build the CREATE TABLE statement for the valid columns.

        Requires `self.column_mapping` (new name -> original name), which is
        set by `import_csv_to_db` via `analyze_columns`.

        Args:
            table_name: target table name
            df: source DataFrame
            valid_columns: sanitized column names to create

        Returns:
            str: CREATE TABLE IF NOT EXISTS statement
        """
        # Sanitize the table name with the same rules as column names.
        table_name = self.sanitize_column_name(table_name)

        # Per-backend SQL type mapping.
        type_mapping = {
            'sqlite': self._get_sqlite_type_mapping(),
            'mysql': self._get_mysql_type_mapping(),
            'postgresql': self._get_postgresql_type_mapping()
        }

        column_definitions = []

        for col in valid_columns:
            # Fixed: column_mapping maps new (sanitized) name -> original name,
            # so a direct lookup is correct; the previous reverse search
            # inverted the mapping and raised IndexError whenever a column
            # name had actually been changed by sanitization.
            original_col = self.column_mapping[col]

            # Infer a logical type and translate it to the backend's SQL type;
            # unknown logical types fall back to TEXT.
            col_type = self.infer_data_type(df[original_col], original_col)
            sql_type = type_mapping[self.db_type].get(col_type, 'TEXT')

            column_definitions.append(f"{col} {sql_type}")

        sql = f"CREATE TABLE IF NOT EXISTS {table_name} (\n"
        sql += ",\n".join(column_definitions)
        sql += "\n);"

        return sql

    def _get_sqlite_type_mapping(self) -> Dict[str, str]:
        """Logical type -> SQLite column type."""
        return {
            'int': 'INTEGER',
            'float': 'REAL',
            'str': 'TEXT',
            'bool': 'INTEGER',
            'datetime': 'TEXT'
        }

    def _get_mysql_type_mapping(self) -> Dict[str, str]:
        """Logical type -> MySQL column type."""
        return {
            'int': 'INT NULL',  # allow NULL values
            'float': 'FLOAT NULL',
            'str': 'VARCHAR(500)',
            'bool': 'BOOLEAN NULL',
            'datetime': 'DATETIME NULL',
            'year': 'YEAR NULL'  # allow NULL values
        }

    def _get_postgresql_type_mapping(self) -> Dict[str, str]:
        """Logical type -> PostgreSQL column type."""
        return {
            'int': 'INTEGER',
            'float': 'DOUBLE PRECISION',
            'str': 'VARCHAR(255)',
            'bool': 'BOOLEAN',
            'datetime': 'TIMESTAMP'
        }

    def infer_data_type(self, series: pd.Series, column_name: str) -> str:
        """
        Infer the logical data type of a Series.

        Args:
            series: pandas Series to inspect
            column_name: column name, used for name-based overrides

        Returns:
            str: one of 'int', 'float', 'str', 'bool', 'datetime', 'year',
            or '' (which callers map to the TEXT fallback)
        """
        # Name-based overrides for known columns.
        if 'ref_year_price' in column_name:
            return 'str'
        if 'time_period' in column_name:
            # '' is absent from every type mapping, so callers fall back to
            # the default TEXT type for this column.
            return ''
        if series.dtype == 'bool':
            return 'bool'

        # Year data: every non-null value is a 4-digit number.
        try:
            if series.dropna().apply(lambda x: str(x).isdigit() and len(str(x)) == 4).all():
                return 'year'
        except Exception:
            pass

        # Numeric: int when every value is integral, float otherwise.
        try:
            pd.to_numeric(series.dropna())
            if series.dropna().apply(lambda x: isinstance(x, int) or (isinstance(x, float) and x.is_integer())).all():
                return 'int'
            else:
                return 'float'
        except (ValueError, TypeError):
            pass

        # Datetime: parseable by pandas.
        try:
            pd.to_datetime(series.dropna())
            return 'datetime'
        except (ValueError, TypeError):
            pass

        # Default: string.
        return 'str'

    def clean_data_for_insertion(self, df: pd.DataFrame) -> List[Tuple]:
        """
        Prepare rows for insertion: NaN/empty values become None (SQL NULL)
        and booleans become 0/1.

        Args:
            df: DataFrame to clean

        Returns:
            List[Tuple]: one tuple per row, ready for executemany
        """
        cleaned_data = []

        for _, row in df.iterrows():
            cleaned_row = []
            for value in row:
                # Missing values -> NULL.
                if pd.isna(value):
                    cleaned_row.append(None)
                # Numeric NaN -> NULL (defensive; pd.isna covers this too).
                elif isinstance(value, (int, float)) and np.isnan(value):
                    cleaned_row.append(None)
                # Empty strings -> NULL.
                elif value == '':
                    cleaned_row.append(None)
                # Booleans -> 0/1.
                elif isinstance(value, bool):
                    cleaned_row.append(int(value))
                # Everything else passes through unchanged.
                else:
                    cleaned_row.append(value)
            cleaned_data.append(tuple(cleaned_row))

        return cleaned_data

    def import_csv_to_db(self, csv_file_path: str, table_name: str = None) -> bool:
        """
        Import a CSV file into the database.

        Also exports the filtered data next to the source CSV (Excel, or CSV
        when the row count exceeds Excel's limit).

        Args:
            csv_file_path: path to the CSV file
            table_name: target table name (derived from the file name when None)

        Returns:
            bool: True when the import succeeded
        """
        # Read the CSV and drop columns that are entirely empty.
        self.logger.info(f"读取CSV文件: {csv_file_path}")
        df = pd.read_csv(csv_file_path)
        df = df.dropna(axis=1, how='all')
        df, transform_info = process_column_names(df)
        # Translate country codes to country names.
        df = translate_ref_area(df)
        # Derive the table name from the file name when not provided.
        # Fixed: the None check must run BEFORE calling .lower(), otherwise
        # the documented table_name=None default raised AttributeError.
        if table_name is None:
            table_name = os.path.splitext(os.path.basename(csv_file_path))[0]
        table_name = table_name.lower()

        # Analyze columns (drops single-value columns, sanitizes names).
        valid_columns, self.column_mapping = self.analyze_columns(df)

        if not valid_columns:
            self.logger.warning("没有有效的列可以导入")
            return False

        # Connect to the database.
        self.connect_to_db()

        # Create the target table.
        create_sql = self.create_table_sql(table_name, df, valid_columns)

        print('>>>', create_sql)
        self.logger.info(f"创建表SQL:\n{create_sql}")

        cursor = self.connection.cursor()
        cursor.execute(create_sql)

        # Keep only the valid columns, renamed to their sanitized names.
        df_filtered = df[[self.column_mapping[col] for col in valid_columns]]
        df_filtered.columns = valid_columns

        # Placeholder style depends on the driver (pymysql uses %s).
        if self.db_type == 'mysql':
            placeholders = ', '.join(['%s' for _ in valid_columns])
        else:
            placeholders = ', '.join(['?' for _ in valid_columns])

        insert_sql = f"INSERT INTO {table_name} ({', '.join(valid_columns)}) VALUES ({placeholders})"

        self.logger.info(f"插入数据SQL: {insert_sql}")

        # Convert NaN values to None so the driver stores SQL NULL.
        data_tuples = self.clean_data_for_insertion(df_filtered)

        # Side effect: export the filtered data next to the source CSV.
        # Excel caps out at 1,048,576 rows, so very large frames go to CSV.
        excel_file_path = csv_file_path.replace('.csv', '.xlsx')
        if len(df_filtered) > 1048570:
            new_file_path = excel_file_path.replace('.xlsx', '_xlsx.csv')
            df_filtered.to_csv(new_file_path, index=False)
        else:
            df_filtered.to_excel(excel_file_path, index=False)
        try:
            cursor.executemany(insert_sql, data_tuples)
            self.connection.commit()
            self.logger.info(f"成功导入 {len(df_filtered)} 行数据到表 {table_name}")
            return True
        except Exception as e:
            self.logger.error(f"数据插入失败: {e}")
            self.connection.rollback()
            return False
        finally:
            cursor.close()

def main():
    """Example entry point: import a sample CSV into a MySQL database."""
    # Three MySQL connection profiles; only the first is used below.
    db_config1 = {
        'host': '192.168.31.134',
        'user': 'root',
        'password': 'Password123@mysql',
        'database': 'oecd_data',
    }
    db_config2 = {
        'host': '172.27.73.31',
        'user': 'root',
        'password': 'alienware',
        'database': 'oecd_data',
    }
    # Company database.
    db_config3 = {
        'host': '192.168.1.19',
        'user': 'root',
        'password': 'tjd',
        'database': 'oecd_data',
    }

    # Source file and target table (adjust the path as needed).
    csv_file = "../income_saving_distribution.csv"
    table_name = 'income_saving_distribution'

    importer = CSVToDBImporter(db_type='mysql', db_config=db_config1)
    importer.import_csv_to_db(csv_file, table_name)


if __name__ == "__main__":
    main()