"""
数据处理工具模块扩展
"""
import logging
import pandas as pd
import numpy as np
import json
from datetime import datetime

logger = logging.getLogger(__name__)

class DataUtilsExt:
    """Extension utilities for DataFrame processing: missing-value fill,
    filtering, diffing, schema inspection and SQL generation."""

    @staticmethod
    def fill_missing_values(df, fill_mapping):
        """
        Fill missing values column by column.

        Args:
            df: source DataFrame; it is copied, never mutated.
            fill_mapping: mapping of column name -> fill value. Columns
                absent from ``df`` are silently skipped.

        Returns:
            pandas.DataFrame: a copy of ``df`` with NaNs filled.
        """
        try:
            result = df.copy()
            for column, fill_value in fill_mapping.items():
                if column in result.columns:
                    result[column] = result[column].fillna(fill_value)
            return result
        except Exception as e:
            logger.error(f"填充缺失值失败: {str(e)}")
            raise

    # Mask builders for filter_dataframe, keyed by operator token.
    # Each takes (series, value) and returns a boolean mask.
    _FILTER_OPS = {
        '==': lambda s, v: s == v,
        '!=': lambda s, v: s != v,
        '>': lambda s, v: s > v,
        '>=': lambda s, v: s >= v,
        '<': lambda s, v: s < v,
        '<=': lambda s, v: s <= v,
        'in': lambda s, v: s.isin(v),
        'not in': lambda s, v: ~s.isin(v),
        'contains': lambda s, v: s.str.contains(v, na=False),
        'not contains': lambda s, v: ~s.str.contains(v, na=False),
        'startswith': lambda s, v: s.str.startswith(v, na=False),
        'endswith': lambda s, v: s.str.endswith(v, na=False),
        # 'isna'/'notna' ignore the value operand entirely.
        'isna': lambda s, v: s.isna(),
        'notna': lambda s, v: s.notna(),
    }

    @staticmethod
    def filter_dataframe(df, filters):
        """
        Filter a DataFrame by a conjunction of conditions.

        Args:
            df: source DataFrame.
            filters: list of (column, operator, value) triples. Supported
                operators: ==, !=, >, >=, <, <=, in, not in, contains,
                not contains, startswith, endswith, isna, notna.
                Conditions naming an unknown column or operator are
                skipped with a warning (previously they were skipped
                silently).

        Returns:
            pandas.DataFrame: rows matching all applicable conditions.
        """
        try:
            result = df.copy()
            for column, operator, value in filters:
                if column not in result.columns:
                    logger.warning("filter_dataframe: unknown column %r, condition skipped", column)
                    continue
                build_mask = DataUtilsExt._FILTER_OPS.get(operator)
                if build_mask is None:
                    logger.warning("filter_dataframe: unknown operator %r, condition skipped", operator)
                    continue
                result = result[build_mask(result[column], value)]
            return result
        except Exception as e:
            logger.error(f"过滤DataFrame失败: {str(e)}")
            raise

    @staticmethod
    def compare_dataframes(df1, df2, key_columns):
        """
        Diff two DataFrames keyed by ``key_columns``.

        Args:
            df1: baseline DataFrame.
            df2: new DataFrame.
            key_columns: list of column names forming the row key.
                NOTE(review): keys are assumed unique per row; with
                duplicate keys ``.loc[idx]`` returns a DataFrame and the
                per-row comparison below is unreliable — confirm callers
                guarantee uniqueness.

        Returns:
            tuple: (added_rows DataFrame, deleted_rows DataFrame,
                modified_rows list of {'before': dict, 'after': dict}).

        Raises:
            ValueError: if a key column is missing from either frame.
        """
        try:
            for key in key_columns:
                if key not in df1.columns or key not in df2.columns:
                    raise ValueError(f"键列 {key} 不存在")

            # Index both frames by the key so set algebra on the index
            # yields added/deleted/common row keys directly.
            df1_indexed = df1.set_index(key_columns)
            df2_indexed = df2.set_index(key_columns)

            common_indices = df1_indexed.index.intersection(df2_indexed.index)

            # Rows present only in df2 are additions; only in df1, deletions.
            added_rows = df2_indexed.loc[df2_indexed.index.difference(df1_indexed.index)].reset_index()
            deleted_rows = df1_indexed.loc[df1_indexed.index.difference(df2_indexed.index)].reset_index()

            modified_rows = []
            for idx in common_indices:
                row1 = df1_indexed.loc[idx]
                row2 = df2_indexed.loc[idx]
                # Compare the non-key values; identical rows are not reported.
                if row1.equals(row2):
                    continue
                # Re-attach the key columns so each dict is self-contained.
                if isinstance(idx, tuple):
                    key_values = dict(zip(key_columns, idx))
                else:
                    key_values = {key_columns[0]: idx}
                before_row = row1.to_dict()
                after_row = row2.to_dict()
                before_row.update(key_values)
                after_row.update(key_values)
                modified_rows.append({
                    'before': before_row,
                    'after': after_row
                })

            return added_rows, deleted_rows, modified_rows
        except Exception as e:
            logger.error(f"比较DataFrame失败: {str(e)}")
            raise

    @staticmethod
    def detect_schema_changes(df1, df2):
        """
        Detect schema differences between two DataFrames.

        Args:
            df1: baseline DataFrame.
            df2: new DataFrame.

        Returns:
            dict: {'added_columns': [...], 'deleted_columns': [...],
                'type_changes': {column: {'from': dtype, 'to': dtype}}}.
                Column lists come from sets, so their order is arbitrary.
        """
        try:
            columns1 = set(df1.columns)
            columns2 = set(df2.columns)

            added_columns = columns2 - columns1
            deleted_columns = columns1 - columns2

            # For columns present in both frames, report dtype changes.
            type_changes = {}
            for column in columns1.intersection(columns2):
                type1 = df1[column].dtype
                type2 = df2[column].dtype
                if type1 != type2:
                    type_changes[column] = {
                        'from': str(type1),
                        'to': str(type2)
                    }

            return {
                'added_columns': list(added_columns),
                'deleted_columns': list(deleted_columns),
                'type_changes': type_changes
            }
        except Exception as e:
            logger.error(f"检测模式变化失败: {str(e)}")
            raise

    @staticmethod
    def _sql_literal(value):
        """Render a scalar as a SQL literal: missing values (None/NaN/NaT)
        become NULL, strings get embedded single quotes doubled, anything
        else is str()-formatted."""
        if not isinstance(value, str) and pd.isna(value):
            return 'NULL'
        if isinstance(value, str):
            return "'" + value.replace("'", "''") + "'"
        return str(value)

    @staticmethod
    def _sql_condition(column, value):
        """Equality test for a WHERE clause; '= NULL' never matches in SQL,
        so missing values are rendered as 'IS NULL'."""
        literal = DataUtilsExt._sql_literal(value)
        if literal == 'NULL':
            return f"{column} IS NULL"
        return f"{column} = {literal}"

    @staticmethod
    def generate_diff_sql(df1, df2, table_name, key_columns):
        """
        Generate INSERT/DELETE/UPDATE statements that turn df1 into df2.

        String values have single quotes doubled and missing values become
        NULL (IS NULL in WHERE clauses), but ``table_name`` and column
        names are interpolated verbatim — never pass untrusted identifiers.

        Args:
            df1: baseline DataFrame.
            df2: target DataFrame.
            table_name: target table name.
            key_columns: list of key column names.

        Returns:
            dict: {'insert': [...], 'delete': [...], 'update': [...]}.
        """
        try:
            added_rows, deleted_rows, modified_rows = DataUtilsExt.compare_dataframes(df1, df2, key_columns)

            insert_sqls = []
            delete_sqls = []
            update_sqls = []

            # Added rows -> INSERT with all columns (keys restored by reset_index).
            for _, row in added_rows.iterrows():
                columns = ', '.join(row.index)
                values = ', '.join(DataUtilsExt._sql_literal(v) for v in row.values)
                insert_sqls.append(f"INSERT INTO {table_name} ({columns}) VALUES ({values});")

            # Deleted rows -> DELETE matched on the key columns only.
            for _, row in deleted_rows.iterrows():
                where_clause = ' AND '.join(
                    DataUtilsExt._sql_condition(k, v) for k, v in row.items() if k in key_columns)
                delete_sqls.append(f"DELETE FROM {table_name} WHERE {where_clause};")

            # Modified rows -> UPDATE non-key columns, matched on the keys.
            for item in modified_rows:
                after_row = item['after']
                set_clause = ', '.join(
                    f"{k} = {DataUtilsExt._sql_literal(v)}" for k, v in after_row.items() if k not in key_columns)
                where_clause = ' AND '.join(
                    DataUtilsExt._sql_condition(k, v) for k, v in after_row.items() if k in key_columns)
                update_sqls.append(f"UPDATE {table_name} SET {set_clause} WHERE {where_clause};")

            return {
                'insert': insert_sqls,
                'delete': delete_sqls,
                'update': update_sqls
            }
        except Exception as e:
            logger.error(f"生成差异SQL失败: {str(e)}")
            raise

    @staticmethod
    def generate_schema_sql(df, table_name):
        """
        Generate a CREATE TABLE statement from a DataFrame's dtypes.

        Args:
            df: DataFrame whose columns/dtypes define the schema.
            table_name: name of the table to create (interpolated verbatim).

        Returns:
            str: CREATE TABLE statement; unknown dtypes map to TEXT.
        """
        try:
            # pandas dtype -> SQL type; anything unmapped falls back to TEXT.
            type_mapping = {
                'int64': 'INTEGER',
                'int32': 'INTEGER',
                'float64': 'FLOAT',
                'float32': 'FLOAT',
                'bool': 'BOOLEAN',
                'datetime64[ns]': 'DATETIME',
                'object': 'TEXT'
            }

            columns = []
            for column, dtype in df.dtypes.items():
                sql_type = type_mapping.get(str(dtype), 'TEXT')
                columns.append(f"{column} {sql_type}")

            return f"CREATE TABLE {table_name} (\n  " + ",\n  ".join(columns) + "\n);"
        except Exception as e:
            logger.error(f"生成模式SQL失败: {str(e)}")
            raise

    @staticmethod
    def dataframe_to_dict_list(df):
        """
        Convert a DataFrame to a list of per-row dicts.

        Args:
            df: DataFrame.

        Returns:
            list: one {column: value} dict per row.
        """
        try:
            return df.to_dict(orient='records')
        except Exception as e:
            logger.error(f"将DataFrame转换为字典列表失败: {str(e)}")
            raise

    @staticmethod
    def dict_list_to_dataframe(dict_list):
        """
        Convert a list of dicts to a DataFrame.

        Args:
            dict_list: list of {column: value} dicts.

        Returns:
            pandas.DataFrame: one row per dict.
        """
        try:
            return pd.DataFrame(dict_list)
        except Exception as e:
            logger.error(f"将字典列表转换为DataFrame失败: {str(e)}")
            raise

    @staticmethod
    def pivot_table(df, index, columns, values, aggfunc='mean'):
        """
        Build a pivot table.

        Args:
            df: DataFrame.
            index: column(s) to use as the pivot index.
            columns: column(s) to spread across the pivot columns.
            values: column(s) to aggregate.
            aggfunc: aggregation function (default 'mean').

        Returns:
            pandas.DataFrame: the pivot table.
        """
        try:
            return pd.pivot_table(df, index=index, columns=columns, values=values, aggfunc=aggfunc)
        except Exception as e:
            logger.error(f"创建透视表失败: {str(e)}")
            raise

    @staticmethod
    def group_by(df, by, agg_dict):
        """
        Group and aggregate.

        Args:
            df: DataFrame.
            by: column(s) to group by.
            agg_dict: mapping of column name -> aggregation function.

        Returns:
            pandas.DataFrame: aggregated result with the group keys as columns.
        """
        try:
            return df.groupby(by).agg(agg_dict).reset_index()
        except Exception as e:
            logger.error(f"分组聚合失败: {str(e)}")
            raise