import io
import os
import warnings
from typing import Any, Dict, List, Tuple

import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from PIL import Image

# Suppress library warnings (pandas/matplotlib deprecation noise) so they do not clutter the UI logs.
warnings.filterwarnings('ignore')

class DataAnalyzer:
    """Heuristic analyzer that flags potentially invalid DataFrame columns.

    Each ``check_*`` method inspects one column and returns a dict containing
    at least ``is_invalid`` (bool) and ``reason`` (str); ``analyze_column``
    aggregates every check for one column, and ``analyze_data`` runs the whole
    suite over all columns. User-facing strings are intentionally in Chinese
    to match the Gradio UI.
    """

    def __init__(self):
        """Initialize empty state; call ``load_data`` before analyzing."""
        self.data = None              # loaded DataFrame; stays None until load_data succeeds
        self.analysis_results = {}    # column name -> per-check result dicts
        self.invalid_columns = {}     # column name -> list of reasons, for flagged columns

    def load_data(self, file) -> Any:
        """Load a CSV or XLSX file referenced by ``file.name``.

        Args:
            file: object exposing a ``.name`` path attribute (e.g. a Gradio
                upload handle).

        Returns:
            The loaded ``pd.DataFrame`` on success, or an error-message
            string on failure (callers distinguish the two cases with
            ``isinstance(result, str)``).
        """
        file_ext = os.path.splitext(file.name)[1].lower()
        # Drop any previously loaded data so a failed load cannot silently
        # leave stale results behind.
        self.data = None

        if file_ext == '.csv':
            # Try encodings commonly used for Chinese-language CSV exports.
            for encoding in ('utf-8', 'gbk', 'gb2312', 'iso-8859-1'):
                try:
                    self.data = pd.read_csv(file.name, encoding=encoding)
                    break
                except UnicodeDecodeError:
                    continue  # wrong encoding guess -- try the next candidate
                except Exception as e:
                    return f"读取CSV文件时出错: {str(e)}"
            if self.data is None:
                # Bug fix: previously this fell through and returned None
                # when every candidate encoding raised UnicodeDecodeError.
                return "读取CSV文件时出错: 无法识别文件编码"
        elif file_ext == '.xlsx':
            try:
                self.data = pd.read_excel(file.name)
            except Exception as e:
                return f"读取XLSX文件时出错: {str(e)}"
        else:
            return "不支持的文件格式，请上传CSV或XLSX文件"

        return self.data

    def check_missing_values(self, column: str) -> Dict:
        """Flag the column when more than 30% of its values are missing."""
        series = self.data[column]
        total = len(series)
        missing_count = series.isnull().sum()
        # Guard against an empty DataFrame (division by zero).
        missing_percentage = (missing_count / total) * 100 if total else 0.0

        return {
            "missing_count": int(missing_count),
            "missing_percentage": round(missing_percentage, 2),
            "is_invalid": missing_percentage > 30,
            "reason": f"缺失值占比{round(missing_percentage, 2)}%" if missing_percentage > 30 else "缺失值比例在可接受范围内",
        }

    def check_constant_values(self, column: str) -> Dict:
        """Flag the column when it holds at most one distinct value."""
        series = self.data[column]
        total = len(series)
        nunique = series.nunique()
        unique_percentage = (nunique / total) * 100 if total else 0.0

        return {
            "unique_values_count": int(nunique),
            "unique_percentage": round(unique_percentage, 2),
            # nunique() ignores NaN, so an all-NaN column also counts as constant.
            "is_invalid": nunique <= 1,
            "reason": "列中所有值都相同" if nunique <= 1 else "列中包含多个不同的值",
        }

    def check_high_cardinality(self, column: str) -> Dict:
        """Flag likely ID columns: >95% unique values on a non-trivial dataset."""
        series = self.data[column]
        total = len(series)
        nunique = series.nunique()
        unique_percentage = (nunique / total) * 100 if total else 0.0
        # Require more than 10 rows so tiny samples are not mistaken for IDs.
        is_invalid = unique_percentage > 95 and total > 10

        return {
            "unique_values_count": int(nunique),
            "unique_percentage": round(unique_percentage, 2),
            "is_invalid": is_invalid,
            "reason": f"唯一值占比{round(unique_percentage, 2)}%，可能是ID列或唯一标识符" if is_invalid else "唯一值比例在可接受范围内",
        }

    def check_low_variance(self, column: str) -> Dict:
        """Flag numeric columns whose range-normalized variance is tiny (<0.01).

        Non-numeric columns are reported as not applicable.
        """
        series = self.data[column]
        if not pd.api.types.is_numeric_dtype(series):
            return {"is_invalid": False, "reason": "不适用（非数值列）"}

        try:
            variance = series.var()
            value_range = series.max() - series.min()
            # Normalize by the value range so the 0.01 threshold is
            # scale-independent; a zero range yields 0 (flagged as low).
            normalized_variance = variance / value_range if value_range != 0 else 0
            is_low = normalized_variance < 0.01
            return {
                "variance": float(variance),
                "normalized_variance": float(normalized_variance),
                "is_invalid": is_low,
                "reason": f"标准化方差为{round(normalized_variance, 4)}，变化很小" if is_low else "方差在可接受范围内",
            }
        except Exception:
            # e.g. statistics on pathological columns; treat as inconclusive.
            return {"is_invalid": False, "reason": "计算方差时出错"}

    def check_correlation(self, column: str) -> Dict:
        """Flag a numeric column that is nearly redundant with another one.

        The column is suspicious when its Pearson correlation with any other
        numeric column exceeds 0.9 in absolute value.
        """
        if not pd.api.types.is_numeric_dtype(self.data[column]):
            return {"is_invalid": False, "reason": "不适用（非数值列）"}

        numeric_columns = self.data.select_dtypes(include=[np.number]).columns.tolist()
        if column not in numeric_columns or len(numeric_columns) <= 1:
            return {"is_invalid": False, "reason": "不适用（没有其他数值列可比较）"}

        try:
            correlations = {}
            for other in numeric_columns:
                if other == column:
                    continue
                corr = self.data[column].corr(self.data[other])
                # NaN correlations (e.g. against a constant column) are skipped.
                if not np.isnan(corr) and abs(corr) > 0.9:
                    correlations[other] = round(corr, 3)

            if not correlations:
                return {"is_invalid": False, "reason": "没有与其他列的高相关性"}

            strongest = max(correlations.items(), key=lambda item: abs(item[1]))
            return {
                "high_correlations": correlations,
                "is_invalid": True,
                "reason": f"与列 '{strongest[0]}' 的相关性为 {strongest[1]}，可能是冗余列",
            }
        except Exception:
            return {"is_invalid": False, "reason": "计算相关性时出错"}

    def check_string_issues(self, column: str) -> Dict:
        """Flag string columns where empty/whitespace-only values exceed 30%."""
        series = self.data[column]
        if not pd.api.types.is_string_dtype(series) and not pd.api.types.is_object_dtype(series):
            return {"is_invalid": False, "reason": "不适用（非字符串列）"}

        # Coerce non-null values to str so the comparisons below work on
        # mixed object columns; NaN entries are left in place.
        series = series.apply(lambda x: str(x) if not pd.isna(x) else x)
        total = len(series)

        empty_strings = (series == '').sum()
        # .sum() skips the NaN results produced for missing entries.
        whitespace_strings = series.str.isspace().sum()
        empty_percentage = (empty_strings / total) * 100 if total else 0.0
        whitespace_percentage = (whitespace_strings / total) * 100 if total else 0.0
        combined = empty_percentage + whitespace_percentage

        return {
            "empty_strings_count": int(empty_strings),
            "empty_percentage": round(empty_percentage, 2),
            "whitespace_strings_count": int(whitespace_strings),
            "whitespace_percentage": round(whitespace_percentage, 2),
            "is_invalid": combined > 30,
            "reason": f"空字符串和空白字符串占比{round(combined, 2)}%" if combined > 30 else "字符串格式正常",
        }

    def check_data_leakage(self, column: str) -> Dict:
        """Flag identifier-like columns (potential data leakage for modeling).

        Triggers when the column name contains an ID-style keyword AND the
        column is nearly all-unique (>90% of rows).
        """
        suspicious_names = ['id', 'uuid', 'key', 'primary', 'identifier', '标识', '编号']
        col_lower = column.lower()

        is_suspicious = any(name in col_lower for name in suspicious_names)
        nunique = self.data[column].nunique()
        row_count = len(self.data)
        # Guard against an empty DataFrame in the percentage below.
        unique_pct = (nunique / row_count * 100) if row_count else 0.0
        flagged = is_suspicious and nunique > row_count * 0.9

        return {
            "is_invalid": flagged,
            "reason": f"列名包含标识符关键词且唯一值比例高({round(unique_pct, 2)}%)，可能是ID列" if flagged else "不是标识符列",
        }

    def check_date_time_issues(self, column: str) -> Dict:
        """Flag degenerate datetime columns (single date, or mostly future dates)."""
        series = self.data[column]

        # Bug fix: numeric columns used to "parse" successfully as epoch
        # timestamps and produce false positives here, so skip them outright.
        if pd.api.types.is_numeric_dtype(series):
            return {"is_invalid": False, "reason": "不适用（非日期时间列）"}

        # Convert once and reuse the result (the original converted twice).
        try:
            datetime_series = pd.to_datetime(series)
        except Exception:
            return {"is_invalid": False, "reason": "不适用（非日期时间列）"}

        nunique = series.nunique()

        try:
            # A majority of future dates usually indicates bad data entry.
            future_dates = (datetime_series > pd.Timestamp.now()).sum()
            total = len(series)
            future_percentage = (future_dates / total) * 100 if total else 0.0
            return {
                "unique_dates": int(nunique),
                "future_dates_count": int(future_dates),
                "future_dates_percentage": round(future_percentage, 2),
                "is_invalid": nunique <= 1 or future_percentage > 50,
                "reason": "只有一个唯一日期值" if nunique <= 1 else (f"{round(future_percentage, 2)}%的日期是未来日期，可能有误" if future_percentage > 50 else "日期时间格式正常"),
            }
        except Exception:
            # e.g. comparing tz-aware values against a naive "now".
            return {"is_invalid": nunique <= 1, "reason": "只有一个唯一日期值" if nunique <= 1 else "日期时间格式正常"}

    def generate_column_stats_plot(self, column: str) -> np.ndarray:
        """Render summary chart(s) for *column* and return them as an image array.

        Numeric columns get a histogram + box plot; other columns get a bar
        chart of their 10 most frequent values.
        """
        series = self.data[column]

        # Bug fix: the original created an extra plt.figure() before
        # plt.subplots()/plt.figure() and closed only one of them, leaking
        # matplotlib figures on every call. Create exactly one figure and
        # close it explicitly below.
        if pd.api.types.is_numeric_dtype(series):
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
            sns.histplot(series.dropna(), kde=True, ax=ax1)
            ax1.set_title(f'{column} 分布直方图')
            sns.boxplot(y=series.dropna(), ax=ax2)
            ax2.set_title(f'{column} 箱线图')
        elif pd.api.types.is_string_dtype(series) or pd.api.types.is_object_dtype(series):
            fig, ax = plt.subplots(figsize=(10, 6))
            value_counts = series.value_counts().head(10)
            if len(value_counts) > 0:
                value_counts.plot(kind='bar', ax=ax)
                ax.set_title(f'{column} 前10个最常见值')
                ax.tick_params(axis='x', rotation=45)
            else:
                ax.text(0.5, 0.5, '没有非空值可供展示', ha='center', va='center', fontsize=14)
        else:
            # Other dtypes: fall back to stringified value counts.
            fig, ax = plt.subplots(figsize=(10, 6))
            try:
                value_counts = series.astype(str).value_counts().head(10)
                value_counts.plot(kind='bar', ax=ax)
                ax.set_title(f'{column} 前10个最常见值')
                ax.tick_params(axis='x', rotation=45)
            except Exception:
                ax.text(0.5, 0.5, '无法为此列类型生成图表', ha='center', va='center', fontsize=14)

        fig.tight_layout()

        # Render the figure to a PNG buffer and hand back a numpy image.
        buf = io.BytesIO()
        fig.savefig(buf, format='png')
        plt.close(fig)
        buf.seek(0)
        img = Image.open(buf)
        return np.array(img)

    def analyze_column(self, column: str) -> Dict:
        """Run every check on one column and attach an overall verdict."""
        results: Dict[str, Any] = {"data_type": str(self.data[column].dtype)}

        results["missing_values"] = self.check_missing_values(column)
        results["constant_values"] = self.check_constant_values(column)
        results["high_cardinality"] = self.check_high_cardinality(column)
        results["low_variance"] = self.check_low_variance(column)
        results["correlation"] = self.check_correlation(column)
        results["string_issues"] = self.check_string_issues(column)
        results["data_leakage"] = self.check_data_leakage(column)
        results["date_time_issues"] = self.check_date_time_issues(column)

        # NOTE: per-column plot generation (generate_column_stats_plot) is
        # deliberately not invoked here to keep the analysis fast.

        # A column is invalid overall when any individual check flagged it.
        checks = [c for c in results.values() if isinstance(c, dict) and "is_invalid" in c]
        invalid_reasons = [c["reason"] for c in checks if c["is_invalid"]]
        results["overall"] = {
            "is_invalid": bool(invalid_reasons),
            "invalid_reasons": invalid_reasons,
        }

        return results

    def analyze_data(self) -> Tuple[str, List[Dict]]:
        """Analyze every column of the loaded data.

        Returns:
            A ``(summary, rows)`` pair: *summary* is a markdown report and
            *rows* is one dict per column for the results table. When no data
            is loaded, returns an error message and an empty list.
        """
        if self.data is None:
            return "请先上传数据文件", []

        self.analysis_results = {}
        self.invalid_columns = {}

        for column in self.data.columns:
            col_result = self.analyze_column(column)
            self.analysis_results[column] = col_result
            if col_result["overall"]["is_invalid"]:
                self.invalid_columns[column] = col_result["overall"]["invalid_reasons"]

        # Build the markdown summary.
        if not self.invalid_columns:
            summary = "✅ 未发现无效数据列。"
        else:
            summary = f"⚠️ 发现 {len(self.invalid_columns)} 个潜在的无效数据列：\n\n"
            for col, reasons in self.invalid_columns.items():
                summary += f"**{col}**：\n"
                for i, reason in enumerate(reasons, 1):
                    summary += f"  {i}. {reason}\n"
                summary += "\n"

        # One row per column for the results table.
        result_list = []
        for column in self.data.columns:
            overall = self.analysis_results[column]["overall"]
            result_list.append({
                "列名": column,
                "数据类型": self.analysis_results[column]["data_type"],
                "是否无效": "是" if overall["is_invalid"] else "否",
                "无效原因": " | ".join(overall["invalid_reasons"]) if overall["is_invalid"] else "无",
            })

        return summary, result_list


def analyze_file(file):
    """Analyze an uploaded file and report potentially invalid columns.

    Returns a ``(summary, rows)`` pair for the Gradio outputs; on failure
    the first element is an error message and the second is ``None``.
    """
    if file is None:
        return "请上传CSV或XLSX文件", None

    analyzer = DataAnalyzer()
    loaded = analyzer.load_data(file)

    # load_data signals failure by returning an error string instead of a
    # DataFrame; pass that message straight through to the UI.
    if isinstance(loaded, str):
        return loaded, None

    return analyzer.analyze_data()


# Build the Gradio interface: file upload -> analyze button -> summary + table.
with gr.Blocks(title="无效数据列分析工具") as app:
    gr.Markdown("# 无效数据列分析工具")
    gr.Markdown("上传CSV或XLSX文件，系统将分析并识别潜在的无效或问题数据列。")
    
    with gr.Row():
        file_input = gr.File(label="上传CSV或XLSX文件")
        
    with gr.Row():
        analyze_btn = gr.Button("开始分析")
        
    with gr.Row():
        with gr.Column():
            # Markdown component so the summary's **bold** column names render.
            output_summary = gr.Markdown(label="分析结果摘要")
        
    with gr.Row():
        output_table = gr.DataFrame(label="详细分析结果")
    
    # Wire the button: analyze_file returns (summary_markdown, result_rows).
    analyze_btn.click(
        fn=analyze_file,
        inputs=[file_input],
        outputs=[output_summary, output_table]
    )


# Launch the web app only when run as a script (not when imported).
if __name__ == "__main__":
    app.launch()
