import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report

# Configure matplotlib to render CJK (Chinese) text in plot titles/labels;
# falls back through the listed fonts until one is available on the system.
plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC", "Arial Unicode MS"]
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with CJK fonts

class PenguinDataAnalysis:
    """End-to-end analysis pipeline for the Palmer penguins dataset.

    Downloads the CSV from GitHub, explores and visualizes the data,
    drops rows with missing values, trains a logistic-regression species
    classifier, evaluates it, and writes all figures plus a text report
    to the current working directory.
    """

    def __init__(self):
        # Remote CSV location (Microsoft Learn ML-basics sample data).
        self.url = "https://raw.githubusercontent.com/MicrosoftDocs/mslearn-introduction-to-machine-learning/main/Data/ml-basics/penguins.csv"
        self.df = None        # raw DataFrame as loaded from the URL
        self.clean_df = None  # DataFrame after dropping rows with NaNs
        self.X_train = None
        self.X_test = None
        self.y_train = None
        self.y_test = None
        self.model = None     # fitted LogisticRegression
        self.y_pred = None    # model predictions on X_test

    def load_data(self):
        """Download the penguin dataset from ``self.url`` and return it.

        Returns:
            pandas.DataFrame: the raw dataset (also stored in ``self.df``).
        """
        print("正在从GitHub加载企鹅数据集...")
        self.df = pd.read_csv(self.url)
        print(f"数据集加载成功，共 {len(self.df)} 行数据")
        return self.df

    def explore_data(self):
        """Print basic info, the first five rows, and summary statistics."""
        print("\n=== 数据集基本信息 ===")
        # Bug fix: DataFrame.info() prints directly and returns None, so
        # wrapping it in print() emitted a spurious trailing "None" line.
        self.df.info()

        print("\n=== 数据集前5行数据 ===")
        print(self.df.head())

        print("\n=== 数据集统计描述 ===")
        print(self.df.describe())

    def visualize_species_distribution(self):
        """Save a bar chart of how many rows each species has."""
        plt.figure(figsize=(10, 6))
        species_counts = self.df['Species'].value_counts()
        species_counts.plot(kind='bar', color=['#FF9AA2', '#FFB7B2', '#FFDAC1', '#E2F0CB', '#B5EAD7'])
        plt.title('企鹅物种分布', fontsize=16)
        plt.xlabel('物种', fontsize=12)
        plt.ylabel('数量', fontsize=12)
        plt.xticks(rotation=0, fontsize=10)
        plt.grid(axis='y', linestyle='--', alpha=0.7)
        plt.tight_layout()
        plt.savefig('penguin_species_distribution.png', dpi=300, bbox_inches='tight')
        plt.close()
        print("\n企鹅物种分布图已保存为 'penguin_species_distribution.png'")

    def visualize_feature_distributions(self):
        """Save per-species KDE plots for each morphological feature."""
        features = ['CulmenLength', 'CulmenDepth', 'FlipperLength']
        fig, axes = plt.subplots(1, 3, figsize=(18, 6))

        # Palette keyed by species *name*. In this CSV the Species column is
        # integer-coded (create_summary_report casts numpy.int64 to str), so
        # the name lookup misses; fall back to a distinct color per species
        # index instead of the old single gray ('#888888') for every curve.
        colors = {'Adelie': '#FF9AA2', 'Chinstrap': '#B5EAD7', 'Gentoo': '#C7CEEA'}
        fallback_palette = ['#FF9AA2', '#B5EAD7', '#C7CEEA', '#FFDAC1', '#E2F0CB']

        for i, feature in enumerate(features):
            for j, species in enumerate(self.df['Species'].dropna().unique()):
                species_data = self.df[self.df['Species'] == species][feature].dropna()
                sns.kdeplot(species_data, ax=axes[i], label=species, fill=True,
                            alpha=0.5,
                            color=colors.get(species, fallback_palette[j % len(fallback_palette)]))
            axes[i].set_title(f'{feature} 在不同物种中的分布', fontsize=14)
            axes[i].set_xlabel(feature, fontsize=12)
            axes[i].set_ylabel('密度', fontsize=12)
            axes[i].legend(title='物种')
            axes[i].grid(alpha=0.3)

        plt.tight_layout()
        plt.savefig('penguin_feature_distributions.png', dpi=300, bbox_inches='tight')
        plt.close()
        print("特征分布图已保存为 'penguin_feature_distributions.png'")

    def visualize_boxplots(self):
        """Save box plots (with overlaid data points) per feature/species."""
        features = ['CulmenLength', 'CulmenDepth', 'FlipperLength']
        fig, axes = plt.subplots(1, 3, figsize=(18, 6))

        for i, feature in enumerate(features):
            # NOTE(review): palette= without hue= is deprecated in newer
            # seaborn (>=0.13) and emits a FutureWarning; kept for behavior
            # compatibility — confirm installed seaborn version.
            sns.boxplot(x='Species', y=feature, data=self.df, ax=axes[i],
                        palette=['#FF9AA2', '#B5EAD7', '#C7CEEA'])
            sns.stripplot(x='Species', y=feature, data=self.df, ax=axes[i],
                          color='black', size=3, jitter=True, alpha=0.3)
            axes[i].set_title(f'{feature} 在不同物种中的箱线图', fontsize=14)
            axes[i].set_xlabel('物种', fontsize=12)
            axes[i].set_ylabel(feature, fontsize=12)
            axes[i].grid(axis='y', alpha=0.3)

        plt.tight_layout()
        plt.savefig('penguin_feature_boxplots.png', dpi=300, bbox_inches='tight')
        plt.close()
        print("箱线图已保存为 'penguin_feature_boxplots.png'")

    def check_missing_values(self):
        """Report and visualize missing values, then drop incomplete rows.

        Side effects: saves 'missing_values.png', stores the cleaned
        DataFrame in ``self.clean_df``, and writes it to
        'clean_penguin_data.csv'.
        """
        print("\n=== 缺失值情况 ===")
        missing_values = self.df.isnull().sum()
        print(missing_values)

        # Visualize per-column missing counts.
        plt.figure(figsize=(10, 6))
        missing_values.plot(kind='bar', color='#FF9AA2')
        plt.title('各列缺失值数量', fontsize=16)
        plt.xlabel('列名', fontsize=12)
        plt.ylabel('缺失值数量', fontsize=12)
        plt.grid(axis='y', alpha=0.3)
        plt.tight_layout()
        plt.savefig('missing_values.png', dpi=300, bbox_inches='tight')
        plt.close()
        print("缺失值可视化图已保存为 'missing_values.png'")

        # Drop any row containing a NaN.
        self.clean_df = self.df.dropna()
        print(f"\n删除缺失值后的数据形状: {self.clean_df.shape}")

        # Persist the cleaned data for downstream use.
        self.clean_df.to_csv('clean_penguin_data.csv', index=False)
        print("清洗后的数据已保存为 'clean_penguin_data.csv'")

    def prepare_training_data(self):
        """Split the cleaned data into stratified train/test sets (70/30)."""
        # Features and label for classification.
        features = ['CulmenLength', 'CulmenDepth', 'FlipperLength']
        X = self.clean_df[features]
        y = self.clean_df['Species']

        # Stratify on the label so class proportions match in both splits.
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            X, y, test_size=0.3, random_state=42, stratify=y
        )

        print(f"\n训练集大小: {self.X_train.shape}")
        print(f"测试集大小: {self.X_test.shape}")

    def train_model(self):
        """Fit a logistic-regression classifier on the training split."""
        print("\n正在训练逻辑回归模型...")
        # max_iter=200 may still trigger a ConvergenceWarning on unscaled
        # features; kept as-is to preserve the original model behavior.
        self.model = LogisticRegression(max_iter=200, random_state=42)
        self.model.fit(self.X_train, self.y_train)
        print("模型训练完成")

    def evaluate_model(self):
        """Predict on the test set; print accuracy, report, and save the confusion matrix."""
        # Predict on the held-out test set.
        self.y_pred = self.model.predict(self.X_test)

        # Overall accuracy.
        accuracy = accuracy_score(self.y_test, self.y_pred)
        print(f"\n模型准确率: {accuracy:.4f}")

        # Per-class precision/recall/F1.
        print("\n=== 分类报告 ===")
        class_report = classification_report(self.y_test, self.y_pred)
        print(class_report)

        # Confusion matrix heatmap, labeled with the model's class order.
        cm = confusion_matrix(self.y_test, self.y_pred)
        plt.figure(figsize=(10, 8))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                    xticklabels=self.model.classes_,
                    yticklabels=self.model.classes_)
        plt.title('模型混淆矩阵', fontsize=16)
        plt.xlabel('预测类别', fontsize=12)
        plt.ylabel('实际类别', fontsize=12)
        plt.tight_layout()
        plt.savefig('confusion_matrix.png', dpi=300, bbox_inches='tight')
        plt.close()
        print("混淆矩阵图已保存为 'confusion_matrix.png'")

    def feature_importance(self):
        """Save a grouped bar chart of per-class logistic-regression coefficients."""
        # coef_ has one row of feature coefficients per class (multinomial).
        coefficients = self.model.coef_
        features = ['CulmenLength', 'CulmenDepth', 'FlipperLength']

        # One group of bars per feature, one bar per class within the group.
        plt.figure(figsize=(12, 8))
        x = np.arange(len(features))
        width = 0.25

        for i, species in enumerate(self.model.classes_):
            plt.bar(x + i*width, coefficients[i], width, label=species)

        plt.title('各特征对不同物种分类的重要性', fontsize=16)
        plt.xlabel('特征', fontsize=12)
        plt.ylabel('系数值', fontsize=12)
        plt.xticks(x + width, features, fontsize=10)  # center ticks on groups
        plt.legend(title='物种')
        plt.grid(axis='y', alpha=0.3)
        plt.tight_layout()
        plt.savefig('feature_importance.png', dpi=300, bbox_inches='tight')
        plt.close()
        print("特征重要性图已保存为 'feature_importance.png'")

    def generate_correlation_heatmap(self):
        """Save a Pearson-correlation heatmap of the numeric features."""
        numeric_features = ['CulmenLength', 'CulmenDepth', 'FlipperLength', 'BodyMass']
        corr_matrix = self.clean_df[numeric_features].corr()

        plt.figure(figsize=(10, 8))
        sns.heatmap(corr_matrix, annot=True, cmap='coolwarm', fmt='.2f', square=True)
        plt.title('企鹅特征相关性热图', fontsize=16)
        plt.tight_layout()
        plt.savefig('correlation_heatmap.png', dpi=300, bbox_inches='tight')
        plt.close()
        print("相关性热图已保存为 'correlation_heatmap.png'")

    def create_summary_report(self):
        """Write a Markdown-style text summary of the whole analysis."""
        with open('penguin_analysis_report.txt', 'w', encoding='utf-8') as f:
            f.write("# 企鹅数据分析报告\n\n")
            f.write("## 1. 项目概述\n")
            f.write("本项目对企鹅数据集进行了全面的分析，包括数据探索、可视化分析、数据清洗、特征工程和机器学习建模。\n\n")

            f.write("## 2. 数据来源\n")
            f.write(f"数据来源于Microsoft Learn机器学习基础课程：{self.url}\n\n")

            f.write("## 3. 数据探索\n")
            f.write(f"- 原始数据量：{len(self.df)}行\n")
            f.write(f"- 清洗后数据量：{len(self.clean_df)}行\n")

            # Species values are numpy.int64 in this CSV; cast to str so
            # str.join accepts them.
            unique_species = self.clean_df['Species'].unique()
            species_strings = [str(species) for species in unique_species]
            f.write(f"- 包含的物种：{', '.join(species_strings)}\n")
            f.write(f"- 主要特征：CulmenLength, CulmenDepth, FlipperLength, BodyMass\n\n")

            f.write("## 4. 模型性能\n")
            f.write(f"- 逻辑回归模型准确率：{accuracy_score(self.y_test, self.y_pred):.4f}\n\n")

            f.write("## 5. 结论\n")
            f.write("1. 三种企鹅物种在形态特征上存在明显差异\n")
            f.write("2. FlipperLength和BodyMass呈现较强的正相关关系\n")
            f.write("3. 使用基本形态特征可以较好地预测企鹅物种\n\n")

            f.write("## 6. 生成的文件\n")
            f.write("- penguin_species_distribution.png：企鹅物种分布图\n")
            f.write("- penguin_feature_distributions.png：特征分布图\n")
            f.write("- penguin_feature_boxplots.png：特征箱线图\n")
            f.write("- missing_values.png：缺失值可视化图\n")
            f.write("- confusion_matrix.png：模型混淆矩阵\n")
            f.write("- feature_importance.png：特征重要性图\n")
            f.write("- correlation_heatmap.png：相关性热图\n")
            f.write("- clean_penguin_data.csv：清洗后的数据文件\n")
            f.write("- penguin_analysis_report.txt：分析报告\n")

        print("\n分析报告已保存为 'penguin_analysis_report.txt'")

    def run_analysis(self):
        """Run the full pipeline: load → explore → visualize → clean → model → report."""
        self.load_data()
        self.explore_data()
        self.visualize_species_distribution()
        self.visualize_feature_distributions()
        self.visualize_boxplots()
        self.check_missing_values()
        self.generate_correlation_heatmap()
        self.prepare_training_data()
        self.train_model()
        self.evaluate_model()
        self.feature_importance()
        self.create_summary_report()
        print("\n企鹅数据分析项目完成！")

# Script entry point: build the pipeline object and run every stage.
if __name__ == "__main__":
    PenguinDataAnalysis().run_analysis()