import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
import matplotlib.pyplot as plt
import seaborn as sns
# Configure matplotlib font fallbacks so Chinese axis labels/titles render
# correctly on Windows/macOS/Linux; keep the minus sign ASCII so it is not
# swallowed by the CJK font.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Arial Unicode MS', 'Microsoft YaHei']
plt.rcParams['axes.unicode_minus'] = False
# Load the watermelon dataset (path is relative to the working directory)
data = pd.read_csv('./datasets/watermelon.csv')
X = data.iloc[:, 1:3].values  # feature columns 1-2: density and sugar content
y = data.iloc[:, 3].values  # label column 3: good melon = 1, bad melon = 0


# Experiment 1: compare different train/test split ratios
def evaluate_split_ratio(ratio, features=None, labels=None):
    """Train and evaluate a logistic-regression classifier for one split ratio.

    Parameters
    ----------
    ratio : float
        Fraction of the data held out as the test set (passed to
        ``train_test_split(test_size=...)``).
    features : array-like, optional
        Feature matrix; defaults to the module-level watermelon ``X``.
    labels : array-like, optional
        Target vector; defaults to the module-level watermelon ``y``.

    Returns
    -------
    tuple
        ``(accuracy, precision, recall, f1, conf_matrix)`` on the test set.
        Also displays a confusion-matrix heatmap as a side effect.
    """
    # Fall back to the module-level dataset for backward compatibility
    if features is None:
        features = X
    if labels is None:
        labels = y

    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=ratio, random_state=42)

    # Fit an L2-regularized logistic regression (default strength C=1.0)
    model = LogisticRegression(C=1.0, solver='lbfgs', max_iter=1000)
    model.fit(X_train, y_train)

    y_pred = model.predict(X_test)

    # zero_division=0 prevents UndefinedMetricWarning when the tiny test
    # split happens to contain no (predicted) positives; the score is then
    # an explicit 0.0 instead of an undefined value.
    accuracy = metrics.accuracy_score(y_test, y_pred)
    precision = metrics.precision_score(y_test, y_pred, zero_division=0)
    recall = metrics.recall_score(y_test, y_pred, zero_division=0)
    f1 = metrics.f1_score(y_test, y_pred, zero_division=0)
    conf_matrix = metrics.confusion_matrix(y_test, y_pred)

    # Visualize the confusion matrix (UI strings intentionally in Chinese)
    plt.figure(figsize=(6, 4))
    sns.heatmap(conf_matrix, annot=True, fmt='d', cmap='Blues',
                xticklabels=['坏瓜', '好瓜'], yticklabels=['坏瓜', '好瓜'])
    plt.title(f'混淆矩阵 (分割比例: {1 - ratio}:{ratio})')
    plt.ylabel('真实标签')
    plt.xlabel('预测标签')
    plt.show()

    return accuracy, precision, recall, f1, conf_matrix


# Experiment 2: tune the regularization parameter C
def evaluate_regularization(ratio, C_values):
    """Sweep the inverse regularization strength C and plot the metrics.

    Parameters
    ----------
    ratio : float
        Test-set fraction used for the (single, fixed-seed) train/test split.
    C_values : iterable of float
        Candidate values for ``LogisticRegression``'s ``C`` parameter.

    Returns
    -------
    pandas.DataFrame
        One row per C with columns 'C', 'Accuracy', 'Precision', 'Recall',
        'F1'. Also displays a line plot of the metrics vs. C (log x-axis).
    """
    # The split is loop-invariant: random_state=42 makes every call return
    # the identical partition, so compute it once instead of once per C.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=ratio, random_state=42)

    results = []
    for C in C_values:
        model = LogisticRegression(C=C, solver='lbfgs', max_iter=1000)
        model.fit(X_train, y_train)

        y_pred = model.predict(X_test)

        # zero_division=0 avoids UndefinedMetricWarning on degenerate
        # predictions (no positives) for strongly regularized models.
        accuracy = metrics.accuracy_score(y_test, y_pred)
        precision = metrics.precision_score(y_test, y_pred, zero_division=0)
        recall = metrics.recall_score(y_test, y_pred, zero_division=0)
        f1 = metrics.f1_score(y_test, y_pred, zero_division=0)

        results.append({
            'C': C,
            'Accuracy': accuracy,
            'Precision': precision,
            'Recall': recall,
            'F1': f1
        })

    results_df = pd.DataFrame(results)

    # Plot metric curves vs. C (UI strings intentionally in Chinese)
    plt.figure(figsize=(10, 6))
    plt.plot(results_df['C'], results_df['Accuracy'], 'o-', label='准确率')
    plt.plot(results_df['C'], results_df['Precision'], 's-', label='查准率')
    plt.plot(results_df['C'], results_df['Recall'], 'd-', label='查全率')
    plt.plot(results_df['C'], results_df['F1'], '^-', label='F1值')
    plt.xscale('log')  # C spans several orders of magnitude
    plt.title(f'正则化参数C对性能的影响 (分割比例: {1 - ratio}:{ratio})')
    plt.xlabel('正则化参数C (log scale)')
    plt.ylabel('分数')
    plt.legend()
    plt.grid(True)
    plt.show()

    return results_df


# Run both experiments as a script
if __name__ == "__main__":
    separator = "=" * 50

    # Experiment 1: compare hold-out split ratios
    print(separator)
    print("实验1：不同分割比例比较")
    print(separator)

    # Same two splits as before, expressed as (display label, test fraction)
    for split_label, test_fraction in (("8:2", 0.2), ("7:3", 0.3)):
        acc, pre, rec, f1, _ = evaluate_split_ratio(test_fraction)
        print(f"\n{split_label} 分割结果:")
        print(f"准确率: {acc:.4f}, 查准率: {pre:.4f}, 查全率: {rec:.4f}, F1值: {f1:.4f}")

    # Experiment 2: sweep the regularization strength C
    print("\n" + separator)
    print("实验2：正则化参数调整")
    print(separator)

    C_values = [0.001, 0.01, 0.1, 1, 10, 100, 1000]

    for split_label, test_fraction in (("8:2", 0.2), ("7:3", 0.3)):
        print(f"\n{split_label} 分割下不同C值的结果:")
        print(evaluate_regularization(test_fraction, C_values))