import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score, classification_report
from sklearn.tree import export_text
import os
import datetime

# Experiment logger: mirrors messages to the console and an append-only log file.
class Logger:
    def __init__(self, log_file=None):
        """Open a fresh log file; default name carries a timestamp."""
        if log_file is None:
            timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
            log_file = f"西瓜数据集决策树实验_{timestamp}.log"
        self.log_file = log_file
        self.create_log_directory()
        self.initialize_log()

    def create_log_directory(self):
        """Ensure the parent directory of the log file exists."""
        parent = os.path.dirname(self.log_file)
        if parent and not os.path.exists(parent):
            os.makedirs(parent)

    def initialize_log(self):
        """Truncate the log file and write the experiment header."""
        banner = "=" * 50
        header = (
            f"{banner}\n"
            f"西瓜数据集决策树实验\n"
            f"开始时间: {datetime.datetime.now()}\n"
            f"{banner}\n\n"
        )
        with open(self.log_file, 'w', encoding='utf-8') as fh:
            fh.write(header)

    def log(self, message, print_to_console=True):
        """Append one message to the log file, optionally echoing to stdout."""
        if print_to_console:
            print(message)
        with open(self.log_file, 'a', encoding='utf-8') as fh:
            fh.write(f"{message}\n")

    def log_section(self, title):
        """Log a title framed by '=' rules for visual separation."""
        rule = "=" * 50
        self.log(f"\n{rule}\n{title}\n{rule}")

    def log_parameters(self, parameters):
        """Log a mapping of parameter names to values, one per line."""
        self.log("\n参数设置:")
        for name, value in parameters.items():
            self.log(f"{name}: {value}")

# Watermelon dataset: 17 samples, each row is six categorical attributes
# followed by the class label ('好瓜' = good melon, '坏瓜' = bad melon).
dataSet = [
    ['青绿', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', '好瓜'],
    ['乌黑', '蜷缩', '沉闷', '清晰', '凹陷', '硬滑', '好瓜'],
    ['乌黑', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', '好瓜'],
    ['青绿', '蜷缩', '沉闷', '清晰', '凹陷', '硬滑', '好瓜'],
    ['浅白', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', '好瓜'],
    ['青绿', '稍蜷', '浊响', '清晰', '稍凹', '软粘', '好瓜'],
    ['乌黑', '稍蜷', '浊响', '稍糊', '稍凹', '软粘', '好瓜'],
    ['乌黑', '稍蜷', '浊响', '清晰', '稍凹', '硬滑', '好瓜'],
    ['乌黑', '稍蜷', '沉闷', '稍糊', '稍凹', '硬滑', '坏瓜'],
    ['青绿', '硬挺', '清脆', '清晰', '平坦', '软粘', '坏瓜'],
    ['浅白', '硬挺', '清脆', '模糊', '平坦', '硬滑', '坏瓜'],
    ['浅白', '蜷缩', '浊响', '模糊', '平坦', '软粘', '坏瓜'],
    ['青绿', '稍蜷', '浊响', '稍糊', '凹陷', '硬滑', '坏瓜'],
    ['浅白', '稍蜷', '沉闷', '稍糊', '凹陷', '硬滑', '坏瓜'],
    ['乌黑', '稍蜷', '浊响', '清晰', '稍凹', '软粘', '坏瓜'],
    ['浅白', '蜷缩', '浊响', '模糊', '平坦', '硬滑', '坏瓜'],
    ['青绿', '蜷缩', '沉闷', '稍糊', '稍凹', '硬滑', '坏瓜']
]

# Column names for the six attributes (color, root, knock, texture, navel, touch).
feature_names = ['色泽', '根蒂', '敲声', '纹理', '脐部', '触感']

# Build a pandas DataFrame from the raw sample rows.
def create_dataframe():
    """Return `dataSet` as a DataFrame whose last column is the label '好瓜'."""
    columns = [*feature_names, '好瓜']
    return pd.DataFrame(dataSet, columns=columns)

# Data preprocessing: encode categorical columns and split into train/test sets.
def preprocess_data(df, test_size=0.3, logger=None):
    """Encode all object-typed columns plus the label, then do a stratified split.

    Returns ``(X_train, X_test, y_train, y_test, feature_columns)``.
    """
    if logger:
        logger.log_section(f"数据预处理 (测试集比例: {test_size:.1%})")

    # Work on a copy so the caller's DataFrame is untouched.
    data = df.copy()
    X = data.drop('好瓜', axis=1)
    y = data['好瓜']

    # Integer-encode every categorical feature column in place.
    for column in X.select_dtypes(include=['object']).columns:
        X[column] = LabelEncoder().fit_transform(X[column])
        if logger:
            logger.log(f"特征 '{column}' 已编码")

    # Encode the class label to integers.
    y_encoded = LabelEncoder().fit_transform(y)

    # Shuffled, stratified split with a fixed seed for reproducibility.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y_encoded, test_size=test_size, random_state=42, shuffle=True, stratify=y)

    if logger:
        logger.log(f"训练集大小: {len(X_train)}, 测试集大小: {len(X_test)}")
        logger.log(f"训练集标签分布: {np.bincount(y_train)}")
        logger.log(f"测试集标签分布: {np.bincount(y_test)}")

    return X_train, X_test, y_train, y_test, X.columns

# Hyper-parameter optimization of the decision tree via exhaustive grid search.
def optimize_decision_tree(X_train, y_train, logger=None):
    """Grid-search a DecisionTreeClassifier and return the refit best estimator.

    Fix: the CV fold count now adapts to the minority-class size.
    ``StratifiedKFold`` requires every class to have at least ``n_splits``
    members; with this tiny dataset a hard-coded ``n_splits=5`` can raise
    ``ValueError`` when the smaller class has fewer than 5 training samples.
    """
    if logger:
        logger.log_section("决策树模型优化")

    # Search space for the tree hyper-parameters.
    param_grid = {
        'criterion': ['gini', 'entropy'],
        'max_depth': [2, 3, 4, 5, 6, 7],  # depth cap candidates
        'min_samples_split': [2, 3, 4, 5],  # min samples to split a node
        'min_samples_leaf': [1, 2, 3, 4],  # min samples at a leaf
        'max_features': [None, 'sqrt', 'log2'],  # feature-subset strategy
        'class_weight': [None, 'balanced'],  # handle class imbalance
        'splitter': ['best', 'random'],  # split-selection strategy
        'min_impurity_decrease': [0.0, 0.01, 0.02]  # impurity-decrease threshold
    }

    if logger:
        logger.log_parameters(param_grid)

    clf = DecisionTreeClassifier(random_state=42)

    # Cap the fold count at the smallest class count (but keep >= 2 folds)
    # so StratifiedKFold never asks for more folds than a class has members.
    min_class_count = int(np.bincount(y_train).min())
    n_splits = max(2, min(5, min_class_count))
    cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=42)

    # Exhaustive search over the grid, scored by CV accuracy.
    grid_search = GridSearchCV(
        estimator=clf,
        param_grid=param_grid,
        cv=cv,
        scoring='accuracy',
        n_jobs=-1,
        verbose=1
    )

    grid_search.fit(X_train, y_train)

    if logger:
        logger.log("\n最优参数:")
        for param, value in grid_search.best_params_.items():
            logger.log(f"{param}: {value}")

        logger.log(f"交叉验证最佳准确率: {grid_search.best_score_:.4f}")

    # The best estimator is already refit on the full training data.
    return grid_search.best_estimator_

# Model evaluation: accuracy on both splits plus a test-set classification report.
def evaluate_model(clf, X_train, X_test, y_train, y_test, logger=None):
    """Return ``(train_accuracy, test_accuracy)`` for an already-fitted classifier.

    When a logger is supplied, also logs a classification report for the
    test split.
    """
    if logger:
        logger.log_section("模型评估")

    # Score both splits with the fitted model.
    pred_train = clf.predict(X_train)
    pred_test = clf.predict(X_test)
    train_accuracy = accuracy_score(y_train, pred_train)
    test_accuracy = accuracy_score(y_test, pred_test)

    if logger:
        logger.log(f"训练集准确率: {train_accuracy:.4f}")
        logger.log(f"测试集准确率: {test_accuracy:.4f}")

        logger.log("\n测试集分类报告:")
        # NOTE(review): assumes encoded label 0 -> '否' and 1 -> '是';
        # verify against the LabelEncoder used upstream.
        logger.log(classification_report(y_test, pred_test, target_names=['否', '是']))

    return train_accuracy, test_accuracy

# Export the fitted tree as text, saving it under output/ and logging it.
def print_decision_tree(clf, feature_names, logger=None, class_names=('否', '是'), output_file='decision_tree.txt'):
    """Write the tree's text representation to ``output/<output_file>`` and return it.

    Fixes: the ``class_names`` default is now a tuple instead of a list
    (avoids the shared mutable-default pitfall), and the output directory
    is created with ``exist_ok=True`` instead of a check-then-create race.

    NOTE(review): ``class_names`` is accepted but never forwarded to
    ``export_text`` — the export shows per-class weights instead. Kept as-is
    for interface compatibility; confirm whether it should be forwarded.
    """
    if logger:
        logger.log_section("决策树文本表示")

    # Human-readable tree with per-node sample weights.
    tree_text = export_text(
        clf,
        feature_names=list(feature_names),
        show_weights=True,
        decimals=2
    )

    # exist_ok avoids the TOCTOU race of os.path.exists + os.makedirs.
    os.makedirs('output', exist_ok=True)
    with open(f'output/{output_file}', 'w', encoding='utf-8') as f:
        f.write(tree_text)

    if logger:
        logger.log(f"决策树文本表示已保存到 'output/{output_file}'")

        # Record the full tree in the log file only (not the console).
        logger.log("\n完整决策树文本表示:", print_to_console=False)
        logger.log(tree_text, print_to_console=False)

    return tree_text

# Entry point: run the full experiment across several train/test split ratios.
def main():
    """Run the watermelon decision-tree experiment end to end.

    Tries several test-set ratios, grid-searches a tree for each split,
    keeps the model with the best test accuracy, and writes its text
    representation plus a run summary to the log file.
    """
    # Create the logger (also creates the timestamped log file).
    logger = Logger()
    
    # Configure matplotlib fonts for Chinese text.
    # NOTE(review): nothing in this function plots anything, so this setup
    # appears unused — confirm before removing.
    import matplotlib.pyplot as plt
    plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]
    plt.rcParams["axes.unicode_minus"] = False  # render minus signs correctly
    
    # Record the experiment configuration.
    logger.log_section("实验配置")
    logger.log(f"西瓜数据集样本数: {len(dataSet)}")
    logger.log(f"特征数: {len(feature_names)}")
    
    # Build the DataFrame from the raw samples.
    df = create_dataframe()
    
    logger.log("\n数据基本信息：")
    # Capture DataFrame.info() output into a buffer instead of stdout.
    import io
    buffer = io.StringIO()
    df.info(buf=buffer)
    logger.log(buffer.getvalue())
    
    # Candidate test-set ratios to compare.
    test_sizes = [0.2, 0.25, 0.3]
    best_accuracy = 0
    best_model = None
    best_feature_names = None
    best_test_size = None
    
    # Guard against an empty ratio list.
    if not test_sizes:
        logger.log("错误：测试集比例列表为空，无法进行实验。")
        return
    
    # Track the first successful iteration so feature names have a fallback
    # even if no model ever beats best_accuracy.
    first_iteration = True
    
    for test_size in test_sizes:
        logger.log_section(f"\n测试集比例: {test_size:.1%}")
        
        try:
            # Preprocess: encode features/labels and split for this ratio.
            X_train, X_test, y_train, y_test, current_feature_names = preprocess_data(df, test_size, logger)
            
            # Save the first split's feature names as a fallback.
            if first_iteration:
                best_feature_names = current_feature_names
                first_iteration = False
            
            # Grid-search the best tree for this split.
            best_clf = optimize_decision_tree(X_train, y_train, logger)
            
            # Evaluate on both splits.
            train_acc, test_acc = evaluate_model(best_clf, X_train, X_test, y_train, y_test, logger)
            
            # Remember the best model seen so far (by test accuracy).
            if test_acc > best_accuracy:
                best_accuracy = test_acc
                best_model = best_clf
                best_feature_names = current_feature_names  # feature names of the winning split
                best_test_size = test_size
                logger.log(f"新的最佳准确率: {best_accuracy:.4f} (测试集比例: {test_size:.1%})")
                
        except Exception as e:
            # Best-effort: log the failure and move on to the next ratio.
            logger.log(f"错误：测试集比例 {test_size:.1%} 的实验失败: {str(e)}", print_to_console=True)
            continue  # try the next test-set ratio
    
    # Export the best model's tree as text, if any run succeeded.
    if best_model and best_feature_names is not None:
        logger.log_section("\n最佳模型详细信息")
        logger.log(f"最佳测试集准确率: {best_accuracy:.4f} (测试集比例: {best_test_size:.1%})")
        logger.log(f"最佳模型参数: {best_model.get_params()}")
        
        print_decision_tree(best_model, best_feature_names, logger, output_file='best_decision_tree.txt')
    else:
        logger.log("警告：未找到有效的最佳模型或特征名称，可能所有实验均失败。")
    
    # Summarize the run.
    logger.log_section("实验总结")
    if best_accuracy > 0:
        logger.log(f"最佳测试集准确率: {best_accuracy:.4f} (测试集比例: {best_test_size:.1%})")
        
        if best_accuracy >= 0.85:
            logger.log("恭喜！模型测试集准确率已达到或超过85%")
        else:
            logger.log(f"模型测试集准确率为 {best_accuracy:.2%}，略低于目标。由于数据集较小，可能存在一定波动。")
    else:
        logger.log("错误：所有实验均失败，未获得有效结果。")
    
    logger.log(f"\n实验日志已保存至: {logger.log_file}")

if __name__ == "__main__":
    main()