import paddle
from paddle.io import Dataset, DataLoader
import paddle.nn as nn
import paddle.nn.functional as F
import paddle.metric as metric
import numpy as np
import gc
import matplotlib.pyplot as plt
import yaml
import os
import argparse
from datetime import datetime
import json
import sys

# Dataset wrapper for TIMIT frame features; labels are optional (inference mode).
class TIMITDataset(Dataset):
    def __init__(self, X, y=None):
        """Hold features as float32 tensors; labels (if given) as int64."""
        self.data = paddle.to_tensor(X, dtype="float32")
        self.label = paddle.to_tensor(y.astype(np.int64)) if y is not None else None

    def __getitem__(self, idx):
        sample = self.data[idx]
        # Unlabelled datasets return a -1 placeholder so the loader's
        # (feature, label) tuple shape stays uniform.
        if self.label is None:
            return sample, -1
        return sample, self.label[idx]

    def __len__(self):
        return len(self.data)

# MLP classifier assembled from a flat config dict.
class Classifier(nn.Layer):
    def __init__(self, config):
        """Build Linear layers from config['layer_sizes'], with optional
        BatchNorm, Dropout and a final Softmax controlled by config flags."""
        super().__init__()
        use_bn = config.get('use_bn', False)
        use_dropout = config.get('use_dropout', False)

        self.layers = nn.LayerList()
        self.bn_layers = nn.LayerList() if use_bn else None
        self.dropout_layers = nn.LayerList() if use_dropout else None
        self.act_func = getattr(nn, config['activation'])()
        self.dropout_rate = config.get('dropout_rate', 0.2)

        # Optionally apply Softmax on the output layer.
        self.use_last_layer_softmax = config.get('use_last_layer_softmax', False)
        if self.use_last_layer_softmax:
            self.last_layer_act = nn.Softmax(axis=1)

        sizes = config['layer_sizes']
        output_idx = len(sizes) - 2  # index of the final Linear layer
        for i, (fan_in, fan_out) in enumerate(zip(sizes[:-1], sizes[1:])):
            self.layers.append(nn.Linear(fan_in, fan_out))
            # BN/Dropout only follow hidden layers, never the output layer.
            if i == output_idx:
                continue
            if use_bn:
                self.bn_layers.append(nn.BatchNorm1D(fan_out))
            if use_dropout:
                self.dropout_layers.append(nn.Dropout(p=self.dropout_rate))

    def forward(self, x):
        """Apply Linear(+BN)+activation(+Dropout) per hidden layer, then the
        output layer (optionally followed by Softmax)."""
        output_idx = len(self.layers) - 1
        for i, layer in enumerate(self.layers):
            x = layer(x)
            if i < output_idx:
                # Hidden layer: BN (if enabled) -> activation -> Dropout (if enabled).
                if self.bn_layers is not None:
                    x = self.bn_layers[i](x)
                x = self.act_func(x)
                if self.dropout_layers is not None:
                    x = self.dropout_layers[i](x)
            elif self.use_last_layer_softmax:
                x = self.last_layer_act(x)
        return x

# Load the experiment configuration.
def load_config(config_path):
    """Parse the YAML file at *config_path* and return its contents as a dict."""
    with open(config_path, 'r', encoding='utf-8') as fh:
        return yaml.safe_load(fh)

# Extract one experiment's flat config from the nested config dict.
def get_experiment_config(config, experiment_name=None):
    """Return the scalar (non-dict) top-level keys of *config*, overlaid with
    the named experiment section when it exists.

    Unknown or missing experiment names fall back to the base config.
    """
    # Base config = every top-level key that is not an experiment section.
    exp_config = {key: val for key, val in config.items() if not isinstance(val, dict)}
    if experiment_name is not None and experiment_name in config:
        exp_config.update(config[experiment_name])
    return exp_config

# Fix random seeds for reproducibility.
def same_seeds(seed):
    """Seed the NumPy and Paddle RNGs with *seed*."""
    np.random.seed(seed)
    paddle.seed(seed)

# Train the model, validating each epoch and checkpointing the best weights.
def train_model(model, train_loader, val_loader, config):
    """Train *model* on *train_loader*, validate on *val_loader* each epoch.

    Saves the best (by validation accuracy) and final model/optimizer states
    under config['model_path']. Returns (loss_record, best_acc) where
    loss_record holds sampled training losses and per-epoch validation losses.
    """
    criterion = nn.CrossEntropyLoss()

    # Adam optimizer; weight_decay implements L2 regularization when enabled.
    if config.get('use_l2_reg', False):
        weight_decay = config.get('weight_decay', 0.0001)
        optimizer = paddle.optimizer.Adam(
            parameters=model.parameters(),
            learning_rate=config['learning_rate'],
            weight_decay=weight_decay
        )
        print(f"使用L2正则化，权重衰减系数: {weight_decay}")
    else:
        optimizer = paddle.optimizer.Adam(
            parameters=model.parameters(),
            learning_rate=config['learning_rate']
        )

    # Directory for checkpoints.
    os.makedirs(config['model_path'], exist_ok=True)

    # Sampled training losses (every 200 batches) and per-epoch val losses.
    loss_record = {'train': [], 'val': []}
    best_acc = 0.0

    for epoch in range(config['epochs']):
        model.train()
        train_loss = 0.0  # sum of per-sample losses as a Python float (graph-free)
        train_num = 0

        accuracy_metric = metric.Accuracy()

        for batch_id, (x_data, y_data) in enumerate(train_loader):
            # Forward pass.
            predicts = model(x_data)
            loss = criterion(predicts, y_data)

            # NOTE(review): paddle.metric.accuracy documents label shape
            # [N, 1]; y_data here looks like [N] — confirm against the
            # installed Paddle version.
            acc = paddle.metric.accuracy(predicts, y_data)
            accuracy_metric.update(acc.numpy())

            if batch_id % 200 == 0:
                loss_record['train'].append(float(loss))
                print(f"epoch: {epoch}, batch_id: {batch_id}, loss is: {loss.numpy()}, acc is: {acc.numpy()}")

            # Backward pass and parameter update.
            loss.backward()
            optimizer.step()
            optimizer.clear_grad()

            # Accumulate as float so no tensors/graph are retained; weight by
            # batch size so the epoch mean is exact even for a ragged last batch
            # (the old `sum(batch_means) / n * batch_size` over-counted it).
            train_loss += float(loss) * len(y_data)
            train_num += len(y_data)

        train_acc = accuracy_metric.accumulate()
        total_train_loss = train_loss / train_num
        print(f"epoch: {epoch}, train loss is: {total_train_loss}, train acc is: {train_acc}")

        # Validation: no gradients needed, so skip building the autograd graph.
        model.eval()
        val_loss = 0.0
        val_num = 0

        val_accuracy_metric = metric.Accuracy()

        with paddle.no_grad():
            for batch_id, (x_data, y_data) in enumerate(val_loader):
                predicts = model(x_data)
                loss = criterion(predicts, y_data)

                acc = paddle.metric.accuracy(predicts, y_data)
                val_accuracy_metric.update(acc.numpy())

                val_loss += float(loss) * len(y_data)
                val_num += len(y_data)

        val_acc = val_accuracy_metric.accumulate()
        total_val_loss = val_loss / val_num
        loss_record['val'].append(total_val_loss)
        print(f"epoch: {epoch}, val loss is: {total_val_loss}, val acc is: {val_acc}")

        # Checkpoint whenever validation accuracy improves.
        if val_acc > best_acc:
            best_acc = val_acc
            paddle.save(model.state_dict(), f"{config['model_path']}/{config['model_name']}_best_model.pdparams")
            paddle.save(optimizer.state_dict(), f"{config['model_path']}/{config['model_name']}_best_optimizer.pdopt")

    # Always save the final state as well.
    paddle.save(model.state_dict(), f"{config['model_path']}/{config['model_name']}_final_model.pdparams")
    paddle.save(optimizer.state_dict(), f"{config['model_path']}/{config['model_name']}_final_optimizer.pdopt")

    return loss_record, best_acc

# Evaluate the best checkpoint on the labelled test set.
def test_model(model, test_loader, config):
    """Load the best saved weights into *model* and evaluate on *test_loader*.

    Prints the mean test loss and accuracy; returns the test accuracy.
    """
    criterion = nn.CrossEntropyLoss()

    # Restore the best checkpoint written by train_model.
    model_state_dict = paddle.load(f"{config['model_path']}/{config['model_name']}_best_model.pdparams")
    model.set_state_dict(model_state_dict)

    model.eval()
    test_loss = 0.0  # sum of per-sample losses as a Python float
    test_num = 0

    test_accuracy_metric = metric.Accuracy()

    # Evaluation only: avoid building the autograd graph.
    with paddle.no_grad():
        for batch_id, (x_data, y_data) in enumerate(test_loader):
            predicts = model(x_data)
            loss = criterion(predicts, y_data)

            acc = paddle.metric.accuracy(predicts, y_data)
            test_accuracy_metric.update(acc.numpy())

            # float() drops tensor references; weighting by batch size makes
            # the mean exact even when the last batch is short.
            test_loss += float(loss) * len(y_data)
            test_num += len(y_data)

    test_acc = test_accuracy_metric.accumulate()
    total_test_loss = test_loss / test_num
    print(f"test loss is: {total_test_loss}, test acc is: {test_acc}")

    return test_acc

# Predict on the unlabelled private test set and write one label per line.
def predict_private_test(model, config):
    """Run the best checkpoint over config['private_test_path'] and write
    argmax class indices (one per line) to config['prediction_file'].

    Best-effort: any failure is logged and swallowed so the surrounding
    experiment run can continue.
    """
    try:
        private_test = np.load(config['private_test_path'])
        print(f"Size of private testing data: {private_test.shape}")

        private_test_set = TIMITDataset(private_test)
        private_test_loader = DataLoader(
            private_test_set,
            batch_size=config['batch_size'],
            shuffle=False
        )

        # Restore the best checkpoint written by train_model.
        model_state_dict = paddle.load(f"{config['model_path']}/{config['model_name']}_best_model.pdparams")
        model.set_state_dict(model_state_dict)

        model.eval()
        predictions = []

        # Inference only: skip the autograd graph.
        with paddle.no_grad():
            for batch_id, data in enumerate(private_test_loader):
                x_data = data[0]
                logits = model(x_data)
                predictions.extend(paddle.argmax(logits, axis=1).cpu().numpy().tolist())

        # Write predictions, one class index per line.
        with open(config['prediction_file'], "w") as f:
            for pred in predictions:
                f.write(f"{pred}\n")

        print(f"Predictions saved to {config['prediction_file']}")
    except Exception as e:
        # Deliberately broad: private-set prediction is optional.
        print(f"Error predicting on private test set: {e}")

# Plot the learning curve (sampled train losses vs per-epoch val losses).
def plot_learning_curve(loss_record, title='', save_path=None):
    """Plot *loss_record* ('train': losses sampled every 200 batches,
    'val': one loss per epoch) and optionally save the figure to *save_path*.
    """
    train_losses = list(loss_record['train'])
    val_losses = list(loss_record['val'])
    total_steps = len(train_losses)
    x_train = range(total_steps)

    plt.figure(figsize=(10, 6))
    plt.plot(x_train, train_losses, c='tab:red', label='train')

    # Spread the sparser validation points evenly over the training steps.
    # Guard the stride computation: the original integer division raised
    # ZeroDivisionError when 'val' was empty (e.g. zero epochs), and the two
    # series could end up with mismatched lengths.
    if val_losses and total_steps >= len(val_losses):
        stride = total_steps // len(val_losses)
        x_val = x_train[stride - 1::stride][:len(val_losses)]
        plt.plot(x_val, val_losses[:len(x_val)], c='tab:cyan', label='val')

    plt.ylim(0.0, 5.)
    plt.xlabel('Training steps')
    plt.ylabel('CE loss')
    plt.title(f'Learning curve of {title}')
    plt.legend()

    if save_path:
        plt.savefig(save_path)
    plt.show()

# Run one experiment end to end: load data, train, test, plot, summarise.
def run_experiment(config):
    """Run a full train/validate/test cycle for one flat experiment config.

    Returns a summary dict: model name, description, best validation
    accuracy, test accuracy and the scalar config values used.
    """
    # Seed all RNGs so the run is reproducible
    same_seeds(config['seed'])
    
    # Select the device; fall back to CPU when GPU setup fails
    try:
        paddle.device.set_device('gpu:0' if config.get('use_gpu', False) else 'cpu')
    except:
        print("GPU不可用，使用CPU进行训练")
        paddle.device.set_device('cpu')
    
    # Load the pre-extracted feature arrays (.npy files under data_root)
    print('Loading data ...')
    train = np.load(config['data_root'] + 'train_x.npy')
    train_label = np.load(config['data_root'] + 'train_y.npy')
    test = np.load(config['data_root'] + 'test_x.npy')
    test_label = np.load(config['data_root'] + 'test_y.npy')
    
    print(f'Size of training data: {train.shape}')
    print(f'Size of testing data: {test.shape}')
    
    # Hold out the tail of the training data as the validation split.
    # NOTE(review): the split is positional, not shuffled — assumes the
    # training arrays are not ordered by class; confirm upstream.
    val_ratio = config['val_ratio']
    percent = int(train.shape[0] * (1 - val_ratio))
    train_x, train_y = train[:percent], train_label[:percent]
    val_x, val_y = train[percent:], train_label[percent:]
    
    print(f'Size of training set: {train_x.shape}')
    print(f'Size of validation set: {val_x.shape}')
    
    # Build datasets and data loaders
    train_set = TIMITDataset(train_x, train_y)
    val_set = TIMITDataset(val_x, val_y)
    test_set = TIMITDataset(test, test_label)
    
    train_loader = DataLoader(train_set, batch_size=config['batch_size'], shuffle=True)
    val_loader = DataLoader(val_set, batch_size=config['batch_size'], shuffle=False)
    test_loader = DataLoader(test_set, batch_size=config['batch_size'], shuffle=False)
    # Free the raw numpy arrays to save memory (the datasets hold tensor copies)
    del train, train_label, train_x, train_y, val_x, val_y
    gc.collect()
    
    # Build the model
    model = Classifier(config)
    
    # Print the experiment configuration
    print(f"Model: {config['model_name']}")
    print(f"Description: {config.get('description', 'N/A')}")
    print(f"Layer sizes: {config['layer_sizes']}")
    print(f"Activation: {config['activation']}")
    print(f"Batch Normalization: {config.get('use_bn', False)}")
    print(f"Dropout: {config.get('use_dropout', False)}")
    if config.get('use_dropout', False):
        print(f"Dropout rate: {config.get('dropout_rate', 0.2)}")
    print(f"L2 Regularization: {config.get('use_l2_reg', False)}")
    if config.get('use_l2_reg', False):
        print(f"Weight Decay: {config.get('weight_decay', 0.0001)}")
    print(f"Last Layer Softmax: {config.get('use_last_layer_softmax', False)}")
    print(f"Epochs: {config['epochs']}")
    print(f"Learning rate: {config['learning_rate']}")
    
    # Train the model
    loss_record, best_val_acc = train_model(model, train_loader, val_loader, config)
    
    # Evaluate on the held-out test set (reloads the best checkpoint)
    test_acc = test_model(model, test_loader, config)
    
    # Plot and save the learning curve
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    model_name = config['model_name']
    plot_learning_curve(loss_record, title=model_name, 
                       save_path=f"{config['model_path']}/{model_name}_{timestamp}_curve.png")
    
    # Optionally predict on the unlabelled private test set
    if config.get('predict_private', False):
        predict_private_test(model, config)
    
    # Collect the experiment summary (scalar config values only)
    results = {
        'model_name': model_name,
        'description': config.get('description', 'N/A'),
        'val_acc': float(best_val_acc),
        'test_acc': float(test_acc),
        'config': {k: v for k, v in config.items() if not isinstance(v, dict)}
    }
    
    return results

def run_all_experiments(config, experiments=None):
    """Run multiple experiments and print a summary table.

    When *experiments* is None, every dict-valued section of *config* is
    treated as an experiment and the baseline (scalar keys only) runs first.
    All results are also dumped to experiment_results.json (overwritten).
    """
    results = []
    
    if experiments is None:
        # Discover all experiment sections (dict-valued top-level keys)
        experiments = [name for name in config.keys() if isinstance(config[name], dict)]
        # Run the baseline configuration first
        print("\n" + "="*50)
        print(f"Running baseline model...")
        print("="*50)
        base_config = get_experiment_config(config)
        results.append(run_experiment(base_config))
    
    # Run each requested experiment
    for experiment in experiments:
        print("\n" + "="*50)
        print(f"Running experiment: {experiment}")
        print("="*50)
        
        exp_config = get_experiment_config(config, experiment)
        results.append(run_experiment(exp_config))
    
    # Print the summary table
    print("\n" + "="*80)
    print("实验结果汇总")
    print("="*80)
    print(f"{'方法':<20}{'验证集准确率':<15}{'测试集准确率':<15}")
    print("-"*80)
    
    for result in results:
        print(f"{result['model_name']:<20}{result['val_acc']:<15.4f}{result['test_acc']:<15.4f}")
    
    print("="*80)
    
    # Persist all results to a JSON file (overwrites any existing file)
    results_file = 'experiment_results.json'
    with open(results_file, 'w', encoding='utf-8') as f:
        json.dump(results, f, indent=2, ensure_ascii=False)
    
    print(f"结果已保存到 {results_file}")
    
    return results

def save_single_result(result):
    """Update or append a single experiment result in experiment_results.json.

    Entries are keyed by 'model_name': an existing entry with the same name
    is replaced, otherwise the result is appended. Read/parse failures fall
    back to starting a fresh list; write failures are logged.
    """
    results_file = 'experiment_results.json'

    # Load any existing results, tolerating a missing/corrupt/unexpected file.
    all_results = []
    if os.path.exists(results_file):
        try:
            with open(results_file, 'r', encoding='utf-8') as f:
                all_results = json.load(f)
            if not isinstance(all_results, list):  # basic sanity check
                print(f"警告: {results_file} 文件内容不是有效的列表。将创建新列表。")
                all_results = []
        except json.JSONDecodeError:
            print(f"警告: 无法解析 {results_file}。将创建新列表。")
            all_results = []
        except Exception as e:
            print(f"警告: 读取 {results_file} 时出错: {e}。将创建新列表。")
            all_results = []

    # Reject results we cannot key by model_name.
    if not (isinstance(result, dict) and 'model_name' in result):
        print("错误：实验结果格式无效，无法保存。")
        return

    # Replace the entry with the same model_name if present; else append.
    for idx, entry in enumerate(all_results):
        if isinstance(entry, dict) and entry.get('model_name') == result['model_name']:
            all_results[idx] = result
            print(f"已更新 {result['model_name']} 的结果到 {results_file}")
            break
    else:
        all_results.append(result)
        print(f"已追加 {result['model_name']} 的结果到 {results_file}")

    # Write the updated list back to disk.
    try:
        with open(results_file, 'w', encoding='utf-8') as f:
            json.dump(all_results, f, indent=2, ensure_ascii=False)
        print(f"成功保存更新后的结果到 {results_file}")
    except Exception as e:
        print(f"保存更新后的结果到 {results_file} 时出错: {e}")

def main():
    """CLI entry point: parse arguments, load the config, run experiment(s).

    --all runs every experiment section plus the baseline; --experiment NAME
    runs one named section; with neither, the baseline config is run. Single
    runs are saved via save_single_result and summarised on stdout.
    """
    parser = argparse.ArgumentParser(description='TIMIT Phoneme Classification')
    parser.add_argument('--config', type=str, default='config.yaml', help='Path to configuration file')
    parser.add_argument('--experiment', type=str, help='Run specific experiment from config')
    parser.add_argument('--all', action='store_true', help='Run all experiments from config')
    args = parser.parse_args()

    # Load the (nested) configuration
    config = load_config(args.config)

    if args.all:
        # Run all experiments and save the combined results
        run_all_experiments(config)
        return

    if args.experiment:
        # Run one named experiment; fail fast when the section is missing
        if args.experiment not in config:
            print(f"Error: Experiment '{args.experiment}' not found in config file.")
            sys.exit(1)
        exp_config = get_experiment_config(config, args.experiment)
    else:
        # No experiment named: run the baseline configuration
        exp_config = get_experiment_config(config)

    # Shared single-run path (the two branches previously duplicated this)
    results = run_experiment(exp_config)

    # Save the single experiment result (update or append)
    save_single_result(results)

    # Print the run summary
    print("\n" + "="*50)
    print(f"Model: {results['model_name']}")
    print(f"Validation Accuracy: {results['val_acc']:.4f}")
    print(f"Test Accuracy: {results['test_acc']:.4f}")
    print("="*50)

# Run the CLI only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
