import os
import torch
from torch.utils.data import DataLoader
from model_wavelet import ECG_Autoencoder_Classifier_Lstm, ECG_Autoencoder_Classifier_Transformer, ECG_Autoencoder_Classifier_Mamba, ECG_Autoencoder_Classifier_Cnn
from data_loader import ECGDataset
from model_time import ChannelWiseCNN
import pandas as pd
import numpy as np
from collections import defaultdict

def load_all_models(base_path, config, device):
    """Load every available pretrained model and its ensemble weight.

    For each known architecture, loads ``<base_path>/<name>/model_weights.pth``
    (skipping missing ones with a message) and reads the model's accuracy from
    ``<base_path>/<name>/evaluation_report.txt`` to use as its ensemble weight.

    Args:
        base_path: Directory containing one sub-directory per model name.
        config: Dict with 'input_dim', 'hidden_dim', 'latent_dim', 'num_layers'.
        device: torch device the models are moved to.

    Returns:
        (models, weights): dict of name -> eval-mode model, and dict of
        name -> normalized weight (weights sum to 1).

    Raises:
        FileNotFoundError: if no model checkpoint could be found at all.
    """
    models = {}
    weights = {}

    model_classes = {
        'WaveletLstm': ECG_Autoencoder_Classifier_Lstm,
        'WaveletTransformer': ECG_Autoencoder_Classifier_Transformer,
        #'WaveletMamba': ECG_Autoencoder_Classifier_Mamba,
        #'WaveletCnn': ECG_Autoencoder_Classifier_Cnn,
        'TimeCNN': ChannelWiseCNN
    }

    for model_name, model_cls in model_classes.items():
        # Load the checkpoint for this architecture, if present.
        model_path = os.path.join(base_path, model_name, 'model_weights.pth')
        if not os.path.exists(model_path):
            print(f"找不到模型文件: {model_path}")
            continue

        model = model_cls(
            input_dim=config['input_dim'],
            hidden_dim=config['hidden_dim'],
            latent_dim=config['latent_dim'],
            num_layers=config['num_layers']
        )
        checkpoint = torch.load(model_path, map_location=device)
        model.load_state_dict(checkpoint)
        model = model.to(device)
        model.eval()
        models[model_name] = model

        # Parse the accuracy score from the evaluation report; it becomes
        # this model's (unnormalized) ensemble weight.
        report_path = os.path.join(base_path, model_name, 'evaluation_report.txt')
        if os.path.exists(report_path):
            with open(report_path, 'r') as f:
                for line in f:
                    if 'Accuracy:' in line:
                        weights[model_name] = float(line.split(':')[1].strip())
                        break

    if not models:
        # Fail loudly instead of returning an empty ensemble; the caller
        # reports load failures and aborts.
        raise FileNotFoundError(f"no model checkpoints found under {base_path}")

    # BUG FIX: if no report supplied an accuracy, the old code divided by
    # zero below. Fall back to uniform weights over the loaded models.
    if not weights:
        weights = {name: 1.0 for name in models}

    # Normalize weights so they sum to 1.
    total_weight = sum(weights.values())
    weights = {k: v / total_weight for k, v in weights.items()}

    return models, weights

def save_individual_results(predictions, probabilities, model_name, base_output_path):
    """Persist one model's per-file predictions as a CSV next to the ensemble output.

    Args:
        predictions: dict of file_id -> 0/1 label.
        probabilities: dict of file_id -> positive-class probability.
        model_name: used to build the file name ``<model_name>_predictions.csv``.
        base_output_path: the ensemble CSV path; the model CSV is written
            into the same directory.

    Returns:
        The DataFrame that was written, sorted by file_id.
    """
    target_dir = os.path.dirname(base_output_path)
    csv_path = os.path.join(target_dir, f'{model_name}_predictions.csv')

    frame = pd.DataFrame({
        'file_id': list(predictions.keys()),
        'prediction': list(predictions.values()),
        'probability': list(probabilities.values()),
    }).sort_values('file_id')

    frame.to_csv(csv_path, index=False)
    return frame

def predict_with_all_models(models, weights, dataloader, device):
    """Run every model over the dataloader and compute a weighted ensemble.

    Args:
        models: dict of model_name -> eval-mode model.
        weights: dict of model_name -> normalized ensemble weight (a model
            may legitimately be absent if no evaluation report was found).
        dataloader: yields (inputs, _, sample_info) batches.
        device: torch device used for inference.

    Returns:
        (ensemble_predictions, ensemble_probabilities, individual_results):
        per-file 0/1 labels, per-file weighted probabilities, and a dict of
        model_name -> (predictions, probabilities) for each single model.
    """
    all_predictions = defaultdict(dict)
    all_probabilities = defaultdict(dict)
    file_ids_set = set()

    # Per-model aggregated results, kept so callers can save/inspect them.
    individual_results = {}

    for model_name, model in models.items():
        print(f"\n正在使用 {model_name} 模型进行预测...")
        predictions, probabilities, file_ids, segment_indices = predict_samples(model, dataloader, device)

        # Collapse per-segment outputs into one result per file.
        final_predictions, final_probabilities = aggregate_predictions(
            predictions, probabilities, file_ids, segment_indices
        )

        individual_results[model_name] = (final_predictions, final_probabilities)

        for file_id in final_predictions:
            all_predictions[file_id][model_name] = final_predictions[file_id]
            all_probabilities[file_id][model_name] = final_probabilities[file_id]
            file_ids_set.add(file_id)

    # Weighted average over the models that produced a probability for each file.
    ensemble_predictions = {}
    ensemble_probabilities = {}

    for file_id in file_ids_set:
        weighted_prob = 0.0
        total_weight = 0.0

        for model_name in models:
            # BUG FIX: the old code did weights[model_name] and raised
            # KeyError for models loaded without an accuracy report.
            weight = weights.get(model_name)
            if weight is None or model_name not in all_probabilities[file_id]:
                continue
            weighted_prob += all_probabilities[file_id][model_name] * weight
            total_weight += weight

        if total_weight > 0:
            final_prob = weighted_prob / total_weight
            ensemble_probabilities[file_id] = final_prob
            # Same 0.55 decision threshold as the per-segment predictor.
            ensemble_predictions[file_id] = 1 if final_prob >= 0.55 else 0

    return ensemble_predictions, ensemble_probabilities, individual_results

def load_model(model_path, config, device):
    """Load a single pretrained CNN classifier from *model_path*.

    Args:
        model_path: path to a state-dict checkpoint file.
        config: dict with 'input_dim', 'hidden_dim', 'latent_dim', 'num_layers'.
        device: torch device the model is moved to.

    Returns:
        The model on *device*, switched to eval mode.
    """
    net = ECG_Autoencoder_Classifier_Cnn(
        input_dim=config['input_dim'],
        hidden_dim=config['hidden_dim'],
        latent_dim=config['latent_dim'],
        num_layers=config['num_layers'],
    )
    state = torch.load(model_path, map_location=device)
    net.load_state_dict(state)
    net = net.to(device)
    # eval() returns the module itself, so this is the same object.
    return net.eval()

def predict_samples(model, dataloader, device):
    """Run *model* over every batch and collect per-segment predictions.

    Args:
        model: callable returning logits for a batch of inputs.
        dataloader: yields (inputs, _, sample_info) where each sample_info
            entry is a string of the form "<file_id>_segment_<idx>".
        device: torch device for inference.

    Returns:
        (predictions, probabilities, file_ids, segment_indices): parallel
        lists — per-sample 0/1 arrays, sigmoid probabilities, file ids, and
        integer segment indices.
    """
    predictions = []
    probabilities = []
    file_ids = []
    segment_indices = []

    with torch.no_grad():
        for batch in dataloader:
            inputs, _, sample_info = batch
            inputs = inputs.to(device)

            # sample_info entries look like "<file_id>_segment_<idx>".
            # BUG FIX: rsplit from the right keeps file ids that themselves
            # contain underscores intact; the old 3-way split('_') raised a
            # ValueError (too many values to unpack) on e.g. "a_b_segment_0".
            for info in sample_info:
                file_id, _, seg_idx = info.rsplit('_', 2)
                file_ids.append(file_id)
                segment_indices.append(int(seg_idx))

            # Binary classification: sigmoid probability, 0.55 threshold
            # (matches the threshold used when ensembling).
            logits = model(inputs)
            probs = torch.sigmoid(logits)
            preds = (probs >= 0.55).float()

            predictions.extend(preds.cpu().numpy())
            probabilities.extend(probs.cpu().numpy())

    return predictions, probabilities, file_ids, segment_indices

def aggregate_predictions(predictions, probabilities, file_ids, segment_indices):
    """Collapse per-segment outputs into one prediction per file.

    Groups the segment probabilities by file id, averages them, and labels
    the file positive when the mean probability reaches the 0.55 threshold.

    Args:
        predictions: per-segment 0/1 arrays (parallel to file_ids; only the
            probabilities drive the final label).
        probabilities: per-segment probability arrays (first element used).
        file_ids: file id of each segment.
        segment_indices: segment index of each entry (unused in aggregation).

    Returns:
        (final_predictions, final_probabilities): dicts keyed by file_id.
    """
    grouped_probs = defaultdict(list)
    for prob, file_id in zip(probabilities, file_ids):
        # Each prob is a length-1 array for the positive class.
        grouped_probs[file_id].append(prob[0])

    final_predictions = {}
    final_probabilities = {}
    for file_id, probs in grouped_probs.items():
        mean_prob = np.mean(probs)
        final_probabilities[file_id] = mean_prob
        final_predictions[file_id] = 1 if mean_prob >= 0.55 else 0

    return final_predictions, final_probabilities

def save_results(predictions, probabilities, output_path):
    """Write the per-file ensemble predictions to *output_path* as CSV.

    Args:
        predictions: dict of file_id -> 0/1 label.
        probabilities: dict of file_id -> ensemble probability.
        output_path: destination CSV path.

    Returns:
        The DataFrame that was written, sorted by file_id.
    """
    frame = pd.DataFrame({
        'file_id': list(predictions.keys()),
        'prediction': list(predictions.values()),
        'probability': list(probabilities.values()),
    })
    # Sort by file id so the output order is deterministic.
    frame = frame.sort_values('file_id')
    frame.to_csv(output_path, index=False)
    return frame

def main():
    """Entry point: load the model ensemble, predict, and save CSV results."""
    # Paths and batching configuration.
    predict_data_dir = 'data/processed/predict_data'
    models_base_path = 'results/final_model'
    output_path = 'results/predictions/ensemble_predictions.csv'
    batch_size = 32

    # Make sure the output directory exists before any CSV is written.
    os.makedirs(os.path.dirname(output_path), exist_ok=True)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Shared architecture hyper-parameters for every model class.
    model_config = {
        'input_dim': 1,
        'hidden_dim': 256,
        'latent_dim': 128,
        'num_layers': 8,
    }

    # Deterministic, single-process loading of the prediction set.
    predict_loader = DataLoader(
        ECGDataset(predict_data_dir),
        batch_size=batch_size,
        shuffle=False,
        num_workers=0,
    )

    # Load every pretrained model together with its ensemble weight.
    try:
        models, weights = load_all_models(models_base_path, model_config, device)
        print("模型权重分配:", weights)
    except Exception as e:
        print(f"模型加载失败: {str(e)}")
        return

    # Weighted-ensemble inference over all loaded models.
    final_predictions, final_probabilities, individual_results = predict_with_all_models(
        models, weights, predict_loader, device
    )

    # Save and show each model's standalone predictions.
    print("\n各模型独立预测结果:")
    for model_name, (predictions, probabilities) in individual_results.items():
        model_df = save_individual_results(predictions, probabilities, model_name, output_path)
        print(f"\n{model_name} 模型预测结果:")
        print(model_df)

    # Save and show the ensemble predictions.
    ensemble_df = save_results(final_predictions, final_probabilities, output_path)
    print("\n最终集成预测结果:")
    print(ensemble_df)
    print(f"\n预测结果已保存到: {output_path}")

# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
