import os
import torch
import yaml
import json
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from torch.utils.data import ConcatDataset, DataLoader
import argparse
import time

from models.fault_diagnosis import DiagnosisModel
from models.condition_diffusion_model import ConditionedDDPM, DDPMTrainer
from data.bjut import single_domain_dataloader, BJUTDataset

class TrainingPipeline:
    """Three-stage cross-domain fault-diagnosis training pipeline.

    Stage 1 trains an initial diagnosis classifier on the source domain,
    stage 2 trains a feature-anchored conditional DDPM that uses the
    classifier as a feature extractor, and stage 3 fine-tunes the
    classifier on real source-domain data mixed with DDPM-generated
    samples, validating on the target domain.
    """

    def __init__(self, config_path='config/train.yaml'):
        """Load configuration, create the experiment directory and pick
        the compute device.

        Args:
            config_path: Path to a YAML file whose top-level ``train`` key
                holds all pipeline settings.
        """
        # Load configuration (only the 'train' section is used).
        with open(config_path, 'r', encoding='utf-8') as f:
            self.config = yaml.safe_load(f)['train']

        # One timestamped directory per run so repeated experiments never
        # overwrite each other's artifacts.
        self.exp_name = f"{self.config['domain_name']}_{time.strftime('%Y%m%d_%H%M%S')}"
        self.exp_dir = os.path.join(self.config['save_dir'], self.exp_name)
        os.makedirs(self.exp_dir, exist_ok=True)

        # Honour the configured device but fall back to CPU when CUDA is
        # unavailable. Also remember the matching Lightning accelerator /
        # devices settings so every pl.Trainer below stays consistent with
        # self.device (previously accelerator='gpu' was hard-coded, which
        # crashes on CPU-only machines even when the config asks for cpu).
        if self.config['device'] == "cuda" and torch.cuda.is_available():
            self.device = torch.device('cuda')
            self.accelerator, self.devices = 'gpu', [0]
        else:
            self.device = torch.device('cpu')
            self.accelerator, self.devices = 'cpu', 1

        # Snapshot the effective config next to the artifacts for
        # reproducibility.
        with open(os.path.join(self.exp_dir, 'config.json'), 'w') as f:
            json.dump(self.config, f, indent=4)

    def _build_trainer(self, max_epochs, ckpt_subdir, ckpt_filename, monitor,
                       log_save_dir, log_name):
        """Build a ``pl.Trainer`` with checkpointing and TensorBoard logging.

        Centralises the Trainer construction that was duplicated across the
        three training stages; the accelerator follows the device chosen in
        ``__init__`` instead of being hard-coded to GPU.
        """
        return pl.Trainer(
            max_epochs=max_epochs,
            accelerator=self.accelerator,
            devices=self.devices,
            callbacks=[
                ModelCheckpoint(
                    dirpath=os.path.join(self.exp_dir, ckpt_subdir),
                    filename=ckpt_filename,
                    save_top_k=3,
                    monitor=monitor
                )
            ],
            logger=TensorBoardLogger(save_dir=log_save_dir, name=log_name)
        )

    def train_initial_classifier(self):
        """Stage 1: train the initial fault-diagnosis classifier.

        Returns:
            Tuple ``(model, classifier_path)``: the trained
            :class:`DiagnosisModel` and the path of its saved state_dict.
        """
        print("=== 阶段1: 训练初始故障诊断器 ===")

        model = DiagnosisModel(
            input_dim=self.config['input_dim'],
            num_classes=self.config['num_classes'],
            lr=float(self.config['classifier'].get('lr', 1e-3))
        )

        # Source-domain train/val loaders.
        train_loader = single_domain_dataloader(
            data_path=self.config['data_path'],
            domain_name=self.config['domain_name'],
            batch_size=self.config['classifier']['batch_size'],
            split='train'
        )
        val_loader = single_domain_dataloader(
            data_path=self.config['data_path'],
            domain_name=self.config['domain_name'],
            batch_size=self.config['classifier']['batch_size'],
            split='val'
        )

        trainer = self._build_trainer(
            max_epochs=self.config['classifier']['num_epochs'],
            ckpt_subdir='classifier',
            ckpt_filename='classifier-{epoch:02d}-{val_loss:.4f}',
            monitor='val_loss',
            log_save_dir=os.path.join(self.exp_dir, 'logs'),
            log_name='classifier'
        )
        trainer.fit(model, train_loader, val_loader)

        # Persist the last-epoch weights (the checkpoint callback above
        # additionally keeps the top-3 epochs by val_loss).
        classifier_path = os.path.join(self.exp_dir, 'classifier', 'final_model.pt')
        torch.save(model.state_dict(), classifier_path)
        print(f"初始分类器已保存到: {classifier_path}")

        return model, classifier_path

    def train_ddpm(self, feature_extractor):
        """Stage 2: train the feature-anchored conditional DDPM.

        Args:
            feature_extractor: Trained classifier whose features anchor the
                DDPM's feature-matching loss.

        Returns:
            Tuple ``(ddpm, ddpm_path)``: the trained
            :class:`ConditionedDDPM` and the path of its saved state_dict.
        """
        print("=== 阶段2: 训练特征锚定DDPM ===")

        ddpm = ConditionedDDPM(
            feature_extractor=feature_extractor,
            num_classes=self.config['num_classes'],
            cond_dim=self.config['ddpm']['cond_dim'],
            timesteps=self.config['ddpm']['timesteps'],
            use_features=True,
            feature_matching_weight=self.config['ddpm']['feature_matching_weight']
        )

        # Source-domain training data only; the DDPM has no val loop.
        train_loader = single_domain_dataloader(
            data_path=self.config['data_path'],
            domain_name=self.config['domain_name'],
            batch_size=self.config['ddpm']['batch_size'],
            split='train'
        )

        ddpm_trainer = DDPMTrainer(ddpm, lr=float(self.config['ddpm']['lr']))
        trainer = self._build_trainer(
            max_epochs=self.config['ddpm']['num_epochs'],
            ckpt_subdir='ddpm',
            ckpt_filename='ddpm-{epoch:02d}-{train_loss:.4f}',
            monitor='loss/train_loss',
            log_save_dir=os.path.join(self.exp_dir, 'logs', 'ddpm'),
            log_name='ddpm'
        )
        trainer.fit(ddpm_trainer, train_loader)

        ddpm_path = os.path.join(self.exp_dir, 'ddpm', 'final_model.pt')
        torch.save(ddpm.state_dict(), ddpm_path)
        print(f"DDPM已保存到: {ddpm_path}")

        return ddpm, ddpm_path

    def _generate_synthetic_samples(self, ddpm, samples_dir):
        """Draw ``num_synthetic_per_class`` samples per class from the DDPM.

        Sampling runs in batches of ``generation_batch_size`` (default 32)
        to bound peak memory. Per-class tensors and the merged dataset are
        saved under ``samples_dir``.

        Returns:
            Tuple ``(data, labels)``: CPU tensors holding every generated
            sample and its integer class label.
        """
        num_synthetic = self.config['enhancement']['num_synthetic_per_class']
        batch_size = self.config['enhancement'].get('generation_batch_size', 32)
        synthetic_data = []
        synthetic_labels = []

        # Ensure the model lives on the same device as the label tensors
        # built below (it may still be on another device after training).
        ddpm = ddpm.to(self.device)
        ddpm.eval()
        with torch.no_grad():
            for label in range(self.config['num_classes']):
                label_samples = []  # samples of the current class only
                remaining = num_synthetic
                while remaining > 0:
                    current_batch = min(batch_size, remaining)
                    labels = torch.full((current_batch,), label, device=self.device)
                    samples, _ = ddpm.sample(labels)
                    synthetic_data.append(samples.cpu())
                    synthetic_labels.extend([label] * current_batch)
                    label_samples.append(samples.cpu())
                    remaining -= current_batch
                    if torch.cuda.is_available():
                        # Release cached blocks between batches to keep
                        # peak GPU memory low during long generation runs.
                        torch.cuda.empty_cache()

                # Save every sample of the current class in one file.
                label_samples = torch.cat(label_samples, dim=0)
                save_path = os.path.join(samples_dir, f'class_{label}_samples.pt')
                torch.save({
                    'samples': label_samples,
                    'label': label,
                    'source_domain': self.config['domain_name'],
                    'target_domain': self.config['enhancement']['target_domain'],
                    'num_samples': num_synthetic
                }, save_path)
                print(f"类别 {label} 的 {num_synthetic} 个样本已保存到: {save_path}")

        # Also save the merged synthetic dataset for later reuse.
        print("合并生成的样本...")
        synthetic_data = torch.cat(synthetic_data, dim=0)
        synthetic_labels = torch.tensor(synthetic_labels)
        complete_save_path = os.path.join(samples_dir, 'complete_synthetic_dataset.pt')
        torch.save({
            'data': synthetic_data,
            'labels': synthetic_labels,
            'source_domain': self.config['domain_name'],
            'target_domain': self.config['enhancement']['target_domain'],
            'num_samples_per_class': num_synthetic,
            'total_samples': len(synthetic_labels)
        }, complete_save_path)
        print(f"完整的生成数据集已保存到: {complete_save_path}")

        return synthetic_data, synthetic_labels

    def enhance_classifier(self, initial_classifier, ddpm):
        """Stage 3: fine-tune the classifier on real + generated samples.

        Training mixes the real source-domain set with DDPM-generated
        samples; validation uses the target domain's ``train`` split, since
        the objective is cross-domain generalisation.

        Args:
            initial_classifier: Stage-1 model whose weights seed the
                fine-tuned classifier.
            ddpm: Stage-2 generative model used to synthesise samples.

        Returns:
            Tuple ``(enhanced_classifier, enhanced_path)``.
        """
        print("=== 阶段3: 使用生成样本增强分类器 ===")

        # Directory for the generated samples.
        samples_dir = os.path.join(self.exp_dir, 'generated_samples')
        os.makedirs(samples_dir, exist_ok=True)

        synthetic_data, synthetic_labels = self._generate_synthetic_samples(ddpm, samples_dir)

        print("准备数据集...")
        # Real source-domain training data.
        real_train_dataset = BJUTDataset(
            data_path=self.config['data_path'],
            domain_name=self.config['domain_name'],
            split='train'
        )

        # Target-domain data used as the validation set.
        target_val_dataset = BJUTDataset(
            data_path=self.config['data_path'],
            domain_name=self.config['enhancement']['target_domain'],
            split='train'  # target domain's train split serves as val
        )

        # Training set = real source data + synthetic data.
        train_dataset = ConcatDataset([
            real_train_dataset,
            torch.utils.data.TensorDataset(synthetic_data, synthetic_labels)
        ])

        train_loader = DataLoader(
            train_dataset,
            batch_size=min(64, self.config['enhancement']['batch_size']),
            shuffle=True,
            num_workers=4,
            pin_memory=True
        )
        val_loader = DataLoader(
            target_val_dataset,
            batch_size=min(64, self.config['enhancement']['batch_size']),
            shuffle=False,
            num_workers=4,
            pin_memory=True
        )

        # Fine-tune starting from the stage-1 weights rather than from
        # scratch, so the enhancement only has to adapt, not re-learn.
        enhanced_classifier = DiagnosisModel(
            input_dim=self.config['input_dim'],
            num_classes=self.config['num_classes'],
            lr=float(self.config['enhancement'].get('lr', 1e-3))
        )
        enhanced_classifier.load_state_dict(initial_classifier.state_dict())

        trainer = self._build_trainer(
            max_epochs=self.config['enhancement']['num_epochs'],
            ckpt_subdir='enhanced_classifier',
            ckpt_filename='enhanced-{epoch:02d}-{val_loss:.4f}',
            monitor='val_loss',
            log_save_dir=os.path.join(self.exp_dir, 'logs', 'enhanced_classifier'),
            log_name='enhanced_classifier'
        )
        trainer.fit(enhanced_classifier, train_loader, val_loader)

        enhanced_path = os.path.join(self.exp_dir, 'enhanced_classifier', 'final_model.pt')
        torch.save(enhanced_classifier.state_dict(), enhanced_path)
        print(f"增强分类器已保存到: {enhanced_path}")

        return enhanced_classifier, enhanced_path

    def run_pipeline(self, start_stage=1, classifier_path=None, ddpm_path=None):
        """Run the pipeline from ``start_stage`` onward.

        Args:
            start_stage: 1, 2 or 3. Stages before it are skipped and their
                artifacts must be supplied via the ``*_path`` arguments.
            classifier_path: Pretrained classifier state_dict (required when
                ``start_stage`` > 1).
            ddpm_path: Pretrained DDPM state_dict (required when
                ``start_stage`` == 3).

        Returns:
            Dict mapping stage names to saved-model paths.

        Raises:
            ValueError: If a required pretrained-model path is missing.
        """
        results = {}
        classifier = None

        # Stage 1: train (or load) the initial classifier.
        if start_stage == 1:
            classifier, classifier_path = self.train_initial_classifier()
            results['initial_classifier'] = classifier_path
        elif classifier_path:
            print("=== 加载预训练分类器 ===")
            classifier = DiagnosisModel(
                input_dim=self.config['input_dim'],
                num_classes=self.config['num_classes'],
                lr=float(self.config['classifier'].get('lr', 1e-3))
            )
            classifier.load_state_dict(torch.load(classifier_path, map_location=self.device))
            classifier = classifier.to(self.device)
            classifier.eval()
            results['initial_classifier'] = classifier_path
        else:
            raise ValueError("需要提供预训练分类器路径以开始阶段2或3")

        # Stage 2: train (or load) the DDPM.
        if start_stage <= 2:
            ddpm, ddpm_path = self.train_ddpm(classifier)
            results['ddpm'] = ddpm_path
        elif ddpm_path and start_stage == 3:
            print("=== 加载预训练DDPM ===")
            ddpm = ConditionedDDPM(
                feature_extractor=classifier,
                num_classes=self.config['num_classes'],
                cond_dim=self.config['ddpm']['cond_dim'],
                timesteps=self.config['ddpm']['timesteps'],
                use_features=True,
                feature_matching_weight=self.config['ddpm']['feature_matching_weight']
            )
            ddpm.load_state_dict(torch.load(ddpm_path, map_location=self.device))
            ddpm = ddpm.to(self.device)
            ddpm.eval()
            results['ddpm'] = ddpm_path
        elif start_stage == 3:
            raise ValueError("需要提供预训练DDPM路径以开始阶段3")

        # Stage 3: enhance the classifier with generated samples.
        if start_stage <= 3:
            enhanced_classifier, enhanced_path = self.enhance_classifier(classifier, ddpm)
            results['enhanced_classifier'] = enhanced_path

        print("\n=== 训练完成 ===")
        print(f"所有模型和日志已保存到: {self.exp_dir}")
        return results

if __name__ == "__main__":
    # CLI entry point: choose a config file and optionally resume the
    # pipeline from a later stage with pretrained model weights.
    cli = argparse.ArgumentParser(description='训练流程控制')
    cli.add_argument('--config', type=str, default='config/train.yaml', help='配置文件路径')
    cli.add_argument('--start_stage', type=int, default=1, choices=[1, 2, 3], help='开始的训练阶段')
    cli.add_argument('--classifier_path', type=str, help='预训练分类器路径')
    cli.add_argument('--ddpm_path', type=str, help='预训练DDPM路径')
    opts = cli.parse_args()

    # Build the pipeline and execute the requested stages.
    pipeline = TrainingPipeline(opts.config)
    results = pipeline.run_pipeline(
        start_stage=opts.start_stage,
        classifier_path=opts.classifier_path,
        ddpm_path=opts.ddpm_path,
    )
