import os
import time
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, random_split
from torch.utils.tensorboard import SummaryWriter
from CircledResidualUNet import CircledResidualUNet, L_data
import matplotlib.pyplot as plt


class PPGDataset(Dataset):
    """Sliding-window dataset over a PPG/ECG/ABP recording stored as CSV.

    Each item is a float32 tensor of shape (window_length, 4) holding the
    columns [time, pleth, ecg_ii, abp_aligned] for one non-overlapping
    window of consecutive rows.
    """

    def __init__(self, folder_path, window_length=250, start=0, end=None):
        # Everything is loaded eagerly into one tensor; __getitem__ just indexes.
        self.data = self.load_data(folder_path, window_length, start, end)

    def load_data(self, folder_path, window_length=250, start=0, end=None):
        """Read the CSV and split the selected row range into windows.

        Args:
            folder_path: path to a CSV file (despite the name) whose '#' lines
                are treated as comments.
            window_length: number of consecutive rows per window.
            start, end: row range to use (Python slice semantics).

        Returns:
            float32 tensor of shape (N, window_length, 4). N is 0 (with the
            window dimensions preserved) when the range is shorter than one
            window; trailing rows that do not fill a whole window are dropped.
        """
        df = pd.read_csv(folder_path, comment='#')
        data = df[['time', 'pleth', 'ecg_ii', 'abp_aligned']].values

        # Restrict to the requested row range.
        data = data[start:end]

        # Non-overlapping windows via one vectorized reshape instead of a
        # Python loop of per-segment appends. This also keeps the shape
        # (0, window_length, 4) when there is not even one full window,
        # where the old loop produced a malformed (0,)-shaped tensor.
        n_windows = len(data) // window_length
        windows = data[:n_windows * window_length].reshape(
            n_windows, window_length, data.shape[1])

        final_tensor = torch.tensor(windows, dtype=torch.float32)
        print(f"数据集大小: {final_tensor.shape}")  # (N, window_length, 4)
        return final_tensor

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]

class UNetTrainer:
    """Train/validate loop wrapper for a 1-D U-Net on windowed PPG data.

    Batches are (B, window, 4) tensors; channels 0-2 are the model inputs
    and channel 3 is the ABP regression target.
    """

    def __init__(self, model, train_loader, val_loader, loss_fn, optimizer, scheduler, device):
        self.model = model.to(device)
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.loss_fn = loss_fn
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.device = device
        self.writer = SummaryWriter()

    def _split_batch(self, batch):
        """Split a (B, window, 4) batch into device-resident inputs/targets."""
        x = batch[:, :, :3].swapaxes(1, 2).to(self.device)  # (B, 3, window)
        y = batch[:, :, 3].unsqueeze(1).to(self.device)     # (B, 1, window)
        return x, y

    def train_epoch(self):
        """Run one optimization pass over the training set; return mean loss."""
        self.model.train()
        running = 0.0

        for batch in self.train_loader:
            x, y = self._split_batch(batch)

            self.optimizer.zero_grad()
            batch_loss = self.loss_fn(self.model(x), y)
            batch_loss.backward()
            self.optimizer.step()

            running += batch_loss.item()

        return running / len(self.train_loader)

    def validate(self):
        """Evaluate on the validation set without gradients; return mean loss."""
        self.model.eval()
        running = 0.0

        with torch.no_grad():
            for batch in self.val_loader:
                x, y = self._split_batch(batch)
                running += self.loss_fn(self.model(x), y).item()

        return running / len(self.val_loader)

    def train(self, epochs, fixed_samples=None, save_path="model.pth", log_interval=10):
        """Full training loop: log losses, plot fixed samples, keep best model."""
        best_val_loss = float('inf')

        for epoch in range(1, epochs + 1):
            tic = time.time()

            # One pass of training followed by validation.
            train_loss = self.train_epoch()
            val_loss = self.validate()

            # Only start stepping the LR schedule once training loss has
            # come down below this threshold.
            if train_loss < 5:
                self.scheduler.step()

            # TensorBoard scalars.
            self.writer.add_scalar("Loss/Train", train_loss, epoch)
            self.writer.add_scalar("Loss/Validation", val_loss, epoch)

            # Periodically render predictions on the fixed test samples.
            if fixed_samples is not None and (epoch == 1 or epoch % log_interval == 0):
                plot_fixed_predictions(self.model, fixed_samples, epoch)

            # Checkpoint whenever validation improves.
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                torch.save(self.model.state_dict(), save_path)
                print(f"Saved new best model with val_loss: {val_loss:.4f}")

            elapsed = time.time() - tic
            print(f"Epoch {epoch:03d}/{epochs} | "
                  f"Train Loss: {train_loss:.4f} | "
                  f"Val Loss: {val_loss:.4f} | "
                  f"Time: {elapsed:.2f}s")

        self.writer.close()

def setup_fixed_test_samples(dataset, num_samples=5):
    """Stack the first min(num_samples, len(dataset)) items into one tensor."""
    count = min(num_samples, len(dataset))
    return torch.stack([dataset[idx] for idx in range(count)])

def plot_fixed_predictions(model, fixed_samples, epoch, save_dir="fixed_test_plots"):
    """Plot model predictions vs. ground-truth ABP for the fixed samples.

    Saves one PNG per sample under save_dir, named by sample index and epoch.
    Column 0 of each sample is used as the time axis, column 3 as the target.
    """
    os.makedirs(save_dir, exist_ok=True)
    model.eval()

    device = next(model.parameters()).device
    with torch.no_grad():
        net_in = fixed_samples[:, :, :3].swapaxes(1, 2).to(device)
        truth = fixed_samples[:, :, 3].cpu().numpy()
        preds = model(net_in).cpu().numpy().squeeze(1)
        times = fixed_samples[:, :, 0].cpu().numpy()

        # One figure per fixed sample, overlaying ground truth and prediction.
        for idx, (t, gt, pred) in enumerate(zip(times, truth, preds), start=1):
            plt.figure(figsize=(12, 6))
            plt.plot(t, gt, label="Groundtruth ABP", color="blue", linewidth=1.5)
            plt.plot(t, pred, label=f"Epoch {epoch}", color="red", alpha=0.8, linestyle="--")
            plt.xlabel("Time (s)")
            plt.ylabel("ABP (mmHg)")
            plt.title(f"Fixed Sample {idx} - Epoch {epoch}")
            plt.legend()
            plt.grid(True)
            plt.savefig(f"{save_dir}/sample_{idx}_epoch_{epoch}.png", dpi=300, bbox_inches='tight')
            plt.close()
            


def main():
    """Entry point: load the data, build the model, and run training."""
    # Hyperparameters and paths gathered in one place.
    config = {
        "data_path": "original_data_0011.csv",
        "window_length": 250,
        "batch_size": 256,
        "lr": 0.001,
        "epochs": 10000,
        "num_test_samples": 3,
        "log_interval": 3
    }

    # Prefer the second GPU when CUDA is available, otherwise fall back to CPU.
    device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")

    # Dataset plus an 80/20 random train/validation split.
    dataset = PPGDataset(config["data_path"], window_length=config["window_length"], start=0, end=2000000)
    n_train = int(0.8 * len(dataset))
    train_set, val_set = random_split(dataset, [n_train, len(dataset) - n_train])

    train_loader = DataLoader(train_set, batch_size=config["batch_size"], shuffle=True)
    val_loader = DataLoader(val_set, batch_size=config["batch_size"])

    # A handful of validation windows that get re-plotted every few epochs.
    fixed_samples = setup_fixed_test_samples(val_set, num_samples=config["num_test_samples"])

    # Model, optimizer, and a very slow step-wise LR decay.
    model = CircledResidualUNet(in_channels=3, out_channels=1, hidden_channels=[64, 128, 256])
    optimizer = optim.Adam(model.parameters(), lr=config["lr"])
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.999)

    trainer = UNetTrainer(
        model=model,
        train_loader=train_loader,
        val_loader=val_loader,
        loss_fn=L_data,  # data-fidelity loss imported alongside the model
        optimizer=optimizer,
        scheduler=scheduler,
        device=device,
    )

    trainer.train(
        epochs=config["epochs"],
        fixed_samples=fixed_samples,
        save_path="best_unet_model.pth",
        log_interval=config["log_interval"],
    )

# Run training only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
