from slip_detector import EnhancedTactileCNN, StreamTactileDataset
import torch
import torch.nn as nn
import os
import numpy as np
import random
import tqdm
import torch.nn.functional as F
import time
import pandas as pd

class NetworkTrainee:
    """Trains an ``EnhancedTactileCNN`` slip-delta regressor on tactile
    datasets and manages versioned resource directories for checkpoints
    and normalization statistics.
    """

    def __init__(self, res_dir=None, dataset_dir=None):
        """
        Initialize the trainee: build the model, optionally warm-start it
        from the newest stable resource directory, and locate the dataset.

        Args:
            res_dir (str | None): Directory holding a pretrained
                ``slip_delta_model.pth``. Auto-detected when None.
            dataset_dir (str | None): Root directory containing
                ``dataset_*`` folders. Auto-detected when None.
        """
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model = EnhancedTactileCNN().to(self.device)

        if res_dir is None:
            res_dir = self._get_res_dir()
        if res_dir is None:
            print("[NetworkTrainee] Resource directory not found. Using a random start network.")
        else:
            print("[NetworkTrainee] Resource directory found at:", res_dir)
            ckpt_path = os.path.join(res_dir, "slip_delta_model.pth")
            # ``weights_only`` exists from torch 2.4 on. Compare numerically:
            # a lexicographic string compare would misorder e.g. '2.10' < '2.4'.
            if self._torch_version() < (2, 4):
                self.model.load_state_dict(torch.load(ckpt_path, map_location=self.device))
            else:
                self.model.load_state_dict(torch.load(ckpt_path, map_location=self.device, weights_only=True))
            print("[NetworkTrainee] model loaded successfully")
        self.res_dir = res_dir
        self.next_res_dir = self._get_next_res_dir()
        print(f"[NetworkTrainee] Next resource directory for saving results: {self.next_res_dir}")

        if dataset_dir is None:
            dataset_dir = self._get_dataset_dir()
        self.dataset_dir = dataset_dir

        # Per-sample metadata: list of (exp_dir, sensor_id, n_frames) tuples.
        self.sample_meta = []
        self.frame_cnt = None
        self.train_loader = None
        self.val_loader = None
        self.train_result = None
        self.scanned_dir = None

    @staticmethod
    def _torch_version():
        """Best-effort (major, minor) tuple of the installed torch version.

        Falls back to (0, 0) — i.e. the legacy ``torch.load`` call path —
        when the version string is not in a plain numeric format.
        """
        try:
            major, minor = torch.__version__.split("+")[0].split(".")[:2]
            return (int(major), int(minor))
        except (AttributeError, ValueError):
            return (0, 0)

    def get_sample_meta(self):
        """
        Scan ``self.dataset_dir`` for ``dataset_*`` folders and collect
        (exp_dir, sensor_id, n_frames) tuples into ``self.sample_meta``.

        Also freezes each newly scanned dataset by dropping an empty
        ``used_dataset`` marker file inside it.
        """
        scanned_dir = []
        self.sample_meta = []  # reset so repeated scans do not duplicate entries
        self.frame_cnt = 0
        for name in os.listdir(self.dataset_dir):
            if not name.startswith('dataset_'):
                continue
            scanned_dir.append(name)
            dataset = os.path.join(self.dataset_dir, name)
            for exp_name in os.listdir(dataset):
                exp_dir = os.path.join(dataset, exp_name)
                if not os.path.isdir(exp_dir):
                    continue  # skip stray files such as the freeze marker
                for sensor_id in (0, 1):
                    label_path = os.path.join(exp_dir, f'true_delta_{sensor_id}.npy')
                    tac_path = os.path.join(exp_dir, f'tacdata_{sensor_id}.npy')
                    if not (os.path.exists(label_path) and os.path.exists(tac_path)):
                        continue
                    # Record metadata only; the tactile data itself is
                    # streamed later by StreamTactileDataset.
                    data_length = len(np.load(label_path))
                    self.sample_meta.append((exp_dir, sensor_id, data_length))
                    self.frame_cnt += data_length

            marker = os.path.join(dataset, "used_dataset")
            if not os.path.exists(marker):
                with open(marker, 'w'):
                    pass  # touch the marker file
                print(f"[NetworkTrainee] Freezed dataset {dataset} for training.")
        print(f"[NetworkTrainee] Scanned {scanned_dir} for samples.")
        print(f"[NetworkTrainee] Found {len(self.sample_meta)} samples in {len(scanned_dir)} datasets.")
        self.scanned_dir = scanned_dir

    def create_dataloader(self, train_ratio=0.8, batch_size=32):
        """
        Build the train/validation DataLoaders from the scanned metadata.

        Args:
            train_ratio (float): Fraction of samples used for training.
            batch_size (int): Size of each batch.
        """
        # Get and split the sample metadata.
        self.get_sample_meta()
        random.shuffle(self.sample_meta)
        split_index = int(len(self.sample_meta) * train_ratio)
        train_meta = self.sample_meta[:split_index]
        val_meta = self.sample_meta[split_index:]

        # Create the datasets, normalized with statistics estimated from a
        # random subsample of the metadata.
        mean, std = self._estimate_mean_std(self.sample_meta, sample_size=100)
        train_ds = StreamTactileDataset(train_meta, mean, std)
        val_ds = StreamTactileDataset(val_meta, mean, std)
        print(f"[NetworkTrainee] Dataset created with {len(train_ds)} training samples and {len(val_ds)} validation samples.")

        # Save normalization parameters next to the future checkpoint so
        # inference can reuse them.
        np.save(os.path.join(self.next_res_dir, "mean.npy"), mean)
        np.save(os.path.join(self.next_res_dir, "std.npy"), std)

        # Create the DataLoaders.
        train_loader = torch.utils.data.DataLoader(train_ds, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)
        val_loader = torch.utils.data.DataLoader(val_ds, batch_size=batch_size, shuffle=False, num_workers=4, pin_memory=True)
        print("[NetworkTrainee] training set size:", len(train_loader.dataset))
        print("[NetworkTrainee] validation set size:", len(val_loader.dataset))
        self.train_loader = train_loader
        self.val_loader = val_loader

    def train(self, epochs=100, patience=10):
        """
        Train the model with SmoothL1 loss, LR scheduling, gradient
        clipping and early stopping. The best weights (lowest validation
        loss) are checkpointed to ``self.next_res_dir``.

        Args:
            epochs (int): Maximum number of epochs to train for.
            patience (int): Epochs without validation improvement before
                early stopping.
        """
        criterion = nn.SmoothL1Loss(beta=0.5)  # more robust than L2 for regression
        optimizer = torch.optim.AdamW(self.model.parameters(),
                                      lr=3e-3,
                                      weight_decay=1e-4)

        # Learning-rate scheduler: halve LR after 3 stagnant epochs.
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer,
            mode='min',
            factor=0.5,
            patience=3
        )

        # Early-stopping state.
        best_loss = float('inf')
        counter = 0
        self.train_result = {
            "epoch": [],
            "train_loss": [],
            "val_loss": [],
            "train_mae": [],
            "val_mae": [],
            "used_time": [],
        }

        for epoch in range(epochs):
            self.train_result["epoch"].append(epoch + 1)
            print(f"\nEpoch {epoch+1}/{epochs}")
            st_time = time.time()

            # ---- training phase ----
            self.model.train()
            train_loss = 0
            mae = 0
            pbar = tqdm.tqdm(self.train_loader, desc="Training")
            for inputs, targets in pbar:
                pbar.set_description(f"Training Epoch {epoch+1}/{epochs}")

                inputs, targets = inputs.to(self.device), targets.to(self.device)

                optimizer.zero_grad()
                # squeeze(-1) rather than squeeze(): a plain squeeze() on a
                # batch of size 1 would also drop the batch dimension and
                # desync outputs from targets (assumes model output is
                # (batch, 1) — confirm against EnhancedTactileCNN).
                outputs = self.model(inputs).squeeze(-1)
                loss = criterion(outputs, targets)
                loss.backward()
                pbar.set_postfix(loss=loss.item())

                # Gradient clipping guards against exploding gradients.
                nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)
                optimizer.step()

                train_loss += loss.item()
                with torch.no_grad():
                    mae += F.l1_loss(outputs, targets).item()
            print(f"训练损失: {train_loss/len(self.train_loader):.4f} MAE: {mae/len(self.train_loader):.4f}")
            self.train_result["train_loss"].append(train_loss/len(self.train_loader))
            self.train_result["train_mae"].append(mae/len(self.train_loader))

            # ---- validation phase ----
            self.model.eval()
            val_loss = 0
            mae = 0
            with torch.no_grad():
                pbar = tqdm.tqdm(self.val_loader, desc="Validation")
                for inputs, targets in pbar:
                    pbar.set_description(f"Validation Epoch {epoch+1}/{epochs}")
                    inputs, targets = inputs.to(self.device), targets.to(self.device)
                    outputs = self.model(inputs).squeeze(-1)
                    val_loss += criterion(outputs, targets).item()
                    mae += F.l1_loss(outputs, targets).item()
            print(f"验证损失: {val_loss/len(self.val_loader):.4f} 验证MAE: {mae/len(self.val_loader):.4f} ")
            self.train_result["val_loss"].append(val_loss/len(self.val_loader))
            self.train_result["val_mae"].append(mae/len(self.val_loader))

            # LR adjustment on the summed validation loss; the constant
            # divisor would not change ReduceLROnPlateau's relative test.
            scheduler.step(val_loss)
            elapsed_time = time.time() - st_time
            self.train_result["used_time"].append(elapsed_time)

            # Early stopping: checkpoint on improvement, stop after
            # `patience` stagnant epochs.
            if val_loss < best_loss:
                best_loss = val_loss
                counter = 0
                torch.save(self.model.state_dict(), os.path.join(self.next_res_dir, "slip_delta_model.pth"))
            else:
                counter += 1
                if counter >= patience:
                    # 1-based epoch number, consistent with the other logs.
                    print(f"Early stopping at epoch {epoch + 1}")
                    break

            print(f"Epoch {epoch+1} completed in {elapsed_time/60.0:.2f} minutes.")

    def save_training_results(self, goal_dir="resultlib"):
        """Persist the training configuration and per-epoch metrics under a
        timestamped folder inside ``goal_dir``.

        Args:
            goal_dir (str): Parent directory for the result folder.
        """
        time_str = time.strftime("%Y%m%d_%H%M%S")
        out_dir = os.path.join(goal_dir, f"train_result_{time_str}")
        os.makedirs(out_dir, exist_ok=True)
        print(f"[NetworkTrainee] Saving training results to {out_dir}")

        # Save the training configuration.
        config_path = os.path.join(out_dir, "config.txt")
        with open(config_path, 'w') as f:
            f.write(f"Device: {self.device}\n")
            f.write(f"Model: {self.model.__class__.__name__}\n")
            f.write(f"Scanned directories: {self.scanned_dir}\n")
            f.write(f"Count of experiments: {len(self.sample_meta)}\n")
            f.write(f"Epochs: {len(self.train_result['train_loss'])}\n")
            f.write(f"Batch Size: {self.train_loader.batch_size}\n")

        # Save the per-epoch training metrics.
        result_path = os.path.join(out_dir, "training_results.csv")
        pd.DataFrame(self.train_result).to_csv(result_path, index=False)

    def _first_unstable_res_idx(self):
        """Return the index of the first ``slip_detector/res{i}`` that is
        missing or lacks a ``stable`` marker, scanning upward from 0.
        """
        current_dir = "slip_detector"
        idx = 0
        while True:
            res_dir = os.path.join(current_dir, f'res{idx}')
            if not os.path.exists(res_dir) or not os.path.exists(os.path.join(res_dir, "stable")):
                return idx
            idx += 1

    def _get_res_dir(self):
        """
        Get the newest *stable* resource directory holding the model and
        normalization parameters.

        Returns:
            str | None: The resource directory, or None when no stable
            directory exists.
        """
        res_idx = self._first_unstable_res_idx() - 1
        if res_idx < 0:
            return None
        return os.path.join("slip_detector", f'res{res_idx}')

    def _get_next_res_dir(self):
        """
        Get (creating it if necessary) the first non-stable resource
        directory, where new training results will be written.

        Returns:
            str: The next resource directory.
        """
        res_dir = os.path.join("slip_detector", f'res{self._first_unstable_res_idx()}')
        os.makedirs(res_dir, exist_ok=True)
        return res_dir

    def _get_dataset_dir(self):
        """
        Returns the directory where the dataset is stored.
        Data should be stored in a directory named 'dataset_{index}' where index is an integer.
        """
        return "datalib"

    def _create_dataset(self, meta):
        """Load and concatenate tactile data and labels for the given
        metadata entries.

        Args:
            meta (list): (exp_dir, sensor_id, length) tuples.

        Returns:
            tuple: (tactile data, labels) as concatenated numpy arrays.
        """
        all_tacdata = []
        all_labels = []
        for exp_dir, sensor_id, _ in meta:
            label_path = os.path.join(exp_dir, f'true_delta_{sensor_id}.npy')
            tac_path = os.path.join(exp_dir, f'tacdata_{sensor_id}.npy')
            all_labels.append(np.load(label_path))
            all_tacdata.append(np.load(tac_path))
        print("Created.")
        return np.concatenate(all_tacdata), np.concatenate(all_labels)

    def _calculate_mean_std(self, data):
        """
        Calculate the mean and standard deviation of the dataset over the
        first two axes (per remaining feature dimension).

        Args:
            data (np.ndarray): The dataset for which to calculate mean and std.
        Returns:
            tuple: Mean and standard deviation of the dataset.
        """
        mean = np.mean(data, axis=(0, 1))
        std = np.std(data, axis=(0, 1))
        return mean, std

    def _estimate_mean_std(self, metas, sample_size=100):
        """
        Estimate the dataset mean and standard deviation from a random
        subsample of experiments.

        Args:
            metas (list): Metadata of the dataset.
            sample_size (int): Number of experiments to sample.
        Returns:
            tuple: Mean and standard deviation of the sampled data.
        """
        if len(metas) < sample_size:
            sample_size = len(metas)
            print(f"[NetworkTrainee] Sample size is larger than dataset size. Using {sample_size} samples.")
        sampled_metas = random.sample(metas, sample_size)
        all_tacdata, _ = self._create_dataset(sampled_metas)
        # Reuse the shared statistics helper instead of duplicating it.
        return self._calculate_mean_std(all_tacdata)

if __name__ == "__main__":
    # Example usage: run the full pipeline — scan the datasets, train the
    # model, then archive the training results.
    net_trainee = NetworkTrainee()
    net_trainee.create_dataloader()
    net_trainee.train(epochs=100, patience=7)
    net_trainee.save_training_results(goal_dir="resultlib")