# -*- coding: utf-8 -*-
"""
Name: get_datasets.py
Author: AI Assistant
Date: 2024
Project: DeepDeePC
Description: Get DDeePC training and test datasets
"""

import torch
import numpy as np
from torch.utils.data import DataLoader, TensorDataset, random_split
from QYtool import datatool, mathtool
import os
import matplotlib.pyplot as plt


def get_datasets(args, test_ratio=0.2, random_seed=42):
    """
    Build DDeePC training and test datasets.

    Loads offline and open-loop trajectories, min-max normalizes them with
    precomputed scaling bounds, slides a window of length Tini + Np over the
    open-loop data, and assembles (input, label) pairs for the network.

    Args:
        args: Configuration parameter dictionary. Must contain 'u_dim',
            'y_dim', 'Tini', 'Np', 'batch_size', 'data_size', 'offline_dir',
            'openloop_dir', 'scale_dir' and 'fig_dir'.
        test_ratio: Test set ratio, default is 0.2.
        random_seed: Random seed for dataset splitting. Note: this seeds
            torch's *global* RNG (kept from the original pipeline so the
            whole run stays reproducible).

    Returns:
        train_loader: Training set DataLoader (shuffled).
        test_loader: Test set DataLoader (not shuffled).
        input_data: Complete input tensor, shape
            (N, (u_dim + y_dim) * Tini + u_dim + y_dim).
        label_data: Complete label tensor, shape (N, (u_dim + y_dim) * Np).
    """
    # System parameters
    u_dim = args['u_dim']
    y_dim = args['y_dim']
    Tini = args['Tini']
    Np = args['Np']
    batch_size = args['batch_size']
    data_size = args['data_size']
    
    # Load offline data (used below only so it is scaled with the same bounds).
    offline_dir = args['offline_dir']
    ud, yd = datatool.loadtxt(offline_dir, 'u', 'y')
    ud = ud.reshape(-1, u_dim)
    yd = yd.reshape(-1, y_dim)
    
    # Load open-loop data (the trajectories the dataset windows come from).
    openloop_dir = args['openloop_dir']
    u, y = datatool.loadtxt(openloop_dir, 'u', 'y')
    u = u.reshape(-1, u_dim)
    y = y.reshape(-1, y_dim)
    
    # Load precomputed min/max scaling bounds.
    scale_dir = args['scale_dir']
    u_sc1, u_sc2 = datatool.loadtxt(scale_dir, 'u_min', 'u_max')
    y_sc1, y_sc2 = datatool.loadtxt(scale_dir, 'y_min', 'y_max')
    
    u_sc1, u_sc2 = u_sc1.reshape(1, -1), u_sc2.reshape(1, -1)
    y_sc1, y_sc2 = y_sc1.reshape(1, -1), y_sc2.reshape(1, -1)
    
    # Data normalization; ud_/yd_ are kept in the call so the offline data is
    # scaled with the same bounds, though only u_/y_ are windowed below.
    ud_, u_ = datatool.scale(u_sc1, u_sc2, 'minmax', 'array', ud, u)   
    yd_, y_ = datatool.scale(y_sc1, y_sc2, 'minmax', 'array', yd, y)
    
    # NOTE(review): the original code also built Hankel matrices from ud_/yd_
    # and partitioned them into Up/Uf/Yp/Yf here, but those values were never
    # used anywhere in this function; the dead computation was removed.
    
    # Slide a window of length L = Tini + Np over the open-loop data.
    u_ = u_[:data_size, :]
    y_ = y_[:data_size, :]
    L = Tini + Np
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    u_L_ = torch.FloatTensor(np.array([u_[i:i+L, :] for i in range(u_.shape[0]-L+1)])).to(device)
    y_L_ = torch.FloatTensor(np.array([y_[i:i+L, :] for i in range(y_.shape[0]-L+1)])).to(device)
    
    # Assemble (input, label) pairs:
    #   input = [uini, yini, u_e, y_e]  (histories plus one-step deltas)
    #   label = [future u over Np steps, future y over Np steps]
    # NOTE(review): y_e_ indexes Tini+1, so this requires Np >= 2, and the
    # input/output deltas are taken at different offsets — presumably
    # intentional (output lags the input by one step); confirm against the
    # DeePC formulation this targets.
    input_data, label_data = [], []
    for i in range(len(u_L_)):
        uini_ = u_L_[i, :Tini, :].flatten()
        yini_ = y_L_[i, :Tini, :].flatten()
        u_e_ = u_L_[i, Tini, :].flatten() - u_L_[i, Tini-1, :].flatten()
        y_e_ = y_L_[i, Tini+1, :].flatten() - y_L_[i, Tini, :].flatten()
        input_part = torch.cat((uini_, yini_, u_e_, y_e_), dim=0)
        label_part = torch.cat((u_L_[i, Tini:, :].flatten(), y_L_[i, Tini:, :].flatten()), dim=0)
        input_data.append(input_part)
        label_data.append(label_part)
    
    input_data = torch.stack(input_data)
    label_data = torch.stack(label_data)
    
    # Create complete dataset
    dataset = TensorDataset(input_data, label_data)
    
    # Split training and test sets reproducibly.
    dataset_size = len(dataset)
    test_size = int(test_ratio * dataset_size)
    train_size = dataset_size - test_size
    
    torch.manual_seed(random_seed)
    train_dataset, test_dataset = random_split(dataset, [train_size, test_size])
    
    # Create data loaders
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
    
    print(f"Total dataset size: {dataset_size}")
    print(f"Training set size: {train_size}")
    print(f"Test set size: {test_size}")
    print(f"Input feature dimension: {input_data.shape[1]}")
    print(f"Output label dimension: {label_data.shape[1]}")
    
    # Visualize data distribution
    visualize_data_distribution(input_data, label_data, args)
    
    return train_loader, test_loader, input_data, label_data


def visualize_data_distribution(input_data, label_data, args, save_dir=None):
    """
    Visualize data distribution and feature correlation.

    Writes 'data_distribution.pdf' (per-feature mean ± std of inputs and
    labels) and 'input_correlation.pdf' (input feature correlation matrix)
    into the target directory.

    Args:
        input_data: Input data tensor, shape (N, n_features).
        label_data: Label data tensor, shape (N, n_labels).
        args: Configuration parameter dictionary; args['fig_dir'] is used as
            the output directory when save_dir is None.
        save_dir: Save directory. Defaults to args['fig_dir'].
    """
    if save_dir is None:
        save_dir = args['fig_dir']
    # Bug fix: ensure the target directory exists even when save_dir is
    # passed explicitly (previously makedirs ran only for the default dir,
    # so an explicit non-existent save_dir made plt.savefig fail).
    os.makedirs(save_dir, exist_ok=True)
    
    # Per-feature statistics (moved to CPU for matplotlib/numpy).
    input_mean = torch.mean(input_data, dim=0).cpu().numpy()
    input_std = torch.std(input_data, dim=0).cpu().numpy()
    label_mean = torch.mean(label_data, dim=0).cpu().numpy()
    label_std = torch.std(label_data, dim=0).cpu().numpy()
    
    # Visualize input feature distribution
    plt.figure(figsize=(12, 6))
    plt.subplot(1, 2, 1)
    plt.plot(input_mean, label='Mean')
    plt.fill_between(range(len(input_mean)), input_mean-input_std, input_mean+input_std, alpha=0.3, label='Std')
    plt.title('Input Feature Distribution')
    plt.xlabel('Feature Index')
    plt.ylabel('Value')
    plt.legend()
    
    # Visualize label distribution
    plt.subplot(1, 2, 2)
    plt.plot(label_mean, label='Mean')
    plt.fill_between(range(len(label_mean)), label_mean-label_std, label_mean+label_std, alpha=0.3, label='Std')
    plt.title('Label Distribution')
    plt.xlabel('Feature Index')
    plt.ylabel('Value')
    plt.legend()
    
    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, 'data_distribution.pdf'))
    plt.close()
    
    # Visualize input feature correlation matrix (features are rows after .T).
    plt.figure(figsize=(10, 8))
    corr_matrix = np.corrcoef(input_data.cpu().numpy().T)
    plt.imshow(corr_matrix, cmap='coolwarm', vmin=-1, vmax=1)
    plt.colorbar()
    plt.title('Input Feature Correlation Matrix')
    plt.savefig(os.path.join(save_dir, 'input_correlation.pdf'))
    plt.close()


def save_dataset_to_excel(input_data, label_data, save_path=None, feature_names=None, label_names=None):
    """
    Save dataset to an Excel file (falling back to CSV without pandas/openpyxl).

    The main 'Dataset' sheet holds inputs on the left and labels on the right;
    separate per-part sheets and a summary-statistics sheet are also written.
    If pandas or openpyxl is unavailable, the data is written to CSV files
    instead (one combined file plus individual input/label files).

    Args:
        input_data: Input data tensor, shape (N, n_features).
        label_data: Label data tensor, shape (N, n_labels).
        save_path: Save path for Excel file, default is 'dataset.xlsx'.
        feature_names: Names of input features, default is None (will use indices).
        label_names: Names of output labels, default is None (will use indices).

    Returns:
        bool: True if successful (Excel or CSV fallback), False otherwise.
    """
    try:
        # Imported lazily so the CSV fallback works without pandas installed.
        import pandas as pd
        
        # Convert tensors to numpy arrays
        input_np = input_data.cpu().numpy()
        label_np = label_data.cpu().numpy()
        
        # Create feature and label names if not provided
        if feature_names is None:
            feature_names = [f'Feature_{i}' for i in range(input_np.shape[1])]
        
        if label_names is None:
            label_names = [f'Label_{i}' for i in range(label_np.shape[1])]
        
        # Create DataFrames
        input_df = pd.DataFrame(input_np, columns=feature_names)
        label_df = pd.DataFrame(label_np, columns=label_names)
        
        # Create combined DataFrame (input on the left, label on the right)
        # This ensures input and label data are horizontally combined with rows preserved
        combined_df = pd.concat([input_df, label_df], axis=1)
        
        # Set default save path if not provided
        if save_path is None:
            save_path = 'dataset.xlsx'
        
        # Create directory if it doesn't exist
        os.makedirs(os.path.dirname(os.path.abspath(save_path)), exist_ok=True)
        
        # Save to Excel file (a missing openpyxl raises ImportError here and
        # drops us into the CSV fallback below).
        with pd.ExcelWriter(save_path, engine='openpyxl') as writer:
            # Save main sheet with combined data (input on left, label on right)
            combined_df.to_excel(writer, sheet_name='Dataset', index=False)
            
            # Save separate sheets for reference
            input_df.to_excel(writer, sheet_name='Input_Features', index=False)
            label_df.to_excel(writer, sheet_name='Output_Labels', index=False)
            
            # Create a summary sheet with global (all-element) statistics.
            summary_data = {
                'Statistic': ['Count', 'Mean', 'Std', 'Min', '25%', '50%', '75%', 'Max'],
                'Input_Features': [
                    input_np.shape[0],
                    np.mean(input_np),
                    np.std(input_np),
                    np.min(input_np),
                    np.percentile(input_np, 25),
                    np.percentile(input_np, 50),
                    np.percentile(input_np, 75),
                    np.max(input_np)
                ],
                'Output_Labels': [
                    label_np.shape[0],
                    np.mean(label_np),
                    np.std(label_np),
                    np.min(label_np),
                    np.percentile(label_np, 25),
                    np.percentile(label_np, 50),
                    np.percentile(label_np, 75),
                    np.max(label_np)
                ]
            }
            pd.DataFrame(summary_data).to_excel(writer, sheet_name='Summary', index=False)
        
        print(f"Dataset successfully saved to {save_path}")
        return True
    except ImportError:
        print("Pandas or openpyxl not installed. Saving to CSV files instead.")
        
        # Convert tensors to numpy arrays
        input_np = input_data.cpu().numpy()
        label_np = label_data.cpu().numpy()
        
        # Set default save paths
        if save_path is None:
            input_path = 'input_data.csv'
            label_path = 'label_data.csv'
            combined_path = 'combined_dataset.csv'
        else:
            base_path = os.path.splitext(save_path)[0]
            input_path = f"{base_path}_input.csv"
            label_path = f"{base_path}_label.csv"
            combined_path = f"{base_path}_combined.csv"
        
        # Create directory if it doesn't exist (all three paths share it)
        os.makedirs(os.path.dirname(os.path.abspath(input_path)), exist_ok=True)
        
        # Create combined headers and data (input on left, label on right)
        combined_data = np.hstack((input_np, label_np))
        combined_headers = []
        if feature_names:
            combined_headers.extend(feature_names)
        if label_names:
            combined_headers.extend(label_names)
            
        # Save to CSV files. Bug fix: pass comments='' so np.savetxt does not
        # prefix the header row with '# ', which would corrupt the CSV header.
        # Save combined data as the main file
        np.savetxt(combined_path, combined_data, delimiter=',', comments='',
                  header=','.join(combined_headers) if combined_headers else '')
        
        # Save individual files for reference
        np.savetxt(input_path, input_np, delimiter=',', comments='',
                   header=','.join(feature_names) if feature_names else '')
        np.savetxt(label_path, label_np, delimiter=',', comments='',
                   header=','.join(label_names) if label_names else '')
        
        print(f"Combined dataset (input+label) saved to {combined_path}")
        print(f"Input data also saved to {input_path}")
        print(f"Label data also saved to {label_path}")
        print(f"Combined shape: {combined_data.shape[0]} rows × {combined_data.shape[1]} columns")
        print(f"  - Input columns: {input_np.shape[1]}")
        print(f"  - Label columns: {label_np.shape[1]}")
        return True
    except Exception as e:
        # Deliberate best-effort: saving is non-critical, so report and
        # signal failure instead of crashing the pipeline.
        print(f"Error saving dataset: {e}")
        return False


if __name__ == "__main__":
    from ddeepc_args import update_variables, build_args
    
    # Build the configuration object from the project defaults.
    args = build_args(**update_variables)
    cfg = args.args
    
    # Build the train/test loaders and the full input/label tensors.
    train_loader, test_loader, input_data, label_data = get_datasets(cfg)
    
    # Report loader sizes.
    print(f"Number of training batches: {len(train_loader)}")
    print(f"Number of test batches: {len(test_loader)}")
    
    # Inspect the shape of a single training batch.
    for inputs, labels in train_loader:
        print(f"Batch input shape: {inputs.shape}")
        print(f"Batch label shape: {labels.shape}")
        break
    
    # Destination for the Excel export.
    save_path = os.path.join(cfg['data_dir'], 'dataset.xlsx')
    
    # Dimensions used to build descriptive column names.
    u_dim, y_dim = cfg['u_dim'], cfg['y_dim']
    Tini, Np = cfg['Tini'], cfg['Np']
    
    # Input columns: uini history, yini history, then the u_e/y_e deltas.
    feature_names = (
        [f'u_ini_t{t}_dim{i}' for t in range(Tini) for i in range(u_dim)]
        + [f'y_ini_t{t}_dim{i}' for t in range(Tini) for i in range(y_dim)]
        + [f'u_e_dim{i}' for i in range(u_dim)]
        + [f'y_e_dim{i}' for i in range(y_dim)]
    )
    
    # Label columns: future inputs followed by future outputs over Np steps.
    label_names = (
        [f'u_future_t{t}_dim{i}' for t in range(Np) for i in range(u_dim)]
        + [f'y_future_t{t}_dim{i}' for t in range(Np) for i in range(y_dim)]
    )
    
    # Save dataset with descriptive column names
    save_dataset_to_excel(input_data, label_data, save_path, feature_names, label_names)