import os
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, random_split
import matplotlib.pyplot as plt
# Force synchronous CUDA kernel launches so CUDA errors are raised at the
# offending call instead of asynchronously later (debugging aid).
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
import logging
from datetime import datetime
from mamba_ssm import Mamba2

# Run-wide timestamp used to name log files, model checkpoints and plots.
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

# Data preprocessing helper
def preprocess_channel(data, target_len=32000):
    """Min-max normalize one sensor channel and resample it to a fixed length.

    :param data: 1-D sequence of raw sensor readings
    :param target_len: output length after linear-interpolation resampling
                       (default 32000, matching the model's expected input)
    :return: 1-D float array of length ``target_len`` with values in [0, 1];
             all zeros when the channel is constant (scaling is undefined)
    """
    data = np.asarray(data, dtype=float)
    min_val = np.min(data)
    max_val = np.max(data)
    if max_val == min_val:
        # Constant channel: min-max scaling would divide by zero.
        return np.zeros(target_len)
    normalized = (data - min_val) / (max_val - min_val)
    L = len(normalized)
    new_indices = np.linspace(0, L - 1, target_len)
    return np.interp(new_indices, np.arange(L), normalized)

# Data loading
def load_data(root_dir):
    """Load milling-tool sensor data and wear labels from ``root_dir``.

    For each selected tool, reads one CSV per cut (7 sensor channels,
    resampled to 32000 points each) plus the per-tool wear CSV, and converts
    mean wear into signed "cuts until threshold" labels.

    :param root_dir: directory containing the c{n}/ tool folders
    :return: (all_data, all_targets) with shapes (num_cuts, 7, 32000) and
             (num_cuts,)
    """
    all_data = []
    all_targets = []
    threshold = 160  # wear threshold; units presumably 1e-3 mm — TODO confirm

    for n in [6]:  # only tool c6 is loaded; extend this list for more tools
        wear_path = os.path.join(root_dir, f'c{n}', f"c{n}_wear.csv")
        if not os.path.exists(wear_path):
            continue

        wear_df = pd.read_csv(wear_path, header=None)
        tool_data = []
        tool_targets = []
        m = 1
        while True:
            # NOTE(review): the doubled f'c{n}' directory level looks intended
            # for this dataset layout — verify against the on-disk structure.
            data_path = os.path.join(root_dir, f'c{n}', f'c{n}', f'c_{n}_{m:03d}.csv')
            if not os.path.exists(data_path):
                break

            # Process the 7 sensor channels of this cut
            data_df = pd.read_csv(data_path, header=None).iloc[:, :7]
            processed = []
            for col in range(7):
                channel_data = data_df.iloc[:, col].values
                processed.append(preprocess_channel(channel_data))
            tool_data.append(np.array(processed))  # (7, 32000)

            # Wear label for cut m: mean of the last three columns of row m-1
            wear_values = wear_df.iloc[m - 1, -3:].values.astype(float)
            tool_targets.append(np.mean(wear_values))
            m += 1

        tool_data = np.array(tool_data)
        tool_targets = np.array(tool_targets)

        # Convert wear values into signed distances (in cuts) from a cut whose
        # wear is below the threshold: earlier cuts get positive counts, later
        # cuts negative, the reference cut itself 0.
        # NOTE(review): every index below threshold rewrites the whole array,
        # so only the LAST qualifying index takes effect — confirm intended.
        converted_targets = np.zeros_like(tool_targets)
        for i in range(len(tool_targets)):
            if tool_targets[i] < threshold:
                converted_targets[i] = 0
                # labels before index i
                for j in range(1, i + 1):
                    converted_targets[i - j] = j
                # labels after index i
                for j in range(1, len(tool_targets) - i):
                    converted_targets[i + j] = -j

        all_data.append(tool_data)
        all_targets.append(converted_targets)

    # Merge the data of all tools
    # NOTE(review): np.concatenate raises if no tool folder was found; the
    # caller sees that as a hard failure rather than an empty dataset.
    all_data = np.concatenate(all_data, axis=0)
    all_targets = np.concatenate(all_targets, axis=0)

    return all_data, all_targets

# Dataset wrapper around the preprocessed arrays
class ToolDataset(Dataset):
    """Thin Dataset over (data, targets) arrays.

    Each item is a ``(FloatTensor(sample), FloatTensor([target]))`` pair.
    """

    def __init__(self, data, targets):
        self.data = data
        self.targets = targets

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        sample = torch.FloatTensor(self.data[idx])
        label = torch.FloatTensor([self.targets[idx]])
        return sample, label

# 1-D ResNet backbone
class ResNet1D(nn.Module):
    """1-D ResNet feature extractor.

    A standard stem (7-wide conv, BN, ReLU, max-pool) followed by four
    residual stages; the forward pass returns the feature maps of stages
    2, 3 and 4 for multi-scale fusion downstream.

    :param block: residual block class (must expose an ``expansion``
                  class attribute, e.g. Bottleneck1D)
    :param layers: number of blocks in each of the four stages
    :param in_channels: number of input channels
    """

    def __init__(self, block, layers, in_channels):
        super(ResNet1D, self).__init__()
        self.in_channels = 64

        # Stem: (batch, in_channels, L) -> (batch, 64, L/4)
        self.conv1 = nn.Conv1d(in_channels, self.in_channels, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm1d(self.in_channels)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)

        # Four residual stages; every stage after the first halves the length.
        self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Assemble one residual stage of ``blocks`` blocks.

        :param block: residual block class
        :param planes: bottleneck channel count of the stage
        :param blocks: number of blocks in the stage
        :param stride: stride of the first block
        """
        out_channels = planes * block.expansion
        # A projection shortcut is needed whenever shape or stride changes.
        downsample = None
        if stride != 1 or self.in_channels != out_channels:
            downsample = nn.Sequential(
                nn.Conv1d(self.in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm1d(out_channels),
            )

        stage = [block(self.in_channels, planes, stride, downsample)]
        self.in_channels = out_channels
        stage.extend(block(self.in_channels, planes) for _ in range(1, blocks))
        return nn.Sequential(*stage)

    def forward(self, x):
        """Return the stage-2/3/4 feature maps of ``x``."""
        out = self.maxpool(self.relu(self.bn1(self.conv1(x))))

        out = self.layer1(out)
        feat2 = self.layer2(out)
        feat3 = self.layer3(feat2)
        feat4 = self.layer4(feat3)
        return feat2, feat3, feat4


class Bottleneck1D(nn.Module):
    """1-D bottleneck residual block: 1x1 reduce -> 3x3 -> 1x1 expand (x4)."""

    expansion = 4

    def __init__(self, in_channels, planes, stride=1, downsample=None):
        """
        :param in_channels: input channel count
        :param planes: bottleneck (middle) channel count; output has
                       ``planes * expansion`` channels
        :param stride: stride of the middle 3x3 convolution
        :param downsample: optional projection for the identity branch
        """
        super(Bottleneck1D, self).__init__()
        self.conv1 = nn.Conv1d(in_channels, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm1d(planes)

        self.conv2 = nn.Conv1d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm1d(planes)

        self.conv3 = nn.Conv1d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm1d(planes * self.expansion)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        # Identity branch: project if the main path changes shape.
        shortcut = x if self.downsample is None else self.downsample(x)

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        out += shortcut
        return self.relu(out)

# Full model: ResNet1D backbone with Mamba and Transformer fusion branches
class WearModel(nn.Module):
    """Tool-wear regression model.

    A ResNet1D (Bottleneck, [3, 4, 6, 3]) backbone yields multi-scale
    features.  Stage-3 features are reduced to 256 channels and refined by a
    Mamba2 block; stage-4 features are reduced to 256 channels and refined by
    a Transformer encoder.  Both branches are concatenated, globally pooled,
    and mapped to a single wear value by an MLP head.
    """

    def __init__(self):
        super().__init__()
        self.in_channels = 7     # sensor channels of the input
        self.out_channels = 512  # fused channel count before pooling
        self.x2_len = 4000       # expected stage lengths for a 32000-pt input
        self.x3_len = 2000
        self.x4_len = 1000

        self.resnet = ResNet1D(Bottleneck1D, [3, 4, 6, 3], self.in_channels)

        # batch_first=True makes the encoder treat dim 1 (the 256 reduced
        # channels) as the sequence.  Without it the batch dimension is used
        # as the sequence, so samples in a batch attend to each other — a
        # correctness bug that also makes results depend on batch size.
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=1000, nhead=25, dim_feedforward=2048, dropout=0.1,
            batch_first=True)
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=1)

        # No hard-coded .to("cuda") here: the caller moves the whole model to
        # the chosen device, keeping device selection in one place.
        # (Mamba2's fused kernels still require a CUDA device at run time.)
        self.mambad = Mamba2(
            d_model=256,  # model dimension
            d_state=64,   # SSM state expansion factor, typically 64 or 128
            d_conv=4,     # local convolution width
            expand=2,     # block expansion factor
        )

        # 1x1 reductions of the backbone's stage-3 / stage-4 channel counts
        self.conv1x1 = nn.Sequential(
            nn.Conv1d(1024, 256, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm1d(256),
        )

        self.conv1x2 = nn.Sequential(
            nn.Conv1d(2048, 256, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm1d(256),
        )

        # Align the Mamba branch's length with the Transformer branch's.
        self.downsample4 = nn.AdaptiveAvgPool1d(self.x4_len)

        self.pool = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Sequential(
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, 1)
        )

    def forward(self, x):
        """Map (batch, 7, 32000) sensor windows to (batch,) wear predictions."""
        _, x3, x4 = self.resnet(x)      # x3: (batch, 1024, 2000), x4: (batch, 2048, 1000)

        # Mamba branch on stage-3 features
        x3 = self.conv1x1(x3)           # (batch, 256, 2000)
        x3 = x3.permute(0, 2, 1)        # (batch, 2000, 256) — Mamba2 takes (B, L, D)
        x3 = self.mambad(x3)            # (batch, 2000, 256)
        x3 = x3.permute(0, 2, 1)        # (batch, 256, 2000)
        x3 = self.downsample4(x3)       # (batch, 256, 1000)

        # Transformer branch on stage-4 features
        x4 = self.conv1x2(x4)           # (batch, 256, 1000)
        x4 = self.transformer(x4)       # (batch, 256, 1000)

        x = torch.cat((x3, x4), dim=1)  # (batch, 512, 1000)

        # squeeze(-1) instead of bare squeeze(): a bare squeeze() would also
        # drop the batch dimension when batch_size == 1.
        x = self.pool(x).squeeze(-1)    # (batch, 512)
        return self.fc(x).squeeze(-1)   # (batch,)

# Logging configuration
def setup_logger():
    """Configure root logging to a timestamped file and to the console."""
    # Make sure the log directory exists
    log_dir = "logs"
    os.makedirs(log_dir, exist_ok=True)

    # One log file per run, named by the module-level timestamp
    log_file = os.path.join(log_dir, f"training_{timestamp}.log")

    handlers = [logging.FileHandler(log_file), logging.StreamHandler()]
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(levelname)s - %(message)s",
        handlers=handlers,
    )
    logging.info(f"Training log initialized at {log_file}")

# Training loop
def train_model(data_dir: str, batch_size: int = 32, epochs: int = 100, lr: float = 1e-4) -> None:
    """Train WearModel on the data under ``data_dir``.

    Loads the dataset, splits it 80/20, trains with Adam + MSE, logs
    MAE/RMSE/R2 on the validation split every epoch, checkpoints the best
    model (by MAE) under ./logs/, and saves a training-curve figure.

    :param data_dir: root directory passed to load_data()
    :param batch_size: mini-batch size for both loaders
    :param epochs: number of training epochs
    :param lr: Adam learning rate
    """
    setup_logger()  # initialize logging for this run
    try:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        logging.info(f"Using device: {device}")
        
        # Load the data
        logging.info("Start loading data...")
        data, targets = load_data(data_dir)
        logging.info(f"Data loaded: {len(data)} samples")

        # 80/20 train/validation split
        dataset = ToolDataset(data, targets)
        train_set, val_set = random_split(dataset, [int(0.8*len(dataset)), 
                                          len(dataset)-int(0.8*len(dataset))])
        
        logging.info(f"Dataset split: {len(train_set)} train, {len(val_set)} validation")
    
        # Data loaders
        train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
        val_loader = DataLoader(val_set, batch_size=batch_size)
    
        # Build the model
        model = WearModel().to(device)
        logging.info(f"Model architecture:\n{model}")
        
        # Optimizer and loss function
        criterion = nn.MSELoss()
        optimizer = optim.Adam(model.parameters(), lr=lr)
        logging.info(f"Optimizer: {optimizer.__class__.__name__} with lr={lr}")
    
        # Training bookkeeping
        best_metrics = {'mae': float('inf')}
        best_mae = float('inf')
        train_losses = []
        val_maes = []
        val_rmses = []  # per-epoch RMSE
        val_r2s = []    # per-epoch R²
        
        for epoch in range(epochs):
            model.train()
            epoch_loss = 0
            # NOTE(review): the loop variable `targets` shadows the array
            # loaded above; harmless here since the array is not used later.
            for batch_idx, (inputs, targets) in enumerate(train_loader):
                try:
                    inputs, targets = inputs.to(device), targets.to(device)
                    optimizer.zero_grad()
                    outputs = model(inputs)
                    loss = criterion(outputs, targets.squeeze())
                    loss.backward()
                    optimizer.step()
                    epoch_loss += loss.item()
                # log roughly every 10% of the epoch
                    if batch_idx % max(1, len(train_loader)//10) == 0:
                        logging.debug(f"Epoch {epoch+1} | Batch {batch_idx}/{len(train_loader)} | "
                                    f"Batch Loss: {loss.item():.4f}")
                except Exception as e:
                    logging.error(f"Training error at batch {batch_idx}: {str(e)}")
                    raise
            train_losses.append(epoch_loss/len(train_loader))
            
            # Validation pass
            model.eval()
            total_mae = 0
            all_preds = []
            all_targets = []
            with torch.no_grad():
                for inputs, targets in val_loader:
                    inputs, targets = inputs.to(device), targets.to(device)
                    outputs = model(inputs)
                    # collect predictions and ground truth
                    all_preds.append(outputs.cpu().numpy())
                    all_targets.append(targets.squeeze().cpu().numpy())

                    total_mae += torch.abs(outputs - targets.squeeze()).sum().item()
            # Merge all validation results
            # NOTE(review): np.concatenate assumes each batch yields a 1-D
            # array; a final batch of size 1 squeezed to 0-D would break this
            # — confirm against the model's output shape.
            all_preds = np.concatenate(all_preds)
            all_targets = np.concatenate(all_targets)
            
            # Compute the validation metrics
            val_mae = total_mae / len(val_set)
            mse = np.mean((all_preds - all_targets)**2)
            val_rmse = np.sqrt(mse)
            
            # Coefficient of determination (R²)
            ss_tot = np.sum((all_targets - np.mean(all_targets))**2)
            ss_res = np.sum((all_targets - all_preds)**2)
            val_r2 = 1 - (ss_res / ss_tot) if ss_tot != 0 else 0

            
            # Record the metrics
            val_maes.append(val_mae)
            val_rmses.append(val_rmse)
            val_r2s.append(val_r2)

             # Per-epoch log line
            logging.info(f"Epoch {epoch+1}/{epochs} \t "
                        f"Train Loss: {train_losses[-1]:.4f} \t "
                        f"Val MAE: {val_mae:.4f} \t "
                        f"Val RMSE: {val_rmse:.4f} \t "
                        f"R2: {val_r2:.4f}")
            
            # Console progress line
            print(f'Epoch {epoch+1}/{epochs} | '
                f'Train Loss: {train_losses[-1]:.4f} | '
                f'Val MAE: {val_mae:.4f} | '
                f'Val RMSE: {val_rmse:.4f} | '
                f'R²: {val_r2:.4f}')
            
            # Checkpoint the best model (selection criterion: MAE)
            # best_model0 = whole pickled module; best_model1 = state_dict only
            if val_mae < best_mae:
                best_mae = val_mae
                torch.save(model, f'./logs/best_model0_{timestamp}.pth')
                torch.save(model.state_dict(), f'./logs/best_model1_{timestamp}.pth')
                best_metrics.update({
                    'epoch': epoch+1,
                    'mae': val_mae,
                    'rmse': val_rmse,
                    'r2': val_r2
                })
                
                logging.info(f"New best model saved at epoch {epoch+1} with MAE: {val_mae:.4f}")
        
        # Final summary log
        logging.info("Training completed. Best metrics:")
        for k, v in best_metrics.items():
            logging.info(f"{k.upper():<8}: {v}")
            
    except Exception as e:
        logging.critical(f"Training process failed: {str(e)}", exc_info=True)
        raise

    # Plot the training curves (skipped when an exception was re-raised above)
    plt.figure(figsize=(12,5))
    plt.subplot(1,2,1)
    plt.plot(train_losses, label='Training Loss')
    plt.legend()
    
    plt.subplot(1,2,2)
    plt.plot(val_maes, label='Val MAE')
    plt.plot(val_rmses, label='Val RMSE')
    plt.plot(val_r2s, label='R² Score')
    plt.legend()
    plt.savefig(f'training_metrics_{timestamp}.png')
    plt.close()

if __name__ == '__main__':
    # Script entry point: train on the local dataset directory.
    train_model(data_dir='./Code/data', epochs=100, batch_size=16, lr=1e-4)