import numpy as np
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
from copy import deepcopy
import math
import flash_attention
import torch.distributions as dist

# Visualization
import matplotlib.pyplot as plt
from tensorflow.python.tools.optimize_for_inference_lib import INPUT_ORDER


############################################ Multi-Resolution CNN ############################################
class MRCNN(nn.Module):
    """Multi-resolution CNN feature extractor.

    The input is processed by two parallel 1-D convolutional branches:
    ``features1`` uses a small kernel (fine temporal resolution) and
    ``features2`` a large kernel (coarse temporal resolution).  Each
    branch output is pooled to a fixed time length of 64, the two are
    concatenated along the time axis, and a final conv block fuses them.

    Args:
        in_channels: number of channels of the input signal.
            (Bug fix: the second branch previously hard-coded 4 input
            channels and silently ignored this argument.)
        drate: dropout probability used after the first conv stage of
            each branch and inside the fusion block.
    """

    def __init__(self, in_channels=4, drate=0.5):
        super(MRCNN, self).__init__()  # initialize nn.Module machinery
        print("MRCNN __init__")
        # GELU holds no parameters/state, so reusing one instance across
        # all Sequential stages is safe.
        self.GELU = nn.GELU()

        # Branch 1: small-kernel path (fine temporal resolution).
        self.features1 = nn.Sequential(
            # Conv 1: in_channels -> 64
            nn.Conv1d(in_channels, 64, kernel_size=50, stride=6, padding=24, bias=False),
            nn.BatchNorm1d(64),
            self.GELU,
            nn.MaxPool1d(kernel_size=8, stride=2, padding=3),
            nn.Dropout(drate),

            # Conv 2: 64 -> 128
            # NOTE(review): unlike the matching layer in features2, this
            # conv has no BatchNorm — confirm whether that is intentional.
            nn.Conv1d(64, 128, kernel_size=8, stride=1, padding=3, bias=False),
            self.GELU,

            # Conv 3: 128 -> 128
            nn.Conv1d(128, 128, kernel_size=8, stride=1, padding=3, bias=False),
            nn.BatchNorm1d(128),
            self.GELU,
            nn.MaxPool1d(kernel_size=4, stride=4, padding=1)
        )

        # Branch 2: large-kernel path (coarse temporal resolution).
        self.features2 = nn.Sequential(
            # Conv 1: in_channels -> 64
            # BUG FIX: was hard-coded to 4 input channels.
            nn.Conv1d(in_channels, 64, kernel_size=400, stride=50, padding=200, bias=False),
            nn.BatchNorm1d(64),
            self.GELU,
            nn.MaxPool1d(kernel_size=4, stride=2, padding=1),
            nn.Dropout(drate),

            # Conv 2: 64 -> 128
            nn.Conv1d(64, 128, kernel_size=7, stride=1, padding=3, bias=False),
            nn.BatchNorm1d(128),
            self.GELU,

            # Conv 3: 128 -> 128
            nn.Conv1d(128, 128, kernel_size=7, stride=1, padding=3, bias=False),
            nn.BatchNorm1d(128),
            self.GELU,
            nn.MaxPool1d(kernel_size=2, stride=2, padding=1)
        )

        # Pool each branch to a common fixed time length so the two can
        # be concatenated regardless of the input sequence length.
        self.adaptive_pool = nn.AdaptiveAvgPool1d(64)

        # Fusion block applied to the concatenated branch outputs.
        self.fusion = nn.Sequential(
            nn.Conv1d(128, 128, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm1d(128),
            self.GELU,
            nn.Dropout(drate)
        )

    def forward(self, x):
        """Extract fused multi-resolution features.

        Args:
            x: tensor of shape (batch, in_channels, time).

        Returns:
            Tensor of shape (batch, 128, 128): 128 feature channels over
            the two concatenated 64-step branch outputs.
        """
        # Branch 1 (fine resolution), pooled to a fixed length of 64.
        x1 = self.adaptive_pool(self.features1(x))   # (batch, 128, 64)

        # Branch 2 (coarse resolution), pooled to the same length.
        x2 = self.adaptive_pool(self.features2(x))   # (batch, 128, 64)

        # Concatenate along the time axis and fuse.
        x_fused = torch.cat([x1, x2], dim=2)         # (batch, 128, 128)
        x_fused = self.fusion(x_fused)               # (batch, 128, 128)

        return x_fused


class BaseModel(nn.Module):
    """Classifier head on top of the MRCNN feature extractor.

    Flattens the (batch, feature_channels, feature_sequence_length)
    feature map produced by MRCNN and maps it to ``num_classes`` logits
    through a small MLP.
    """

    def __init__(self):
        super(BaseModel, self).__init__()
        print("BaseModel __init__")

        # Model hyper-parameters.
        self.drop_rate = 0.6
        self.feature_channels = 128          # channels out of MRCNN
        self.feature_sequence_length = 128   # time steps out of MRCNN
        self.num_classes = 5

        # Feature extractor (4 input channels).
        self.mrcnn = MRCNN(in_channels=4, drate=self.drop_rate)

        # Flattened feature dimension, derived from the attributes above
        # instead of the previous hard-coded 128 * 128 literal so the
        # classifier and fc layers stay in sync with them.
        flat_dim = self.feature_channels * self.feature_sequence_length

        # Classification head.
        self.classifier = nn.Sequential(
            nn.Linear(flat_dim, 512),
            nn.GELU(),
            nn.Dropout(self.drop_rate),
            nn.Linear(512, self.num_classes)
        )

        # NOTE(review): this layer is never used in forward(); kept only
        # so existing checkpoints still load. Consider removing it.
        self.fc = nn.Linear(flat_dim, self.num_classes)

    def forward(self, x):
        """Classify a batch of input signals.

        Args:
            x: tensor of shape (batch, 4, time).

        Returns:
            Logits of shape (batch, num_classes).
        """
        # Extract features: (batch, channels, time).
        features = self.mrcnn(x)

        # Flatten everything but the batch dimension.
        flattened_features = features.reshape(features.size(0), -1)

        logits = self.classifier(flattened_features)

        return logits
