import torch
import torch.nn as nn
import logging
from typing import Dict
import torchvision 

class single_view_screening(torch.nn.Module):
    """Binary screening head over a single 512-d view feature vector.

    Outputs a sigmoid probability in (0, 1); input features come from the
    shared video encoder.
    """

    def __init__(self):
        super().__init__()
        # Classifier: 512 -> 128 -> 1 with batch-normed hidden layers and
        # sigmoid activations throughout. Built as a flat list so the
        # Sequential child indices (and thus state-dict keys) stay fixed.
        head = [nn.BatchNorm1d(512), nn.Dropout(0.4), nn.Sigmoid()]
        head += [nn.Linear(512, 128), nn.BatchNorm1d(128), nn.Sigmoid()]
        head += [nn.Linear(128, 1), nn.Sigmoid()]
        self.fc = nn.Sequential(*head)

    def forward(self, x: torch.Tensor):
        """Return the screening probability for each row of *x* (batch, 512)."""
        return self.fc(x)
    
class single_view_diagnosis(torch.nn.Module):
    """5-way diagnosis head over a single 512-d view feature vector."""

    def __init__(self):
        super().__init__()
        # Classifier tapering 512 -> 256 -> 128 -> 64 -> 5. Each Linear is
        # followed by LayerNorm and Sigmoid; the input itself is normalized
        # and dropped out first. The loop emits modules in exactly the same
        # order as an explicit Sequential listing, so state-dict keys and
        # seeded initialization are unchanged.
        stack = [nn.LayerNorm(512), nn.Dropout(0.2), nn.Sigmoid()]
        widths = [512, 256, 128, 64, 5]
        for n_in, n_out in zip(widths[:-1], widths[1:]):
            stack += [nn.Linear(n_in, n_out), nn.LayerNorm(n_out), nn.Sigmoid()]
        self.fc = nn.Sequential(*stack)

    def forward(self, x):
        """Return per-class sigmoid scores (..., 5) for features *x*."""
        return self.fc(x)

class multi_view_diagnosis(torch.nn.Module):
    """5-way diagnosis head over a fused pair of 512-d view features."""

    def __init__(self):
        super().__init__()
        # Classifier tapering 1024 -> 512 -> 256 -> 128 -> 64 -> 5, with
        # LayerNorm + Sigmoid after every Linear. The loop preserves the
        # exact module order of the original Sequential, keeping state-dict
        # keys and seeded initialization identical.
        stack = [nn.LayerNorm(1024), nn.Dropout(0.2), nn.Sigmoid()]
        widths = [1024, 512, 256, 128, 64, 5]
        for n_in, n_out in zip(widths[:-1], widths[1:]):
            stack += [nn.Linear(n_in, n_out), nn.LayerNorm(n_out), nn.Sigmoid()]
        self.fc = nn.Sequential(*stack)

    def forward(self, x: torch.Tensor):
        """Fuse two per-view feature tensors and classify.

        *x* indexes two feature tensors (x[0], x[1]); they are concatenated
        along dim 1 and squeezed before the classifier, matching the
        original behavior (a batch of 1 collapses to a 1-D vector).
        """
        fused = torch.concat([x[0], x[1]], dim=1)
        fused = fused.squeeze()
        return self.fc(fused)

class MyoEcho():
    """Cardiomyopathy screening/diagnosis pipeline over echo videos.

    Holds one shared MViTv2-S video encoder (projecting to 512-d features)
    plus per-view decoder heads: binary screening heads and 5-class
    diagnosis heads (single- and multi-view). All weights are loaded from
    fixed paths under ``modelData/`` at construction time.
    """
    def __init__(self, device:str|torch.device):
        """
        Initialize the pipeline: load the encoder, then register every
        screening/diagnosis decoder via `registered_model`.

        Args:
            device: target device for all loaded modules.
        """
        self.device: str | torch.device = device
        self.logger = logging.getLogger(__name__)
        self.logger.info(f"模型运行设备: {self.device}")
        # Load the shared video encoder (MViTv2-S) and replace its final
        # head layer with a 512-d projection so outputs match the decoders.
        self.encoder:torch.nn.Module = torchvision.models.video.mvit_v2_s()
        self.encoder.head[-1] = torch.nn.Linear(
        self.encoder.head[-1].in_features, 512)
        checkpoint = torch.load(r"modelData/Encoder.pt",map_location="cpu")
        self.encoder.load_state_dict(checkpoint)
        self.encoder.eval()
        self.encoder.to(device)
        self.logger.info("Encoder模型加载成功")
        # Diagnosis decoders, keyed by view name (e.g. "PLAX", "A2C + A4C").
        self.diagnosis_decoder_map:Dict[str, torch.nn.Module] = dict()
        # Screening decoders, keyed by single-view name.
        self.screening_decoder_map:Dict[str, torch.nn.Module] = dict()

        # Class index -> disease label for the 5-way diagnosis outputs.
        self.label_map = {
            0:'RCM',  1:'DCM', 2:'ARVC', 3:'NDLVC', 4:'HCM'
        }
        # NOTE(review): `self.label` is never read or written elsewhere in
        # this class — candidate for removal (confirm no external callers).
        self.label = []
        self.registered_model()

    def load_sing_view_screening_model(self, model_weight:str):
        """Load a single-view screening decoder from a .pt checkpoint.

        The checkpoint is expected to store the state dict under "model".
        Returns the decoder in eval mode on `self.device`.
        """
        decoder = single_view_screening()
        weight_data = torch.load(model_weight, map_location='cpu')
        decoder.load_state_dict(weight_data["model"])
        decoder.to(self.device)
        decoder.eval()
        return decoder
    
    def load_sing_view_diagnosis_model(self, model_weight:str):
        """Load a single-view diagnosis decoder (same checkpoint layout as
        the screening loader). Returns it in eval mode on `self.device`."""
        decoder = single_view_diagnosis()
        weight_data = torch.load(model_weight, map_location='cpu')
        decoder.load_state_dict(weight_data["model"])
        decoder.to(self.device)
        decoder.eval()
        return decoder
    
    def load_multi_view_diagnosis_model(self, model_weight:str):
        """Load a two-view diagnosis decoder (same checkpoint layout).
        Returns it in eval mode on `self.device`."""
        decoder = multi_view_diagnosis()
        weight_data = torch.load(model_weight, map_location='cpu')
        decoder.load_state_dict(weight_data["model"])
        decoder.to(self.device)
        decoder.eval()
        return decoder


    def registered_model(self):
        """Load and register every decoder head from ``modelData/``."""
        # Diagnosis decoders: four single-view heads plus two fused-view heads.
        self.diagnosis_decoder_map["PLAX"] =  self.load_sing_view_diagnosis_model(r"modelData/DiagnosisModelData/PLAX.pt")
        self.logger.info("PLAX诊断模型加载成功")
        self.diagnosis_decoder_map["A2C"] =  self.load_sing_view_diagnosis_model(r"modelData/DiagnosisModelData/A2C.pt")
        self.logger.info("A2C诊断模型加载成功")
        self.diagnosis_decoder_map["A3C"] =  self.load_sing_view_diagnosis_model(r"modelData/DiagnosisModelData/A3C.pt")
        self.logger.info("A3C诊断模型加载成功")
        self.diagnosis_decoder_map["A4C"] =  self.load_sing_view_diagnosis_model(r"modelData/DiagnosisModelData/A4C.pt")
        self.logger.info("A4C诊断模型加载成功")
        self.diagnosis_decoder_map["A2C + A4C"] =  self.load_multi_view_diagnosis_model(r"modelData/DiagnosisModelData/A2C+A4C.pt")
        self.logger.info("A2C+A4C诊断模型加载成功")
        self.diagnosis_decoder_map["A3C + A4C"] =  self.load_multi_view_diagnosis_model(r"modelData/DiagnosisModelData/A3C+A4C.pt")
        self.logger.info("A3C+A4C诊断模型加载成功")
        # Screening decoders: one binary head per single view.
        self.screening_decoder_map["PLAX"] =self.load_sing_view_screening_model(r"modelData/ScreeningModelData/PLAX.pt")
        self.logger.info("PLAX筛查模型加载成功")
        self.screening_decoder_map["A2C"] = self.load_sing_view_screening_model(r"modelData/ScreeningModelData/A2C.pt")
        self.logger.info("A2C筛查模型加载成功")
        self.screening_decoder_map["A3C"] = self.load_sing_view_screening_model(r"modelData/ScreeningModelData/A3C.pt")
        self.logger.info("A3C筛查模型加载成功")
        self.screening_decoder_map["A4C"] = self.load_sing_view_screening_model(r"modelData/ScreeningModelData/A4C.pt")
        self.logger.info("A4C筛查模型加载成功")

    @torch.no_grad()
    def predict_diagnosis(self,data:torch.Tensor, view:str):
        """
        Run diagnosis prediction for one registered view.

        Args:
            data: encoder input; for a multi-view key ("+" in *view*) a
                pair of inputs indexed as data[0]/data[1].
                NOTE(review): expected tensor shape is not visible here —
                confirm against the encoder's input contract.
            view: registered key, e.g. "PLAX" or "A2C + A4C".

        Returns:
            dict mapping disease label -> softmax probability on success;
            torch.zeros(5) when the view has no registered model.
            NOTE(review): error return type (Tensor) differs from the
            success type (dict) — callers must handle both.
        """
        if not view in self.diagnosis_decoder_map.keys():
            self.logger.error(f"{view} 无匹配模型")
            return torch.zeros(5)
        decoder = self.diagnosis_decoder_map[view]
        if '+' in view:
            # Multi-view: encode each view separately, stack to (2, B, 512);
            # the decoder concatenates the pair internally.
            feature1  = self.encoder(data[0])
            feature2  = self.encoder(data[1])
            feature = torch.stack([feature1, feature2])
            predict =  decoder(feature).softmax(-1).squeeze()
        else:
            feature = self.encoder(data)
            predict =  decoder(feature).softmax(-1).squeeze()
        format_out = {}
        for i in range(5):
            format_out[self.label_map[i]] = predict[i].item()
        return format_out

    @torch.no_grad()
    def predict_screening(self,data:torch.Tensor, view:str):
        """
        Run binary screening prediction for one registered view.

        Returns:
            {"Probability": p} on success; torch.zeros(5) when the view has
            no registered model.
            NOTE(review): the error return (5-element Tensor) matches
            neither the success type nor a scalar — likely copy-pasted from
            `predict_diagnosis`; confirm callers before changing.
        """
        if not view in self.screening_decoder_map.keys():
            self.logger.error(f"{view} 无匹配模型")
            return torch.zeros(5)
        decoder = self.screening_decoder_map[view]
        feature = self.encoder(data)
        predict =  decoder(feature)
        format_out = {"Probability":predict.item()}
        return format_out
    
class ViewClassifier:
    """Classifies echo inputs into one of four coarse views.

    Wraps a torchvision ConvNeXt-Base fine-tuned with an 11-class head;
    only the four view classes of interest (A2C, A3C, A4C, PLAX) are read
    out in `get_views`.
    """

    # Full label set of the 11-way classifier head. Only indices 0, 1, 2
    # and 7 (A2C, A3C, A4C, Parasternal_Long) are consumed by `get_views`;
    # hoisted to a class attribute instead of being rebuilt (unused) on
    # every call as in the original.
    COARSE_VIEWS = ['A2C',
                    'A3C',
                    'A4C',
                    'A5C',
                    'Apical_Doppler',
                    'Doppler_Parasternal_Long',
                    'Doppler_Parasternal_Short',
                    'Parasternal_Long',
                    'Parasternal_Short',
                    'SSN',
                    'Subcostal']

    # Output labels for the four logit columns selected in `get_views`,
    # in selection order [0, 1, 2, 7].
    VIEWS = ['A2C',
             'A3C',
             'A4C',
             'PLAX']

    def __init__(self, device) -> None:
        """Build the ConvNeXt backbone and load its fine-tuned weights.

        Args:
            device: device spec accepted by torch.device().
        """
        self.device = torch.device(device)
        self.model = torchvision.models.convnext_base()
        self.load_model_weight()

    def load_model_weight(self):
        """Load the fine-tuned view-classifier checkpoint onto self.model."""
        # 视图分类模型
        vc_checkpoint = torch.load("modelData/view_classifier.ckpt", map_location='cpu')
        # Strip the first 6 characters of every key — presumably a "model."
        # prefix added by the training framework; TODO confirm against the
        # checkpoint layout.
        vc_state_dict = {key[6:]: value for key, value in vc_checkpoint['state_dict'].items()}
        # Replace the stock classification head with the 11-way view head
        # before loading so the state-dict shapes match.
        self.model.classifier[-1] = torch.nn.Linear(
            self.model.classifier[-1].in_features, 11)
        self.model.load_state_dict(vc_state_dict)
        self.model.to(self.device)
        self.model.eval()

    def get_views(self, out_logits):
        """Map raw 11-class logits to the four coarse view labels.

        Args:
            out_logits: tensor of shape (batch, 11) of class logits.

        Returns:
            list[str]: one of 'A2C'/'A3C'/'A4C'/'PLAX' per batch row,
            chosen by argmax over columns [0, 1, 2, 7] only.
        """
        out_views = torch.argmax(out_logits[:, [0, 1, 2, 7]], dim=1)
        return [self.VIEWS[v] for v in out_views]

    @torch.no_grad()
    def predict(self, data):
        """Classify a batch and return its view labels.

        The decorator already disables gradients; the original's redundant
        inner `with torch.no_grad()` block was removed.
        """
        predict = self.model(data.float().to(self.device))
        return self.get_views(predict)