import os
import shutil
import json
from pathlib import Path
import torch
import gradio as gr
import numpy as np
from pathlib import Path
import pydicom
from decord import VideoReader
from random import randint
from torchvision.transforms.functional import resize
from .MyoEcho import MyoEcho, ViewClassifier
import logging
from .UserManage import UserManager
from .utils import sha256_js

class ModelPredictServer():
    """Gradio web server for cardiomyopathy prediction on echocardiogram data.

    Wraps the MyoEcho screening/diagnosis models and an echo-view classifier
    behind a login-protected UI. Uploaded echo videos (``.avi``/``.mp4``) or
    DICOM files are sampled into fixed-length clips, normalized, run through
    the models, and archived under a per-user storage directory.
    """

    # Frame-sampling stride when reading a video/DICOM sequence.
    VIDEO_SAMPLE_INTERNAL = 2
    # Number of frames sampled per clip.
    VIDEO_SAMPLE_NUM = 16

    def __init__(self, support_data_suffix:list[str], server_name:str, server_port:int, storage_path:str = "~", device="cpu"):
        """Build the server and validate its configuration.

        Args:
            support_data_suffix: accepted upload suffixes, with leading dot
                (e.g. ``[".avi", ".mp4", ".dcm"]``).
            server_name: address to bind the Gradio app to.
            server_port: port to bind the Gradio app to.
            storage_path: root directory for permanent per-user archives;
                ``~`` is expanded so the default actually resolves.
            device: torch device string for the models.

        Raises:
            RuntimeError: from ``param_check`` when configuration is invalid.
        """
        # Accepted upload file suffixes (with leading dot).
        self.support_data_suffix:list[str] = support_data_suffix
        # Bind address.
        self.server_name:str = server_name
        # Bind port.
        self.server_port:int = server_port
        # Storage root; expand "~" so the documented default is usable.
        self.storage_path:str = os.path.expanduser(storage_path) if storage_path else storage_path
        # Dataset mean used for input normalization.
        self.mean:float = 28.761289666666666
        # Normalization divisor (named "variance" in earlier comments, but it
        # is applied as a standard deviation below).
        self.std:float = 47.21568333333334
        # Torch device for both models.
        self.device = device
        # Screening/diagnosis model.
        self.MyoEcho = MyoEcho(self.device)
        # Echo-view classification model.
        self.view_classifier = ViewClassifier(self.device)
        # User authentication backend.
        self.user_manager = UserManager()
        # Fail fast on bad configuration.
        self.param_check()
        # Client-side JS run before the login request: hashes the password
        # with SHA-256 so the plaintext never leaves the browser.
        self.login_js = """
        function preprocessLogin(username, password) {
            // 使用SHA-256对密码进行哈希处理
            if(username.trim() === "" || password.trim() === ""){
            return [null, null]
            }
            const hashHex = sha256(password);
            return [username, hashHex];
        }
        """
        self.logger = logging.getLogger(__name__)

    def param_check(self):
        """Validate constructor parameters.

        Raises:
            RuntimeError: when the suffix list, server name or storage path is
                missing/empty, or the storage path is not writable.
        """
        if self.support_data_suffix is None or len(self.support_data_suffix) == 0:
            raise RuntimeError("support_data_suffix is None or empty!")
        if len(self.server_name) == 0:
            raise RuntimeError("server_name is empty!")
        if self.storage_path is None or len(self.storage_path) == 0:
            raise RuntimeError("storage_path is None or empty!")
        # The archive root must be writable for per-user persistence.
        if not os.access(self.storage_path, os.W_OK):
            raise RuntimeError(f"storage_path:{self.storage_path} is not writable!")

    def create_interface(self):
        """Assemble the main Gradio Blocks app (model page + login page)."""
        with gr.Blocks(title="心肌病诊断模型", theme=gr.themes.Soft(font=[gr.themes.GoogleFont("Inter"), "sans-serif"]),css_paths="css/style.css", fill_height=True,fill_width=True,head=sha256_js) as demo:
            # Per-session state shared by all event handlers.
            self.stat = gr.State({})
            # Prediction page must exist before the login page references it.
            self.model_column = self.model_interface()
            # Login page (initially visible).
            self.login_column = self.login_interface()
        return demo

    def process_hashed_login(self, user_name, hashed_password, session_info):
        """Authenticate a user whose password was SHA-256 hashed client-side.

        Returns two ``gr.update`` objects toggling (login_column,
        model_column) visibility; on success the login view is hidden and the
        model view is shown.

        Raises:
            gr.Error: on empty or invalid credentials.
        """
        # Blank fields (the client JS sends nulls for blank inputs).
        if not user_name or not hashed_password:
            raise gr.Error("账号或密码为空！",print_exception=False)
        # Delegate the actual credential check.
        ok, _, remark = self.user_manager.authenticate(user_name, hashed_password)
        if not ok:
            raise gr.Error("用户名或密码错误！",print_exception=False)
        # Remember the user in the session state.
        session_info["userName"] = user_name
        session_info["remark"] = remark
        # Ensure the user's archive directory exists (race-free, unlike the
        # previous exists-then-mkdir sequence).
        try:
            os.makedirs(os.path.join(self.storage_path, user_name), exist_ok=True)
        except Exception as e:
            self.logger.error(str(e))
            return gr.update(visible=True), gr.update(visible=False)
        return gr.update(visible=False), gr.update(visible=True)

    def login_interface(self):
        """Build the login column and wire the login button."""
        with gr.Column(visible=True, elem_id="login_column",min_width=300) as column:
            username = gr.Textbox(label="用户名", placeholder="请输入用户名",max_length=80)
            password = gr.Textbox(label="密码", placeholder="请输入密码", type="password",max_length=80)
            login_btn = gr.Button("登录", variant="primary")
            # Client-side JS hashes the password before it is sent.
            login_btn.click(
                fn=self.process_hashed_login,
                inputs=[username, password, self.stat],
                js=self.login_js,
                outputs=[column, self.model_column]
            )
        return column

    def model_interface(self):
        """Build the (initially hidden) prediction page with all model tabs."""
        with gr.Column(visible=False, elem_id="model_predict") as column:
            gr.Markdown("# 超声心动视频分析", padding=True,elem_id="title")
            gr.Markdown("<h2>上传超声心动视频或DCM文件，模型将预测患者可能患各类疾病的概率。</h2>")
            gr.Markdown("单视图模型目前前支持**PLAX**、**A2C**、**A3C**和**A4C**视图，多视图模型目前支持**A2C+A4C**和**A3C+A4C**。\n\n平台目前支持的输入格式为: `avi`和`mp4`格式的超声心动视频文件以及以`.dcm`为后缀的DCM文件.")
            with gr.Tab("筛查模型"):
                self.single_view_screening_interface()
            with gr.Tab("单视图诊断模型"):
                self.single_view_diagnosis_interface()
            with gr.Tab("多视图诊断模型"):
                self.multi_view_diagnosis_interface()
            with gr.Tab("自动视图分类筛查"):
                self.auto_classify_single_view_screening_interface()
            with gr.Tab("自动视图分类诊断"):
                self.auto_classify_multi_view_diagnosis_interface()
        return column

    def single_view_screening_interface(self):
        """Build the single-view screening tab."""
        gr.Label("筛查模型", show_label=False)
        # View selector; changing it relabels and clears the file input.
        radio = gr.Radio(["PLAX","A2C","A3C","A4C"], label="请选择模型", value="PLAX", interactive=True)
        # Upload widget.
        file_input = gr.File(label="PLAX视图数据", visible=True, file_count='single', container=True)
        # Prediction trigger.
        analyze_btn = gr.Button("分析")
        # Prediction result display.
        result_output = gr.Label("模型预测结果")

        def modelChange(view: str):
            # Relabel the uploader for the chosen view and drop stale files.
            return gr.update(label=f"{view}视图数据", value=None)

        radio.change(fn=modelChange, inputs=radio, outputs=file_input)

        analyze_btn.click(
            fn=self.analyze_single_view_screening,
            inputs=[file_input, radio, self.stat],
            outputs=result_output
        )

    def single_view_diagnosis_interface(self):
        """Build the single-view diagnosis tab."""
        gr.Label("单视图诊断模型", show_label=False)
        # View selector; changing it relabels and clears the file input.
        radio = gr.Radio(["PLAX","A2C","A3C","A4C"], label="请选择模型", value="PLAX", interactive=True)
        # Upload widget.
        file_input = gr.File(label="PLAX视图数据", visible=True, file_count='single', container=True)
        # Prediction trigger.
        analyze_btn = gr.Button("分析")
        # Prediction result display.
        result_output = gr.Label("模型预测结果")

        def modelChange(view:str):
            # Relabel the uploader for the chosen view and drop stale files.
            return gr.update(label=f"{view}视图数据", value=None)

        radio.change(fn=modelChange, inputs=radio, outputs=file_input)

        analyze_btn.click(
            fn=self.analyze_single_view_diagnosis,
            inputs=[file_input, radio, self.stat],
            outputs=result_output
        )

    def multi_view_diagnosis_interface(self):
        """Build the two-view fusion diagnosis tab."""
        gr.Label("多视图诊断模型", show_label=False)
        radio = gr.Radio(["A3C + A4C", "A2C + A4C"], label="请选择模型", value="A3C + A4C", interactive=True)
        with gr.Row():
            file1_input = gr.File(label="A3C视图数据", visible=True, file_count='single', container=True)
            file2_input = gr.File(label="A4C视图数据", visible=True, file_count='single', container=True)
        # Prediction trigger.
        analyze_btn = gr.Button("分析")
        result_output = gr.Label("模型预测结果")

        def modelChange(views:str):
            # "A3C + A4C" -> relabel both uploaders and drop stale files.
            first, second = (p.strip() for p in views.split('+'))
            return (gr.update(label=f"{first}视图数据", value=None),
                    gr.update(label=f"{second}视图数据", value=None))

        radio.change(fn=modelChange, inputs=radio, outputs=[file1_input, file2_input])

        analyze_btn.click(self.analyze_multi_view_diagnosis, inputs=[file1_input,file2_input,radio,self.stat],outputs=result_output)

    def auto_classify_single_view_screening_interface(self):
        """Build the auto-view-classification + screening tab."""
        gr.Label("自动视图分类+筛查", show_label=False)
        # Upload widget (view is inferred, so no selector here).
        file_input = gr.File(label="视图数据", visible=True, file_count='single', container=True)
        # Prediction trigger.
        analyze_btn = gr.Button("分析")
        # Prediction result display.
        result_output = gr.Label("模型预测结果")

        analyze_btn.click(
            fn=self.auto_classify_single_view_screening,
            inputs=[file_input, self.stat],
            outputs=result_output
        )

    def auto_classify_multi_view_diagnosis_interface(self):
        """Build the auto-view-classification + multi-view diagnosis tab."""
        gr.Label("自动视图分类+诊断", show_label=False)
        # Multiple uploads; views are inferred per file.
        file_inputs = gr.File(label="视图数据",  visible=True, file_count='multiple', container=True)
        # Prediction trigger.
        analyze_btn = gr.Button("自动视图分类+分析")
        # Prediction result display.
        result_output = gr.Label("模型预测结果")

        analyze_btn.click(
            fn=self.auto_classify_multi_view_diagnosis,
            inputs=[file_inputs, self.stat],
            outputs=result_output
        )

    def data_check(self, data_path:str) -> bool:
        """Return True if the uploaded file looks like a supported input.

        Files without a suffix are accepted only when they carry the DICOM
        magic bytes (``DICM`` at offset 128); suffixed files are checked
        against ``support_data_suffix``.
        """
        suffix = Path(data_path).suffix
        if suffix == '':
            # Suffix-less upload: sniff the DICOM preamble. Use a context
            # manager so the handle is always closed (the old code leaked it).
            with open(data_path, "rb") as f:
                f.seek(128)
                magic_number = f.read(4)
            return magic_number == b"DICM"
        return suffix in self.support_data_suffix

    def video_sample(self, video_path:str):
        """Sample ``VIDEO_SAMPLE_NUM`` frames (stride ``VIDEO_SAMPLE_INTERNAL``)
        from a video and resize them to 224x224.

        Returns a tensor of shape (3, VIDEO_SAMPLE_NUM, 224, 224).
        """
        vr = VideoReader(video_path)
        # Public length API instead of the private attribute `_num_frame`.
        frame_num = len(vr)
        stride = ModelPredictServer.VIDEO_SAMPLE_INTERNAL
        num = ModelPredictServer.VIDEO_SAMPLE_NUM
        if frame_num < num * stride:
            # Short clip: take every stride-th frame from the start.
            idxes = np.arange(0, frame_num, stride)
        else:
            # Long clip: pick a random window that still yields `num` frames.
            # (No -1 here: the old `-1` made randint(0, -1) raise ValueError
            # when frame_num == num * stride.)
            start_idx = randint(0, frame_num - num * stride)
            idxes = np.arange(start_idx, frame_num, stride)[:num]
        # Decode the selected frames.
        imgs = [vr[idx].asnumpy() for idx in idxes]
        assert len(imgs) > 0, f"{video_path}, frame length is {len(imgs)}"
        # Zero-pad short clips up to the fixed length.
        while len(imgs) < num:
            imgs.append(np.zeros_like(imgs[0]))
        # (T, H, W, C) -> (C, T, H, W)
        frames = torch.from_numpy(np.stack(imgs)).permute([3, 0, 1, 2])
        frames = resize(frames, [224, 224], antialias=None)
        return frames

    def dcm_sample(self, dcm_path:str):
        """Sample ``VIDEO_SAMPLE_NUM`` frames (stride ``VIDEO_SAMPLE_INTERNAL``)
        from a DICOM cine file and resize them to 224x224.

        Returns a tensor of shape (C, VIDEO_SAMPLE_NUM, 224, 224).

        Raises:
            RuntimeError: when the pixel data is a single image or does not
                have 4 dimensions (frames, height, width, channels).
        """
        stride = ModelPredictServer.VIDEO_SAMPLE_INTERNAL
        num = ModelPredictServer.VIDEO_SAMPLE_NUM
        dcm = pydicom.dcmread(dcm_path)
        pixels = dcm.pixel_array
        if pixels.ndim == 3:
            if pixels.shape[-1] == 3:
                # (H, W, 3): a single RGB image, not a cine sequence.
                raise RuntimeError("This data is for a single image.")
            # (T, H, W) grayscale cine: replicate to 3 channels.
            pixels = np.repeat(pixels[..., None], 3, axis=3)
        if pixels.ndim != 4:
            raise RuntimeError(f"Data dimension error, expected dimension is 4(l, h, w, c), current dimension is {pixels.ndim}({pixels.shape})")
        frame_num = pixels.shape[0]
        if frame_num < num * stride:
            # Short sequence: take every stride-th frame from the start.
            idxes = np.arange(0, frame_num, stride)
        else:
            # Long sequence: random window yielding `num` frames.
            start_idx = randint(0, frame_num - num * stride)
            idxes = np.arange(start_idx, frame_num, stride)[:num]
        frames = pixels[idxes]
        # Zero-pad short sequences up to the fixed length.
        if frames.shape[0] < num:
            pad = np.zeros((num - frames.shape[0],) + frames.shape[1:])
            frames = np.concatenate([frames, pad], axis=0)
        # (T, H, W, C) -> (C, T, H, W)
        frames = torch.from_numpy(frames).permute([3,0,1,2])
        frames = resize(frames, [224, 224], antialias=None)
        return frames

    def data_process(self, data_path:str):
        """Decode, sample and normalize one input file.

        Returns a normalized tensor with a leading batch dimension.

        Raises:
            gr.Error: when the file cannot be parsed.
        """
        path = Path(data_path)
        try:
            # Path.suffix includes the dot, so compare against ".dcm"
            # (the old comparison with "dcm" never matched and sent DICOM
            # files to the video decoder).
            if path.suffix.lower() in ("", ".dcm"):
                frame = self.dcm_sample(str(path))
            else:
                frame = self.video_sample(str(path))
            # Normalize with the dataset statistics.
            frame = (frame - self.mean) / self.std
            return frame.unsqueeze(0)
        except RuntimeError as r:
            self.logger.error(r)
            raise gr.Error(message=f"{path.name} data parse error.{r}",print_exception=False)
        except Exception as e:
            self.logger.error(e)
            raise gr.Error(message=f"{path.name} data parse error",print_exception=False)

    def storage_file(self, data_path:str, user_name:str, view:str) -> str:
        """Copy an upload into the user's permanent storage directory.

        The archive name is ``<randdir>-<view><suffix>``; ``<randdir>`` is the
        random per-upload directory Gradio uses, which keeps names unique.
        Returns the destination path.
        """
        path = Path(data_path)
        # NOTE(review): assumes the Gradio layout <tmp>/<random>/<file> —
        # parts[-2] is the random directory component.
        rand_num = path.parts[-2]
        storage_file_name = rand_num + "-" + view.strip() + path.suffix
        dest = os.path.join(self.storage_path, user_name, storage_file_name)
        shutil.copy(data_path, dest)
        return dest

    def _ensure_supported(self, data_path:str):
        """Raise gr.Error if the upload is missing or not a supported type.

        Replaces the old checks that referenced the never-defined
        ``self.separator`` (an AttributeError on every error path).
        """
        if not data_path:
            raise gr.Error("data is empty!",print_exception=False)
        if not self.data_check(data_path):
            raise gr.Error(message=f"{Path(data_path).name} 不支持的文件！",print_exception=False)

    def analyze_single_view_screening(self, data_path:str, view:str, user_info):
        """Run the screening model on one file for an explicitly chosen view."""
        self._ensure_supported(data_path)
        data:torch.Tensor = self.data_process(data_path)
        # Model inference.
        result = self.MyoEcho.predict_screening(data, view)
        # Archive the upload.
        save_path = self.storage_file(data_path, user_info["userName"], view)
        self.logger.info(f"[user: {user_info['userName']}, remark: {user_info['remark']}], file: {save_path}, view: {view}, result: {result}")
        return result

    def analyze_single_view_diagnosis(self, data_path:str, view:str, user_info:dict[str, str]):
        """Run the diagnosis model on one file for an explicitly chosen view."""
        self._ensure_supported(data_path)
        data:torch.Tensor = self.data_process(data_path)
        # Model inference.
        result = self.MyoEcho.predict_diagnosis(data, view)
        # Archive the upload.
        save_path = self.storage_file(data_path, user_info["userName"], view)
        self.logger.info(f"[user: {user_info['userName']}, remark: {user_info['remark']}], file: {save_path}, view: {view}, result: {result}")
        return result

    def analyze_multi_view_diagnosis(self, data_path1:str, data_path2:str, multi_view:str, user_info:dict[str, str]):
        """Run the two-view fusion diagnosis model (e.g. "A3C + A4C")."""
        # Truthiness guard also covers None (len(None) used to crash here).
        if not data_path1 or not data_path2:
            raise gr.Error("data is empty!",print_exception=False)
        self._ensure_supported(data_path1)
        self._ensure_supported(data_path2)
        data1:torch.Tensor = self.data_process(data_path1)
        data2:torch.Tensor = self.data_process(data_path2)
        # Model inference. NOTE(review): each tensor already carries a batch
        # dim, so the stack is (2, 1, C, T, H, W) — confirm predict_diagnosis
        # expects this layout.
        result = self.MyoEcho.predict_diagnosis(torch.stack([data1, data2]), multi_view)
        # Archive both uploads, tagged with their respective views.
        view1, view2 = multi_view.split('+')[0], multi_view.split('+')[1]
        save_path1 = self.storage_file(data_path1, user_info["userName"], view1)
        save_path2 = self.storage_file(data_path2, user_info["userName"], view2)
        self.logger.info(f"[user: {user_info['userName']}, remark: {user_info['remark']}], file1: {save_path1}, file2: {save_path2}, view: {multi_view}, result: {result}")
        return result

    def auto_classify_single_view_screening(self, data_path:str, user_info):
        """Classify the view of one upload, then run the screening model."""
        self._ensure_supported(data_path)
        data:torch.Tensor = self.data_process(data_path)
        # Infer the echo view from the first sampled frame.
        view = self.view_classifier.predict(data[:,:,0])[0]
        # Model inference.
        result = self.MyoEcho.predict_screening(data, view)
        # Archive the upload.
        save_path = self.storage_file(data_path, user_info["userName"], view)
        self.logger.info(f"[user: {user_info['userName']}, remark: {user_info['remark']}], file: {save_path}, view: {view}, result: {result}")
        return gr.update(value=result, label=view)

    def auto_classify_multi_view_diagnosis(self, data_paths:list[str], user_info:dict[str, str]):
        """Classify the view of each upload, then pick the best diagnosis model.

        Preference order: A3C+A4C fusion, A2C+A4C fusion, then the best
        available single view.

        Raises:
            gr.Error: on no/too many files, unsupported files, or when no
                usable view is recognized.
        """
        # Truthiness guard also covers None (len(None) used to crash here).
        if not data_paths:
            raise gr.Error("The number of files is 0!",print_exception=False)
        if len(data_paths) > 4:
            raise gr.Error("The number of files is greater than four!",print_exception=False)
        # Validate every file before doing any heavy work.
        for data_path in data_paths:
            self._ensure_supported(data_path)
        # Decode + classify each upload. NOTE(review): two uploads with the
        # same predicted view overwrite each other — last one wins.
        view_info = {}
        for data_path in data_paths:
            data:torch.Tensor = self.data_process(data_path)
            view = self.view_classifier.predict(data[:,:,0])[0]
            view_info[view] = data
            # Archive the upload.
            save_path = self.storage_file(data_path, user_info["userName"], view)
            self.logger.info(f"[user: {user_info['userName']}, remark: {user_info['remark']}], file: {save_path},  view: {view}")
        # Prefer two-view fusion models when both views are present.
        for v1, v2 in (("A3C", "A4C"), ("A2C", "A4C")):
            if v1 in view_info and v2 in view_info:
                name = f"{v1} + {v2}"
                gr.Info(f"使用{name}模型进行诊断")
                result = self.MyoEcho.predict_diagnosis(torch.stack([view_info[v1], view_info[v2]]), name)
                self.logger.info(f"[user: {user_info['userName']}, remark: {user_info['remark']}], view: {name}, result: {result}")
                return gr.update(label=name, value=result)
        # Fall back to the best available single-view model.
        for view in ("A4C", "A3C", "PLAX", "A2C"):
            if view in view_info:
                gr.Info(f"使用{view}模型进行诊断")
                result = self.MyoEcho.predict_diagnosis(view_info[view], view)
                self.logger.info(f"[user: {user_info['userName']}, remark: {user_info['remark']}], view: {view}, result: {result}")
                return gr.update(label=view, value=result)
        # Nothing usable was recognized; fail loudly instead of returning None.
        raise gr.Error("未识别出支持的视图！",print_exception=False)

    def run(self):
        """Build the interface and launch the Gradio server (blocking)."""
        app = self.create_interface()
        app.queue().launch(server_name=self.server_name, server_port=self.server_port, share=False)

if __name__ == "__main__":
    # Launch a local prediction server accepting echo videos and DICOM files.
    supported_suffixes = [".avi", ".mp4", ".dcm"]
    ModelPredictServer(supported_suffixes, "127.0.0.1", 7861, "/home/debian/storage_dir").run()
        