import time
import pandas as pd
import csv
import json
import os
from django_filters.rest_framework import FilterSet, CharFilter, NumberFilter
from ai import tasks
from ai.models import Projects, Models, Services
from ai.serializers import ModelsSerializer, ModelsCreateUpdateSerializer
from ai.utils.config import docker_path, dataset_path, docker_volume_path, docker_dataset_path, docker_flask_url
from ai.utils.datasets_util import divide_classify, divide_detect, coco_segment, coco_detect
from ai.utils.docker_util import create_container, check_container
from datasource.minio_config import minioClient
from dvadmin.utils.json_response import DetailResponse
from dvadmin.utils.viewset import CustomModelViewSet
from rest_framework.response import Response
import docker
import base64
import requests


class ModelFilterSet(FilterSet):
    """Query-string filters for the model list endpoint.

    Exposes a case-sensitive substring match on ``name`` and exact matches
    on ``creator_id``, ``type``, ``status`` and ``is_published``.
    """
    name = CharFilter(field_name="name", lookup_expr="contains")  # substring match (case-sensitive)
    creator_id = NumberFilter(field_name="creator_id", lookup_expr="exact")
    type = NumberFilter(field_name="type", lookup_expr="exact")  # 0=classify, 1=detect, 2=segment -- TODO confirm against Models.type
    status = NumberFilter(field_name="status", lookup_expr="exact")
    is_published = NumberFilter(field_name="is_published", lookup_expr="exact")


class ModelsView(CustomModelViewSet):
    """
    Model management endpoints.

    list: query
    create: add (launches a YOLOv5 training container)
    update: modify
    retrieve: single instance
    destroy: delete (manually cascades to the related Service)
    """
    queryset = Models.objects.all()
    serializer_class = ModelsSerializer
    create_serializer_class = ModelsCreateUpdateSerializer
    update_serializer_class = ModelsCreateUpdateSerializer
    filter_fields = ['creator_id']
    filterset_class = ModelFilterSet

    @staticmethod
    def _image_to_base64(path):
        """Read the image file at *path* and return it base64-encoded as a str."""
        with open(path, 'rb') as img_f:
            return base64.b64encode(img_f.read()).decode("utf-8")

    # Model testing: run inference on the previously uploaded test image.
    def predict(self, request):
        model_id = request.GET.get("id")
        model = Models.objects.get(pk=model_id)

        # Start the inference container if it is not already running.
        if not check_container():
            create_container()
            time.sleep(5)  # give the container time to come up -- TODO: poll readiness instead

        test_result_path = 'runs/' + model.name + '/test'  # where inference results are written
        data_path = test_result_path + '/test.jpg'  # image to run inference on
        # Ask the Flask service inside the container to run the model.
        params = {
            'model_type': model.type,
            'data_path': data_path,
            'test_result_path': test_result_path,
            'model_weights': model.weights
        }
        res = requests.get(url=docker_flask_url, params=params)
        print(res.text)

        # Return the annotated result image base64-encoded so the frontend can render it.
        picture_path = os.path.join(docker_path, test_result_path, 'exp', 'test.jpg')
        img_stream = self._image_to_base64(picture_path)
        return DetailResponse(data=img_stream, msg="测试成功")

    # Fetch the evaluation report (training metrics CSV plus curve images).
    def report(self, request):
        model_id = request.GET.get("id")
        model = Models.objects.get(pk=model_id)

        # Parse the training results CSV into a list of row dicts.
        train_result_path = 'runs/' + model.name + '/train'
        excel_path = os.path.join(docker_path, train_result_path, 'exp/results.csv')
        with open(excel_path, 'r') as f:
            # skipinitialspace strips the padding YOLOv5 writes after each comma.
            df_list = list(csv.DictReader(f, skipinitialspace=True))

        exp_dir = os.path.join(docker_path, train_result_path, 'exp')

        # Image classification: metrics table only.
        if model.type == 0:
            payload = [df_list]
        # Object detection: metrics plus confusion matrix and P/R/PR/F1 curves.
        elif model.type == 1:
            confusion_stream = self._image_to_base64(os.path.join(exp_dir, 'confusion_matrix.png'))
            p_stream = self._image_to_base64(os.path.join(exp_dir, 'P_curve.png'))
            r_stream = self._image_to_base64(os.path.join(exp_dir, 'R_curve.png'))
            pr_stream = self._image_to_base64(os.path.join(exp_dir, 'PR_curve.png'))
            f1_stream = self._image_to_base64(os.path.join(exp_dir, 'F1_curve.png'))
            payload = [df_list, confusion_stream, p_stream, r_stream, pr_stream, f1_stream]
        # Image segmentation: metrics plus confusion matrix.
        elif model.type == 2:
            confusion_stream = self._image_to_base64(os.path.join(exp_dir, 'confusion_matrix.png'))
            payload = [df_list, confusion_stream]
        else:
            # FIX: an unknown type previously left `data` unbound -> UnboundLocalError.
            return Response({"code": 4000, 'msg': "未知的模型类型"})

        return Response({
            "code": 2000,
            'msg': "获取成功",
            'data': payload
        })

    # When an image source is selected, copy the image from MinIO into the
    # model's test directory so `predict` can find it.
    def moveImg(self, request):
        url = str(request.data.get("url"))

        bucket_name = url.split('/')[-2]
        image_name = url.split('/')[-1]
        model_id = request.data.get("id")
        model = Models.objects.get(pk=model_id)

        # Ensure the test directory exists.
        test_result_path = 'runs/' + model.name + '/test'
        dir_path = os.path.join(docker_path, test_result_path)
        os.makedirs(dir_path, exist_ok=True)

        # Download the object; always release the MinIO connection afterwards.
        file_path = os.path.join(dir_path, "test.jpg")
        response = minioClient.get_object(bucket_name, image_name)
        try:
            # FIX: the file handle was previously left open (no context manager).
            with open(file_path, 'wb') as f:
                f.write(response.data)
        finally:
            response.close()
            response.release_conn()

        return Response({
            "code": 2000,
            'msg': "上传图片成功",
            'data': file_path,
        })

    # Upload a test image for a model; it is always stored as test.jpg.
    def uploadImg(self, request):
        # file.file is the uploaded byte stream, file.name the original filename.
        file = request.data.get("file")
        model_id = request.data.get("id")
        model = Models.objects.get(pk=model_id)

        # Ensure the test directory exists.
        test_result_path = 'runs/' + model.name + '/test'
        dir_path = os.path.join(docker_path, test_result_path)
        os.makedirs(dir_path, exist_ok=True)

        # The test image is always named test.jpg regardless of the upload name.
        file_path = os.path.join(dir_path, "test.jpg")
        with open(file_path, "wb") as f:
            f.write(file.file.read())

        return Response({
            "code": 2000,
            'msg': "上传图片成功",
            'data': file_path,
        })

    # Upload annotation data as <dataset_id>.json into the dataset directory.
    def uploadJson(self, request):
        dataset_id = str(request.data.get("id"))
        file = request.data.get("file")  # a JSON-serializable list
        name = dataset_id + '.json'

        # Serialize the list to pretty-printed JSON, keeping non-ASCII characters.
        str_json = json.dumps(file, indent=2, ensure_ascii=False)

        file_path = os.path.join(dataset_path, dataset_id, name)
        # FIX: the target directory was never created, so the first upload
        # for a dataset crashed with FileNotFoundError.
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        with open(file_path, "w") as f:
            f.write(str_json)
            f.flush()

        return Response({
            "code": 2000,
            'msg': "上传成功",
        })

    # Split/convert a dataset according to the project type.
    def handleDataset(self, request):
        dataset_id = str(request.GET.get("id"))
        dataset_type = request.GET.get("type")

        # Image classification
        if dataset_type == "0":
            divide_classify(dataset_id)
        # Object detection: convert COCO annotations then split train/val.
        elif dataset_type == "1":
            coco_detect(dataset_id)
            divide_detect(dataset_id)
        # Image segmentation
        elif dataset_type == "2":
            coco_segment(dataset_id)
            divide_detect(dataset_id)
        # NOTE(review): an unknown type silently does nothing and still reports success.

        return Response({
            "code": 2000,
            'msg': "获取成功",
        })

    # Poll the training status of a model.
    def getStatus(self, request):
        model_id = request.GET.get("id")
        model = Models.objects.get(pk=model_id)

        # status 2 = finished, 3 = stopped: terminal states, nothing to check.
        if model.status in (2, 3):
            return Response({
                "code": 2000,
                'msg': "获取成功",
                'data': model.status,
            })

        # The training container is named after the model; if no running
        # container carries that name, training has finished.
        client = docker.from_env()
        still_running = any(container.name == model.name
                            for container in client.containers.list())
        if not still_running:
            model.status = 2  # mark training as finished
            # Pick up the training duration the container wrote, if present.
            train_result_path = 'runs/' + model.name + '/train'
            time_path = os.path.join(docker_path, train_result_path, 'exp/time.txt')
            if os.path.exists(time_path):
                with open(time_path, 'r') as f:
                    lines = f.readlines()
                if lines:
                    model.time = lines[-1]  # the last line holds the final duration

        model.save()

        return Response({
            "code": 2000,
            'msg': "获取成功",
            'data': model.status,
        })

    # Stop a running training job.
    def stopTrain(self, request):
        model_id = request.GET.get("id")
        model = Models.objects.get(pk=model_id)
        tasks.stop_container.delay(model.name)  # stop the container asynchronously via Celery
        model.status = 3  # 3 = training stopped
        model.save()

        return Response({
            "code": 2000,
            'msg': "终止成功",
            'data': model.status,
        })

    @staticmethod
    def _build_train_command(project_type, dataset_id, model_version,
                             train_result_path, epochs, batchsize):
        """Build the YOLOv5 training command line for *project_type*.

        Returns None when the project type is not one of 0/1/2.
        """
        # Image classification
        if project_type == 0:
            dataset = docker_dataset_path + "/" + str(dataset_id)
            return ('python classify/train.py'
                    ' --model weights/' + model_version + '-cls.pt'
                    ' --data ' + dataset +
                    ' --epochs ' + str(epochs) +
                    ' --batch-size ' + str(batchsize) +
                    ' --project ' + train_result_path +
                    ' --img 224 --workers 0')
        # Object detection
        if project_type == 1:
            dataset = docker_dataset_path + "/" + str(dataset_id) + "/" + 'data.yaml'
            return ('python train.py'
                    ' --weights weights/' + model_version + '.pt'
                    ' --data ' + dataset +
                    ' --cfg ' + model_version + '.yaml'
                    ' --epochs ' + str(epochs) +
                    ' --batch-size ' + str(batchsize) +
                    ' --project ' + train_result_path +
                    ' --img 640 --workers 0')
        # Image segmentation
        if project_type == 2:
            dataset = docker_dataset_path + "/" + str(dataset_id) + "/" + 'data.yaml'
            return ('python segment/train.py'
                    ' --weights weights/' + model_version + '-seg.pt'
                    ' --data ' + dataset +
                    ' --cfg segment/' + model_version + '-seg.yaml'
                    ' --epochs ' + str(epochs) +
                    ' --batch-size ' + str(batchsize) +
                    ' --project ' + train_result_path +
                    ' --img 640 --workers 0')
        return None

    # Custom create: launch a training container, then persist the model record.
    def create(self, request, *args, **kwargs):
        client = docker.from_env()

        project = Projects.objects.get(id=request.data["project"])
        dataset_id = request.data["dataset_id"]  # dataset ID
        model_name = request.data["name"]  # model name
        model_version = request.data["model_version"]  # short version letter s/m/l/x
        train_result_path = 'runs/' + model_name + '/train'  # training output path

        # Map the short version letter to the full YOLOv5 variant name;
        # any other value is passed through unchanged (matches original behavior).
        version_map = {'s': 'yolov5s', 'm': 'yolov5m', 'l': 'yolov5l', 'x': 'yolov5x'}
        model_version = version_map.get(model_version, model_version)

        command_train = self._build_train_command(
            project.type, dataset_id, model_version, train_result_path,
            request.data["epochs"], request.data["batchsize"])
        if command_train is None:
            # FIX: an unknown project type previously crashed with an
            # unbound `command_train` NameError.
            return Response({"code": 4000, 'msg': "未知的项目类型"})

        print(command_train)
        # Launch training in a dedicated container named after the model.
        # NOTE(review): the host volume path is hard-coded -- consider
        # moving it to ai.utils.config alongside docker_volume_path.
        container = client.containers.run('ainew:latest',
                                          name=model_name,
                                          volumes={
                                              "/g/bishe/docker/test": {'bind': '/usr/src/app', 'mode': 'rw'}
                                          },
                                          ports={"6006": "6006"},
                                          detach=True, remove=True, tty=True,
                                          command=command_train)
        # Start TensorBoard inside the container so training can be monitored.
        command_tensorboard = ['tensorboard', '--logdir=' + train_result_path]
        container.exec_run(cmd=command_tensorboard, detach=True)

        # Record the expected training artefacts on the model row.
        request.data["type"] = project.type
        request.data["version"] = "V1.0"
        request.data["status"] = 1  # 1 = training in progress
        request.data["result"] = train_result_path + '/exp'
        request.data["weights"] = train_result_path + '/exp/weights/best.pt'
        serializer = self.get_serializer(data=request.data, request=request)
        serializer.is_valid(raise_exception=True)
        # FIX: perform_create() already calls serializer.save(); the extra
        # serializer.save() issued a redundant second write on every create.
        self.perform_create(serializer)
        return DetailResponse(data=serializer.data, msg="新增成功")

    # Manual cascading delete: a model's Service is deleted along with it.
    def destroy(self, request, *args, **kwargs):
        instance = self.get_object()
        try:
            service = Services.objects.get(model_id=instance.id)
        except Services.DoesNotExist:
            service = None

        # Soft delete by default; pass soft_delete=false for a hard delete.
        soft_delete = request.data.get('soft_delete', True)
        if soft_delete:
            instance.is_deleted = True
            instance.save()
            if service:
                service.is_deleted = True
                service.save()
        else:
            self.perform_destroy(instance)
            if service:
                self.perform_destroy(service)

        return DetailResponse(data=[], msg="删除成功")