# -*- coding: utf-8 -*-
from mmdet.apis import init_detector, inference_detector
import mmcv
import os
import cv2
import numpy as np
from scipy import ndimage
import time
from PIL import Image, ImageDraw, ImageFont
import subprocess as sp
from Config import CLASSES, Detector_para, Flask_para


"""
图像识别主函数
函数名称：main_inference
rtmp_path：rtmp地址
taskId
taskId：输入网络模型
 """
def main_inference(rtmp_path, taskid, network):
    """Main recognition loop for one task.

    Pulls frames from the input RTMP stream, runs detection on each frame and
    pushes the annotated frame to the per-task output RTMP stream.  When the
    source cannot be opened at all, the frames detected so far are assembled
    into a video file and ``False`` is returned; on transient failures the
    loop waits and reconnects.

    :param rtmp_path: RTMP address of the input stream
    :param taskid: task identifier used to build per-task output paths
    :param network: detection model (must expose ``solo_detect``)
    :return: ``False`` when the capture could not be opened; otherwise the
             function keeps looping and reconnecting.
    """
    cap_signal_flag = True
    # Per-task output directories and the initial frame counter.
    img_detect_out, img2video_path, my_num = init_para(taskid)
    # ffmpeg push pipe for the annotated output stream.
    p, fps, width, height = rtmp_p(rtmp_path, taskid)
    while True:
        cap = None
        try:
            cap = cv2.VideoCapture(rtmp_path)
            if not cap.isOpened():
                print("no cap, 程序等待中，请重新建立推流服务，并再次发起请求")
                # Assemble whatever frames were already detected into a video.
                img2video(img_detect_out, img2video_path, taskid, fps, width, height)
                cap_signal_flag = False
                return cap_signal_flag
            while True:
                ret, frame = cap.read()
                if ret:
                    print("frame success!")
                    # Detect objects in the frame and push the result downstream.
                    rtmp_detect(frame, my_num, img_detect_out, network, p)
                    my_num += 1
                else:
                    # Source stalled: push a black placeholder frame, wait a
                    # little, then break out to reopen the capture.
                    print("no frame!")
                    black_img = cv2.imread(Flask_para['base_path'] + Detector_para['blackimg'])
                    black_img = cv2.resize(black_img, (width, height))
                    # tobytes() replaces the deprecated ndarray.tostring().
                    p.stdin.write(black_img.tobytes())
                    time.sleep(5)
                    break
        except Exception:
            # Catch Exception rather than a bare except so that
            # KeyboardInterrupt/SystemExit still propagate.
            print("cap fail! 程序等待中，请重新建立推流服务，并再次发起请求")
            time.sleep(10)
            # Assemble the frames detected so far before retrying.
            img2video(img_detect_out, img2video_path, taskid, fps, width, height)
            continue
        finally:
            # Always release the capture before reconnecting or returning
            # (the original leaked one VideoCapture per reconnect cycle).
            if cap is not None:
                cap.release()

"""
关键参数定义
函数名称：init_para
"""
def init_para(taskid):
    """Build the per-task working directories and the initial frame counter.

    :param taskid: task identifier embedded in the output paths
    :return: tuple of (annotated-image directory, video output directory,
             first frame number)
    """
    img_detect_out = Flask_para['base_path'] + 'img_detect_out/{}/'.format(taskid)
    img2video_path = Flask_para['base_path'] + 'img2video/{}/'.format(taskid)
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(img_detect_out, exist_ok=True)
    os.makedirs(img2video_path, exist_ok=True)
    my_num = 1  # frames are numbered starting from 1

    return img_detect_out, img2video_path, my_num


def rtmp_p(rtmp_path, taskid):
    """Create the ffmpeg subprocess that pushes annotated frames out.

    Probes the input stream for its frame rate and geometry, then starts an
    ffmpeg process that reads raw BGR24 frames from stdin and pushes them as
    FLV/H.264 to the per-task output RTMP url.

    :param rtmp_path: RTMP address of the input stream (probed for metadata)
    :param taskid: task identifier appended to the output RTMP url
    :return: tuple of (ffmpeg Popen handle, fps, frame width, frame height)
    """
    rtmpUrl = Flask_para['rtmpurl'] + "{}".format(taskid)

    # Probe the source stream for frame rate and geometry.
    cap = cv2.VideoCapture(rtmp_path)
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # This capture is probe-only; the reading capture is opened by the caller.
    # The original never released it and leaked the handle.
    cap.release()
    if fps <= 0:
        # cap.get returns 0 when the stream is not ready; '-r 0' would make
        # ffmpeg fail, so fall back to a conventional frame rate.
        fps = 25
    # ffmpeg command: raw BGR frames in on stdin, FLV out to rtmpUrl.
    command = ['ffmpeg',
               '-y',
               '-f', 'rawvideo',
               '-vcodec', 'rawvideo',
               '-pix_fmt', 'bgr24',
               '-s', "{}x{}".format(width, height),
               '-r', str(fps),
               '-i', '-',
               '-c:v', 'libx264',
               '-pix_fmt', 'yuv420p',
               '-preset', 'ultrafast',
               '-f', 'flv',
               rtmpUrl]
    p = sp.Popen(command, stdin=sp.PIPE)

    return p, fps, width, height


def rtmp_detect(frame, my_num, img_detect_out, network, p):
    """Run detection on one frame and push the annotated frame to ffmpeg.

    :param frame: BGR frame (numpy array) read from the input stream
    :param my_num: current frame number (used to name the saved image)
    :param img_detect_out: directory where annotated frames are saved
    :param network: detection model exposing ``solo_detect``
    :param p: ffmpeg subprocess whose stdin receives the raw frame bytes
    """
    out_img, my_num = network.solo_detect(frame, my_num, img_detect_out)
    # tobytes() replaces the deprecated ndarray.tostring().
    p.stdin.write(out_img.tobytes())

"""
图像转视频格式
函数名称：img2video
img_detect_out：img_detect_out图像地址
img2video_path：视频地址
taskId：输入网络模型
fps：每秒的帧数
width：视频宽度
height：视频长度
 """
def img2video(img_detect_out, img2video_path, taskid, fps, width, height):
    """Assemble the annotated frames into an XVID ``.avi`` video.

    :param img_detect_out: directory holding the annotated frames
    :param img2video_path: directory receiving the output video
    :param taskid: task identifier, used as the video file name
    :param fps: frames per second of the output video
    :param width: output video width
    :param height: output video height
    """
    print("开始合成视频！")
    # Frames are saved as zero-padded '%08d.jpg', so a lexicographic sort
    # restores chronological order (os.listdir order is arbitrary, which
    # previously produced scrambled videos).
    img_list = sorted(os.listdir(img_detect_out))
    out_path = img2video_path + '{}.avi'.format(taskid)
    four_cc = cv2.VideoWriter_fourcc(*'XVID')
    size = (width, height)
    video_writer = cv2.VideoWriter(out_path, four_cc, float(fps), size)
    frame_count = 0
    for item in img_list:
        if item.endswith('.jpg') or item.endswith('.png'):
            img = cv2.imread(img_detect_out + item)
            if img is None:
                # Unreadable/corrupt image: skip instead of crashing resize.
                continue
            re_pics = cv2.resize(img, size, interpolation=cv2.INTER_CUBIC)
            video_writer.write(re_pics)
            frame_count += 1
    video_writer.release()
    print("video success!")
    if fps > 0:
        # Duration reported from the frames actually written.
        print("合成的视频有{}s".format(frame_count / fps))
    time.sleep(0.1)

'''
检测及识别神经网络模型构建类
名称：Network
作用：构建实例分割模型
'''
class Network:
    """SOLO instance-segmentation model wrapper.

    Loads the detector once at construction and annotates frames with masks,
    labels, confidence scores and mask areas via :meth:`solo_detect`.
    """

    # Per-frame label bookkeeping; reset at the start of every solo_detect
    # call (as class attributes they used to accumulate across frames).
    label_info = []
    label_info_st = []
    CLASSES = CLASSES

    def __init__(self):
        """Load the SOLO config/checkpoint and build the detector on GPU 0."""
        # Detector_para paths start with './'; strip it and anchor at base_path.
        config_file = Flask_para['base_path'] + Detector_para['config_file'].split('./')[1]
        checkpoint_file = Flask_para['base_path'] + Detector_para['checkpoint_file'].split('./')[1]
        self.model = init_detector(config_file, checkpoint_file, device='cuda:0')

    def solo_detect(self, image_path, my_num, img_detect_out):
        """Run SOLO inference on one frame and draw the results on it.

        :param image_path: frame to detect (numpy array or path; passed to
                           ``mmcv.imread`` / ``inference_detector``)
        :param my_num: frame number, used to name the saved annotated image
        :param img_detect_out: directory where annotated frames are saved
        :return: tuple of (annotated BGR image, frame number)
        """
        img = mmcv.imread(image_path)
        img_show = img.copy()
        h, w, _ = img.shape
        score_thr = 0.15          # minimum score for a mask to be drawn
        sort_by_density = True    # draw large masks first so small ones stay visible
        # Reset per-frame label lists; without this, earlier frames' labels
        # leaked into the "current frame" report below.
        self.label_info = []
        self.label_info_st = []

        start_time = time.time()
        results = inference_detector(self.model, image_path)
        end_time = time.time()
        t = end_time - start_time
        print("图片:{} 检测用时: {}秒, FPS={}".format(os.path.basename(str(my_num)), round(t, 2), round(1 / t, 1)))
        # Parse the model output: (masks, class indices, scores).
        if results:
            cur_result = results[0]
            seg_label = cur_result[0]
            seg_label = seg_label.cpu().numpy().astype(np.uint8)  # binary masks (0/1)
            cate_label = cur_result[1]
            cate_label = cate_label.cpu().numpy()  # class index per mask
            score = cur_result[2].cpu().numpy()

            # Keep only masks above the score threshold.
            vis_inds = score > score_thr
            seg_label = seg_label[vis_inds]
            num_mask = seg_label.shape[0]  # number of masks above threshold
            cate_label = cate_label[vis_inds]
            cate_score = score[vis_inds]

            if sort_by_density:
                # Order masks by pixel area (ascending); the drawing loop
                # below walks from the back, i.e. largest first.
                mask_density = []
                for idx in range(num_mask):
                    cur_mask = seg_label[idx, :, :]
                    cur_mask = mmcv.imresize(cur_mask, (w, h))
                    cur_mask = (cur_mask > 0.5).astype(np.int32)
                    mask_density.append(cur_mask.sum())
                orders = np.argsort(mask_density)
                seg_label = seg_label[orders]
                cate_label = cate_label[orders]
                cate_score = cate_score[orders]

            # Fixed seed keeps mask colours stable across frames.
            np.random.seed(42)
            color_masks = [
                np.random.randint(0, 256, (1, 3), dtype=np.uint8)
                for _ in range(num_mask)
            ]
            bndboxs = []
            for idx in range(num_mask):
                idx = -(idx + 1)  # walk from the back: largest masks first
                cur_mask = seg_label[idx, :, :]
                cur_mask = mmcv.imresize(cur_mask, (w, h))
                cur_mask = (cur_mask > 0.5).astype(np.uint8)
                if cur_mask.sum() == 0:
                    continue
                color_mask = color_masks[idx]
                # builtin bool: np.bool was removed in NumPy 1.24.
                cur_mask_bool = cur_mask.astype(bool)
                # Derive the bounding box from the mask extent.
                bndbox = []
                rows = np.any(cur_mask, axis=1)
                cols = np.any(cur_mask, axis=0)
                if len(np.where(rows)[0]) > 0:
                    ymin, ymax = np.where(rows)[0][[0, -1]]
                    xmin, xmax = np.where(cols)[0][[0, -1]]
                    bndbox.append(xmin)
                    bndbox.append(ymin)
                    bndbox.append(xmax)
                    bndbox.append(ymax)
                bndboxs.append(bndbox)

                # Blend the coloured mask into the frame.
                img_show[cur_mask_bool] = img[cur_mask_bool] * 0.5 + color_mask * 0.5
                cur_cate = cate_label[idx]  # class index
                cur_score = cate_score[idx]
                label = self.CLASSES[cur_cate]
                self.label_info.append(label)
                # Place the label near the mask centroid.
                # ndimage.center_of_mass: the ndimage.measurements namespace
                # is deprecated/removed in recent SciPy.
                center_y, center_x = ndimage.center_of_mass(cur_mask)
                vis_pos = (max(int(center_x) - 10, 0), int(center_y))
                # Draw "<label>:<score%>\n面积:<area>" at the centroid.
                img_show = cv2ImgAddText(img_show, '{}:{}{}{}:{}'.format(label, str(round(cur_score * 100, 2)), '\n',
                                                                         '面积', str(cur_mask.sum())), vis_pos[0],
                                         vis_pos[1], (0, 0, 0), 15)
            bbox_flag = False
            # Optionally draw bounding boxes as well.
            if bbox_flag:
                for bb in bndboxs:
                    cv2.rectangle(img_show, (bb[0], bb[1]), (bb[2], bb[3]), (0, 0, 255), 1)
            # Deduplicate while preserving first-seen order.
            self.label_info_st = list(set(self.label_info))
            self.label_info_st.sort(key=self.label_info.index)
            print("当前帧检测结果: ", self.label_info_st)
            cv2.imwrite(os.path.join(img_detect_out, '%08d' % my_num + '.jpg'), img_show)

        else:
            print("当前帧检测结果: 无")

        out_img = img_show
        return out_img, my_num

'''
检测及识别结果输出
名称：cv2ImgAddText
作用：在图片上画出检测识别结果
'''
def cv2ImgAddText(img, text, left, top, textColor, textSize=20):
    """Draw ``text`` (including CJK characters) onto ``img`` via PIL.

    OpenCV's putText cannot render Chinese, so the BGR frame is converted to
    a PIL RGB image, the text is drawn with a SimSun TrueType font, and the
    result is converted back to an OpenCV BGR array.

    :param img: BGR numpy array (or an already-converted PIL image)
    :param text: text to draw
    :param left: x coordinate of the text origin
    :param top: y coordinate of the text origin
    :param textColor: RGB colour tuple for the text
    :param textSize: font size in points
    :return: annotated image as a BGR numpy array
    """
    pil_img = img
    if isinstance(pil_img, np.ndarray):
        pil_img = Image.fromarray(cv2.cvtColor(pil_img, cv2.COLOR_BGR2RGB))
    # Font path comes from the project config; the leading './' is stripped
    # and the remainder anchored at base_path.
    font_path = Flask_para['base_path'] + Detector_para['simsun_path'].split('./')[1]
    font = ImageFont.truetype(font_path, textSize, encoding="utf-8")
    ImageDraw.Draw(pil_img).text((left, top), text, textColor, font=font)
    return cv2.cvtColor(np.asarray(pil_img), cv2.COLOR_RGB2BGR)
