import os
import faiss
import torch
import cv2
import math
import configparser
import pandas as pd
import time
import numpy as np
from torch import nn
from torchvision import models,transforms
from multiprocessing import Process
from utils import get_image_file_list
from rich.progress import Progress

def config_parser(cfg_path):
    """Parse an INI-style application config file.

    :param cfg_path: path to the ``.conf`` file.
    :return: mapping of section name -> {option: value} (dict-like).
    """
    parser = configparser.ConfigParser()
    parser.read(cfg_path)
    # Expose the raw section mapping directly.
    # NOTE(review): `_sections` is a ConfigParser internal — values are the
    # raw strings without interpolation applied; kept for compatibility.
    return parser._sections

def mkdir(input):
    """Ensure directory *input* exists (creating parents as needed).

    Replaces the original ``isdir``-then-``makedirs`` sequence, which had a
    check-then-act race, with a single atomic ``exist_ok=True`` call.
    Behavior is otherwise unchanged: if *input* exists as a non-directory,
    ``FileExistsError`` is still raised.

    :param input: directory path (name kept for backward compatibility,
        though it shadows the builtin).
    :return: the same path, for call-site chaining.
    """
    os.makedirs(input, exist_ok=True)
    return input



def preprocess(img) -> np.ndarray:
    """Standard ImageNet-style preprocessing for one frame.

    Resizes the shorter side to 256, center-crops 224x224, normalizes with
    ImageNet mean/std and transposes HWC -> CHW.

    Fix: the return annotation was ``np.array`` (a function, not a type);
    it is now ``np.ndarray``.

    :param img: BGR uint8 image of shape (H, W, 3) as read by cv2 —
        presumably; TODO confirm callers always pass cv2 frames.
    :return: float32 array of shape (3, 224, 224).
    """
    height, width, _ = img.shape
    # cv2.resize takes (width, height); scale so the shorter side is 256.
    dst_size = (256, int(256*height/width)) if height > width \
        else (int(256*width/height), 256)
    # [:, :, ::-1] flips BGR -> RGB before resizing.
    img = cv2.resize(img[:,:,::-1],dst_size, interpolation=cv2.INTER_LINEAR)
    new_h, new_w, _ = img.shape
    # Center-crop a 224x224 window (112 on each side of the midpoint).
    img = img[int(new_h/2) - 112: int(new_h/2) + 112,
          int(new_w/2) - 112: int(new_w/2) + 112, :]
    img = img.astype(np.float32, copy=False)
    img /= 255.0
    # ImageNet channel statistics (RGB order); in-place ops keep float32.
    img -= np.array([0.485, 0.456, 0.406])
    img /= np.array([0.229, 0.224, 0.225])
    img = np.transpose(img, (2, 0, 1)) #hwc to chw
    return img

class MyVideo:
    """Wrapper around ``cv2.VideoCapture`` with two jobs:

    * :meth:`run_label` — sample three reference frames (head / middle /
      tail) of an ad clip and embed them, for building the gallery.
    * :meth:`run_input` — scan a long recording, embed every
      ``interval``-th frame in batches and search a faiss index.
    """

    def __init__(self, stream_path):
        self.init_cap(stream_path)
        self._frame_id = 0
        self._sign_frame_id = 0
        self._miss_seconds = 5
        self._flag = 0
        self.stack = list()
        # Sample one frame out of every `interval` frames while scanning.
        self.interval = 8
        # Sampled frames per model forward pass.
        self.batch_size = 256

    def init_cap(self, stream_path):
        """Open `stream_path` and cache fps, frame count and geometry."""
        self.cap = cv2.VideoCapture(stream_path)
        # Note: cap.get() returns floats, so _fps/_frame_count stay float.
        self._fps = self.cap.get(cv2.CAP_PROP_FPS)
        self._frame_count = self.cap.get(cv2.CAP_PROP_FRAME_COUNT)
        self._width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self._height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self._fourcc = cv2.VideoWriter_fourcc(*'XVID')

    @staticmethod
    def frames_to_timecode(framerate, frames):
        """
        Convert a frame index to a timecode string.

        :param framerate: video frame rate (frames per second)
        :param frames: current frame index
        :return: timecode string formatted as ``HH:MM:SS:FF``
        """
        return '{0:02d}:{1:02d}:{2:02d}:{3:02d}'.format(int(frames / (3600 * framerate)),
                                                        int(frames / (60 * framerate) % 60),
                                                        int(frames / framerate % 60),
                                                        int(frames % framerate))

    def run_label(self, model, total_time,  *args, **kwargs):
        """Embed three representative frames of an ad clip.

        :param model: embedding network (called via :meth:`process_frame`).
        :param total_time: nominal clip duration in seconds (from the excel).
        :return: concatenated embeddings, shape (3, out_nums).
        """
        label_feature = list()
        tmp_list = list()

        # Extra frames beyond the nominal duration; frame count and fps do
        # not change while reading, so compute the three target ids once
        # instead of once per frame as the original did.
        rest = math.ceil((self._frame_count - total_time * self._fps)/1.2)
        targets = (rest, int(self._frame_count/2), self._frame_count - rest)

        while self.cap.isOpened():
            success, frame = self.cap.read()
            if not success:
                print("Can't receive frame (stream end?). Exiting ...")
                self.cap.release()
                break
            if self._frame_id in targets:
                label_feature.append(frame)
                if len(label_feature) == 3:
                    feature = self.process_frame(label_feature, model)
                    label_feature.clear()
                    tmp_list.append(feature)
            self._frame_id += 1
        # NOTE(review): raises ValueError if no batch of three was collected
        # (e.g. unreadable clip) — caller sees a hard failure either way.
        feature = np.concatenate(tmp_list)
        return feature

    def run_input(self,model, index):
        """Scan the capture and match sampled frames against `index`.

        :param model: embedding network.
        :param index: faiss index built by ``gallery_store``.
        :return: dict mapping the first frame id of each matched batch to
            ``(distances, indices)`` lists, for batches where any inner
            product exceeds 0.9.
        """
        tmp_input = list()
        result = dict()
        count = 0
        first_frame_id = None
        with Progress() as progress:
            task = progress.add_task('[blue]采集广告并入库中，请稍后...', total=int(self._frame_count))
            while self.cap.isOpened():

                success, frame = self.cap.read()
                if not success:
                    print("Can't receive frame (stream end?). Exiting ...")
                    self.cap.release()
                    break

                if self._frame_id % self.interval == 0:
                    tmp_input.append(frame)
                    if len(tmp_input) == 1:
                        # Remember where this batch started in the stream.
                        first_frame_id = self._frame_id
                    # Flush on a full batch, or on the smaller final batch.
                    if len(tmp_input) == self.batch_size or \
                            self._frame_count - count*self.batch_size*self.interval==len(tmp_input):
                        features = self.process_frame(tmp_input, model)
                        faiss.normalize_L2(features)
                        D,I = index.search(features, 1)
                        if (D>0.9).any():
                            result.update({first_frame_id:(D.tolist(),I.tolist())})
                        tmp_input.clear()
                        count += 1
                self._frame_id += 1
                if not progress.finished:
                    # BUG FIX: the original advanced `task1`, a module-level
                    # global belonging to a *different* progress bar in
                    # __main__; it only avoided a NameError by accident.
                    progress.update(task, advance=1)
        return result

    @staticmethod
    def process_frame(frames, model):
        """Preprocess a list of BGR frames and embed them with `model`.

        Relies on the module-level ``out_nums`` global for the embedding
        width and assumes a CUDA device is available — TODO confirm.
        """
        batch = np.stack([preprocess(frame) for frame in frames])
        batch = torch.from_numpy(batch).to("cuda")
        with torch.no_grad():
            embed = model(batch).view((-1, out_nums,)).cpu().numpy()
        return embed

def gallery_store(dims, input):
    """Build a GPU-backed inner-product faiss index over `input`.

    Vectors are L2-normalized in place first, so inner-product search is
    equivalent to cosine similarity.

    Removed: an unused ``ngpus = faiss.get_num_gpus()`` local and a dead
    commented-out ``IndexFlatL2`` line.

    :param dims: embedding dimensionality.
    :param input: float32 array of shape (n, dims); modified in place by
        the normalization.
    :return: index replicated across all available GPUs.
    """
    cpu_index = faiss.IndexFlatIP(dims)
    gpu_index = faiss.index_cpu_to_all_gpus(cpu_index)
    faiss.normalize_L2(input)
    gpu_index.add(input)
    return gpu_index

if __name__ == "__main__":
    import json

    # ---- config & path validation -------------------------------------
    cfg_path = "config/app.conf"
    config = config_parser(cfg_path)
    extract_video = config["detect"]["extract_video"]
    label_dir = mkdir(config["detect"]["label_dir"])
    gallery_path = mkdir(config["detect"]["gallery_path"])
    detect_path = mkdir(config["detect"]["detect_path"])
    if not os.path.isfile(extract_video):
        raise FileNotFoundError("extracted video not found！")
    excel_path = config["app"]["excel_path"]
    if not os.path.isfile(excel_path):
        raise FileNotFoundError("excel not found！")

    # ---- model setup ---------------------------------------------------
    # Feature width of mobilenet_v3_small after the classifier is dropped;
    # read as a module-level global by MyVideo.process_frame.
    out_nums = 576
    convnext = models.mobilenet_v3_small(pretrained=True).to("cuda")
    convnext.eval()
    # Strip the classifier head so the model emits pooled features.
    modules = list(convnext.children())[:-1]
    convnext = nn.Sequential(*modules)

    # ---- build the gallery from labeled ad clips -----------------------
    video_paths = get_image_file_list(label_dir)
    print("读取表格中......")
    data = pd.read_excel(excel_path, sheet_name=0)
    data = data.dropna(axis=0, subset=["时长","证据下载地址"])

    cache = dict()
    count = 0
    outputs = []
    start = time.time()
    with Progress() as progress:
        task1 = progress.add_task('[red]采集广告并入库中，请稍后...', total=len(data))
        for index, row in data.iterrows():
            for path in video_paths:
                # Match a local clip file to its excel row by filename.
                if not os.path.basename(path) in row["证据下载地址"]:
                    continue
                total_time = row["时长"]
                parse = MyVideo(path)
                output = parse.run_label(convnext, total_time)
                outputs.append(output)
                # Map each clip to its row range in the gallery matrix
                # plus its nominal duration.
                cache.update({os.path.basename(path): [[i for i in range(count, count + output.shape[0])], row["时长"]]})
                count += output.shape[0]
            if not progress.finished:
                progress.update(task1, advance=1)
        if not outputs:
            raise FileNotFoundError("提供的广告文件和excel里的证据下载地址不一致！")

        # `with` closes the file; the original had a redundant f.close().
        with open(os.path.join(gallery_path, "gallery.json"), "w") as f:
            json.dump(cache, f)
        outputs = np.concatenate(outputs)
        gallery_index = gallery_store(out_nums, outputs)

    end = time.time()
    import_gallery_time = end - start

    # ---- scan the long recording against the gallery -------------------
    print("开始匹配")
    start = time.time()
    g = MyVideo(extract_video)
    result = g.run_input(convnext, gallery_index)
    with open(os.path.join(detect_path, f'{os.path.basename(extract_video)}.json'), 'w') as f:
        json.dump(result, f)
    end = time.time()
    cal_time = end - start
    print("*" * 10)
    print(f"导入底库的时间为{import_gallery_time}秒")
    print(f"匹配的时间为{cal_time}秒")















