import os
import faiss
import torch
import cv2
import configparser
import pandas as pd
import time
import numpy as np
from torch import nn
from torchvision import models,transforms
from multiprocessing import Process
from utils import get_image_file_list

def config_parser():
    """
    Read ``config/app.conf`` and return the parsed sections.

    Returns a ``{section: {option: value}}`` mapping (empty when the file
    is absent — ``ConfigParser.read`` silently ignores missing files).
    """
    parser = configparser.ConfigParser()
    parser.read("config/app.conf")
    # `_sections` exposes the raw section/option mapping directly
    return parser._sections

def mkdir(input):
    """
    Create directory *input* (and any missing parents) if it does not exist.

    Returns the path unchanged so calls can be used inline. Raises
    ``FileExistsError`` if the path exists but is not a directory, matching
    the previous isdir()+makedirs() behavior without its check-then-create
    race.
    """
    os.makedirs(input, exist_ok=True)
    return input

def _time_convert(input,cache:list,fps:int):
    if fps > 0:
        interval = fps
    else:
        interval = 60
    carry, rem = divmod(input, interval)
    cache.append(rem)
    if carry == 1 or carry == 0:
        cache.append(carry)
        return cache
    else:
        return _time_convert(carry, cache, 0)



def preprocess(img) -> np.ndarray:
    """
    Convert one BGR uint8 image (H, W, C) into a normalized CHW float32 array.

    Resizes so the shorter side becomes 256 (aspect ratio preserved),
    center-crops 224x224, scales to [0, 1], applies ImageNet mean/std
    normalization, and transposes HWC -> CHW.
    """
    height, width, _ = img.shape
    # cv2.resize expects (width, height); scale so the shorter side is 256
    dst_size = (256, int(256*height/width)) if height > width \
        else (int(256*width/height), 256)
    # [:,:,::-1] flips BGR -> RGB to match the ImageNet stats below
    img = cv2.resize(img[:,:,::-1],dst_size, interpolation=cv2.INTER_LINEAR)
    new_h, new_w, _ = img.shape
    #center_crop (224x224 patch around the image center)
    img = img[int(new_h/2) - 112: int(new_h/2) + 112,
          int(new_w/2) - 112: int(new_w/2) + 112, :]
    img = img.astype(np.float32, copy=False)
    img /= 255.0
    img -= np.array([0.485, 0.456, 0.406])  # ImageNet channel means
    img /= np.array([0.229, 0.224, 0.225])  # ImageNet channel stds
    img = np.transpose(img, (2, 0, 1)) #hwc to chw
    return img

class MyVideo:
    """
    Wrapper around a cv2.VideoCapture that extracts per-frame CNN features:
    ``run_label`` builds a gallery feature matrix, ``run_input`` searches
    sampled frames against a faiss index.
    """

    def __init__(self, stream_path):
        self.init_cap(stream_path)
        self._frame_id = 0
        self._sign_frame_id = 0
        self._miss_seconds = 5
        self._flag = 0
        self.stack = list()
        self.interval = 8      # run_input samples one frame every `interval` frames
        self.batch_size = 256  # frames per model forward pass

    def init_cap(self, stream_path):
        """Open the video stream and cache its basic properties."""
        self.cap = cv2.VideoCapture(stream_path)
        #get fps
        self._fps = self.cap.get(cv2.CAP_PROP_FPS)
        # NOTE(review): CAP_PROP_FRAME_COUNT is only an estimate for some
        # containers/streams; batching below no longer depends on it.
        self._frame_count = self.cap.get(cv2.CAP_PROP_FRAME_COUNT)
        self._width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self._height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self._fourcc = cv2.VideoWriter_fourcc(*'XVID')

    def _frame_id_to_timestap(self):
        """Format the current frame id as an "h:m:s"-style timestamp string."""
        cache = _time_convert(self._frame_id, list(), int(self._fps))
        # pad to [frames, seconds, minutes, hours]
        while len(cache) < 4:
            cache.append(0)
        # highest unit first; drop the trailing frame remainder
        return ":".join(f"{stap}" for stap in cache[::-1][:-1])

    def run_label(self, model, *args, **kwargs):
        """
        Run `model` over every frame of the video.

        Returns a (num_frames, feature_dim) numpy array, rows in frame order.
        """
        batch = list()
        chunks = list()
        while self.cap.isOpened():
            success, frame = self.cap.read()
            if not success:
                print("Can't receive frame (stream end?). Exiting ...")
                self.cap.release()
                break
            batch.append(frame)
            if len(batch) == self.batch_size:
                chunks.append(self.process_frame(batch, model))
                batch.clear()
            self._frame_id += 1
        # Flush the trailing partial batch after the stream ends. The old
        # condition compared against CAP_PROP_FRAME_COUNT and silently lost
        # the remainder whenever that property was inaccurate.
        if batch:
            chunks.append(self.process_frame(batch, model))
        return np.concatenate(chunks)

    def run_input(self, model, index):
        """
        Sample every ``self.interval``-th frame, embed it with `model`, and
        search the faiss `index` for the nearest gallery feature.

        Returns a dict keyed by the frame id at flush time, with values
        ``(distances, indices)`` for batches containing a similarity > 0.9.
        """
        batch = list()
        result = dict()

        def _flush():
            # embed, L2-normalize (index uses inner product == cosine), search
            features = self.process_frame(batch, model)
            faiss.normalize_L2(features)
            D, I = index.search(features, 1)
            if (D > 0.9).any():
                result.update({self._frame_id: (D.tolist(), I.tolist())})
            batch.clear()

        while self.cap.isOpened():
            success, frame = self.cap.read()
            if not success:
                print("Can't receive frame (stream end?). Exiting ...")
                self.cap.release()
                break
            if self._frame_id % self.interval == 0:
                batch.append(frame)
                if len(batch) == self.batch_size:
                    _flush()
            self._frame_id += 1
            print(self._frame_id)
        # Flush the trailing partial batch. The original condition
        # (`frame_count - count*batch_size*interval == len(tmp_input)`)
        # effectively never fired, so the last frames were never searched.
        if batch:
            _flush()
        return result

    @staticmethod
    def process_frame(frames, model):
        """
        Preprocess a list of BGR frames and run one batched forward pass.

        Returns a (len(frames), feature_dim) float32 numpy array.
        """
        start = time.time()
        stacked = np.stack([preprocess(frame) for frame in frames])
        tensor = torch.from_numpy(stacked).to("cuda")
        with torch.no_grad():
            # Flatten the pooled (N, C, 1, 1) output to (N, C). Deriving the
            # batch dim from the tensor removes the dependency on the
            # module-level `out_nums` global (defined only under __main__).
            embed = model(tensor).view(tensor.shape[0], -1).cpu().numpy()
        cost = time.time() - start
        print(f"infer cost {cost} s")
        return embed

def gallery_store(dims, input):
    """
    Build a GPU-resident faiss index over the gallery feature matrix.

    Uses an inner-product flat index over L2-normalized rows, i.e. cosine
    similarity. NOTE: `input` is normalized in place by faiss.normalize_L2.

    :param dims: feature dimensionality of each row of `input`
    :param input: float32 (n, dims) numpy array of gallery features
    :return: the populated index, replicated across all visible GPUs
    """
    cpu_index = faiss.IndexFlatIP(dims)
    gpu_index = faiss.index_cpu_to_all_gpus(cpu_index)
    faiss.normalize_L2(input)
    gpu_index.add(input)
    return gpu_index


if __name__ == "__main__":
    import json
    config = config_parser()
    extract_video = config["detect"]["extract_video"]
    label_dir = mkdir(config["detect"]["label_dir"])
    gallery_path = mkdir(config["detect"]["gallery_path"])
    detect_path = mkdir(config["detect"]["detect_path"])
    if not os.path.isfile(extract_video):
        raise FileNotFoundError("extracted video not found！")
    excel_path = config["app"]["excel_path"]
    if not os.path.isfile(excel_path):
        raise FileNotFoundError("excel not found！")

    # Feature dimension of mobilenet_v3_small's pooled output; read as a
    # module-level global by MyVideo.process_frame.
    out_nums = 576
    convnext = models.mobilenet_v3_small(pretrained=True).to("cuda")
    convnext.eval()
    # Drop the classifier head so the model emits pooled feature vectors.
    modules = list(convnext.children())[:-1]
    convnext = nn.Sequential(*modules)
    video_paths = get_image_file_list(label_dir)
    data = pd.read_excel(excel_path, sheet_name=0)

    # Build the gallery: one feature row per frame of every labelled video
    # whose filename appears in the sheet's download-address column.
    cache = dict()
    count = 0
    outputs = []
    for index, row in data.iterrows():
        for path in video_paths:
            if os.path.basename(path) not in row["证据下载地址"]:
                continue
            parse = MyVideo(path)
            output = parse.run_label(convnext)
            outputs.append(output)
            # video name -> [gallery row ids, duration from the sheet]
            cache.update({os.path.basename(path): [list(range(count, count + output.shape[0])), row["时长"]]})
            count += output.shape[0]
    # `with` closes the file on exit; no explicit close() calls needed.
    with open(os.path.join(gallery_path, "gallery.json"), "w") as f:
        json.dump(cache, f)

    outputs = np.concatenate(outputs)
    gallery_index = gallery_store(out_nums, outputs)

    # Search every sampled frame of the target video against the gallery.
    g = MyVideo(extract_video)
    result = g.run_input(convnext, gallery_index)
    with open(os.path.join(detect_path, f'{os.path.basename(extract_video)}.json'), 'w') as f:
        json.dump(result, f)
















