import os
import torch
import time
import logging
import numpy as np
from PIL import Image
from torch.utils.data import DataLoader
PROJECT_HOME = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_HOME = os.path.join(PROJECT_HOME, 'data')


def process_cache(unique_key=None):
    """
    Decorator that caches the result of a data-preprocessing method on disk.

    The cache file name is built from the processed file's base name plus the
    values of the attributes listed in ``unique_key`` on the bound object, so
    changing any of those parameters produces (and loads) a different cache
    file. Results are stored with ``torch.save`` next to the source file.

    :param unique_key: list of attribute names of the dataset-construction
        class whose values parameterize the cache,
        e.g. ['top_k', 'cut_words', 'max_sen_len'].
    :return: the actual decorator.
    :raises ValueError: if ``unique_key`` is None.
    """
    if unique_key is None:
        raise ValueError(
            "unique_key 不能为空, 请指定相关数据集构造类的成员变量，如['top_k', 'cut_words', 'max_sen_len']")

    def decorating_function(func):
        import functools  # local import, matching the file's style for optional deps

        @functools.wraps(func)  # preserve the wrapped method's name/docstring
        def wrapper(*args, **kwargs):
            logging.info(f" ## 索引预处理缓存文件的参数为：{unique_key}")
            obj = args[0]  # the bound instance: func is data_process(self, file_path=None)
            # Accept file_path as a keyword OR as the first positional argument
            # after self (the original raised KeyError for the positional case).
            file_path = kwargs['file_path'] if 'file_path' in kwargs else args[1]
            file_dir = os.path.dirname(file_path)
            # splitext keeps interior dots intact (the old split/join dropped them)
            file_name = os.path.splitext(os.path.basename(file_path))[0]
            paras = f"cache_{file_name}_"
            for k in unique_key:
                paras += f"{k}{obj.__dict__[k]}_"  # append each cache-relevant parameter value
            cache_path = os.path.join(file_dir, paras[:-1] + '.pt')
            start_time = time.time()
            if not os.path.exists(cache_path):
                logging.info(f"缓存文件 {cache_path} 不存在，重新处理并缓存！")
                data = func(*args, **kwargs)
                with open(cache_path, 'wb') as f:
                    torch.save(data, f)
            else:
                logging.info(f"缓存文件 {cache_path} 存在，直接载入缓存文件！")
                with open(cache_path, 'rb') as f:
                    data = torch.load(f)
            end_time = time.time()
            logging.info(f"数据预处理一共耗时{(end_time - start_time):.3f}s")
            return data

        return wrapper

    return decorating_function


class KTHData(object):
    """
    Loader for the KTH action-recognition dataset.

    Download page: https://www.csc.kth.se/cvap/actions/ — six zip archives:
    # http://www.csc.kth.se/cvap/actions/walking.zip
    # http://www.csc.kth.se/cvap/actions/jogging.zip
    # http://www.csc.kth.se/cvap/actions/running.zip
    # http://www.csc.kth.se/cvap/actions/boxing.zip
    # http://www.csc.kth.se/cvap/actions/handwaving.zip
    # http://www.csc.kth.se/cvap/actions/handclapping.zip
    After downloading, unzip each archive into its own folder under
    DATA_HOME/kth.
    """
    DATA_DIR = os.path.join(DATA_HOME, 'kth')
    CATEGORIES = ["boxing", "handclapping", "handwaving", "jogging", "running", "walking"]
    # Fixed split of the 25 subjects by person id (~70% / 20% / 10%).
    TRAIN_PEOPLE_ID = [1, 2, 4, 5, 6, 7, 9, 11, 12, 15, 17, 18, 20, 21, 22, 23, 24]  # 25*0.7 = 17
    VAL_PEOPLE_ID = [3, 8, 10, 19, 25]  # 25*0.2 = 5
    TEST_PEOPLE_ID = [13, 14, 16]  # 25*0.1 = 3
    FILE_PATH = os.path.join(DATA_DIR, 'kth.pt')

    def __init__(self, frame_len=15,
                 batch_size=4,
                 is_sample_shuffle=True,
                 is_gray=True,
                 transforms=None):
        """
        :param frame_len: clip length (i.e. time steps); each video is cut
            into non-overlapping sub-sequences of this many frames.
        :param batch_size: number of clips per batch.
        :param is_sample_shuffle: whether to shuffle the training set.
        :param is_gray: read frames as single-channel grayscale.
        :param transforms: optional per-frame transform (torchvision style);
            applied frame by frame in generate_batch.
        """
        self.frame_len = frame_len
        self.batch_size = batch_size
        self.is_sample_shuffle = is_sample_shuffle
        self.is_gray = is_gray
        self.transforms = transforms

    @staticmethod
    def load_avi_frames(path=None, is_gray=False):
        """
        Read every frame of a single .avi file.

        :param path: path of the video file.
        :param is_gray: convert each frame to single-channel grayscale.
        :return: np.uint8 array of shape [n_frames, height, width, channels].
        """
        import cv2
        logging.info(f" ## 正在读取原始文件: {path}并划分数据")
        video = cv2.VideoCapture(path)
        frames = []
        try:
            while video.isOpened():
                # ret is False once no more frames can be read; frame is the
                # decoded image, e.g. a (120, 160, 3) ndarray for KTH videos.
                ret, frame = video.read()
                if not ret:
                    break
                if is_gray:
                    img = Image.fromarray(frame).convert("L")
                    # Use the actual frame size instead of the hard-coded
                    # (120, 160) of the original code, so resized or non-KTH
                    # clips are handled too.
                    frame = np.asarray(img).reshape((img.height, img.width, 1))
                frames.append(frame)
        finally:
            video.release()  # always free the capture handle (was leaked before)
        logging.info(f" ## 该视频一共有{len(frames)}帧")
        # np.uint8 is required; otherwise transforms.ToTensor() will not
        # normalize the pixel values.
        return np.array(frames, dtype=np.uint8)  # [n, height, width, channels]

    @process_cache(unique_key=["frame_len", "is_gray"])
    def data_process(self, file_path=None):
        """
        Walk every category folder, read each video, and cut it into
        non-overlapping clips of ``frame_len`` frames; each clip is routed to
        the train/val/test split according to the person id in the file name.

        :param file_path: path used by process_cache to derive the cache file.
        :return: dict with keys 'train_data', 'val_data', 'test_data'; each
            value is a list of (frames, label) pairs where frames has shape
            [frame_len, height, width, channels].
        """
        train_data, val_data, test_data = [], [], []
        for label, dir_name in enumerate(self.CATEGORIES):  # one folder per category
            video_dir = os.path.join(self.DATA_DIR, dir_name)
            for name in os.listdir(video_dir):
                if not name.endswith('.avi'):
                    continue  # skip stray non-video entries (e.g. hidden OS files)
                people_id = int(name[6:8])  # names look like person01_boxing_d1_uncomp.avi
                video_path = os.path.join(video_dir, name)
                frames = self.load_avi_frames(video_path, self.is_gray)
                s_idx, e_idx = 0, self.frame_len
                # Non-overlapping clips; a trailing remainder shorter than
                # frame_len is dropped.
                while e_idx <= len(frames):
                    logging.info(f" ## 截取帧子序列 [{s_idx}:{e_idx}]")
                    sub_frames = frames[s_idx:e_idx]  # [frame_len, height, width, channels]
                    if people_id in self.TRAIN_PEOPLE_ID:
                        train_data.append((sub_frames, label))
                    elif people_id in self.VAL_PEOPLE_ID:
                        val_data.append((sub_frames, label))
                    elif people_id in self.TEST_PEOPLE_ID:
                        test_data.append((sub_frames, label))
                    else:
                        raise ValueError(f"people id {people_id} 有误")
                    s_idx, e_idx = e_idx, e_idx + self.frame_len
        logging.info(f" ## 原始数据划分完毕，训练集、验证集和测试集的数量分别为: "
                     f"{len(train_data)}-{len(val_data)}-{len(test_data)}")
        data = {"train_data": train_data, "val_data": val_data, "test_data": test_data}
        return data

    def generate_batch(self, data_batch):
        """
        collate_fn for the DataLoaders built below.

        :param data_batch: list of (frames, label) pairs.
        :return: a pair with shapes
                 [batch_size, frame_len, channels, height, width]
                 [batch_size, ]
        """
        batch_frames, batch_label = [], []
        for (frames, label) in data_batch:
            # frames: [frame_len, height, width, channels]
            if self.transforms is not None:
                # Apply the transform frame by frame; transforms.ToTensor()
                # yields [channels, height, width] per frame.
                frames = torch.stack([self.transforms(frame) for frame in frames],
                                     dim=0)  # [frame_len, channels, height, width]
            else:
                frames = torch.tensor(frames.transpose(0, 3, 1, 2))  # [frame_len, channels, height, width]
                logging.info(f"{frames.shape}")
            batch_frames.append(frames)
            batch_label.append(label)
        batch_frames = torch.stack(batch_frames, dim=0)  # [batch_size, frame_len, channels, height, width]
        batch_label = torch.tensor(batch_label, dtype=torch.long)
        return batch_frames, batch_label

    def load_train_val_test_data(self, is_train=False):
        """
        Build DataLoader(s) over the (cached) processed data.

        :param is_train: when False return only the test iterator; otherwise
            return (train_iter, val_iter).
        """
        data = self.data_process(file_path=self.FILE_PATH)
        if not is_train:
            test_data = data['test_data']
            # NOTE(review): shuffling the test set is unusual; kept to
            # preserve the original behavior — confirm whether it is intended.
            test_iter = DataLoader(test_data, batch_size=self.batch_size,
                                   shuffle=True, collate_fn=self.generate_batch)
            logging.info(f" ## 测试集构建完毕，一共{len(test_data)}个样本")
            return test_iter
        train_data, val_data = data['train_data'], data['val_data']
        train_iter = DataLoader(train_data, batch_size=self.batch_size,
                                shuffle=self.is_sample_shuffle,
                                collate_fn=self.generate_batch)
        val_iter = DataLoader(val_data, batch_size=self.batch_size,
                              shuffle=False, collate_fn=self.generate_batch)
        logging.info(f" ## 训练集和验证集构建完毕，样本数量为{len(train_data)}:{len(val_data)}")
        return train_iter, val_iter

    def show_example(self, file_path=None, row=3, col=5, begin_id=10):
        """
        Visualize row*col consecutive frames of one video, starting at
        frame index ``begin_id``.

        :param file_path: video to show; defaults to person01's boxing clip.
        :param row: grid rows.
        :param col: grid columns.
        :param begin_id: index of the first displayed frame.
        """
        import matplotlib.pyplot as plt
        if file_path is None:
            file_path = os.path.join(self.DATA_DIR, self.CATEGORIES[0])
            file_path = os.path.join(file_path, 'person01_boxing_d1_uncomp.avi')
        frames = self.load_avi_frames(file_path)
        fig, ax = plt.subplots(row, col)
        for i, axi in enumerate(ax.flat):
            image = frames[i + begin_id]
            axi.set_xlabel(f'Frame{i + begin_id}')
            # NOTE(review): cv2 decodes frames as BGR, so colors appear
            # channel-swapped here — confirm whether that matters for display.
            axi.imshow(image)
            axi.set(xticks=[], yticks=[])
        plt.tight_layout()
        plt.show()