from typing import Optional, Union, Tuple
from timesformer_pytorch import TimeSformer
import torch
import os
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
from transformers import DecisionTransformerModel
from transformers import DecisionTransformerConfig
from transformers.models.decision_transformer.modeling_decision_transformer import DecisionTransformerOutput
import pandas as pd
import ast
import numpy as np
from math import ceil
from tools.recoder_args import DefaultArgs


class LazyLoadingDataset(Dataset):
    """Lazily loads fixed-length clips of recorded screen images plus the
    action / reward / return-to-go info associated with each clip.

    Item i covers source frames [i * num_frames, (i + 1) * num_frames); a
    leading clip shorter than `num_frames` is padded at the front, either with
    zero images (pad_zeros=True) or by repeating the first frame.
    """

    def __init__(self, record_analysis, num_frames=3, img_resize=(224, 224), pad_zeros=False):
        # record_analysis is a project type (tools.my_reward_analysis.RewardAnalysis)
        # providing the image path list and the per-frame reward/state CSV path.
        self.record_analysis = record_analysis
        self.file_list = record_analysis.img_path_ls
        self.df = pd.read_csv(self.record_analysis.output_path__reward_state_info_df)

        # Fix the action-key order once, taken from the last recorded row.
        self.action_keys = list(self._get_action(-1).keys())

        self.img_resize = img_resize

        self.transform = transforms.Compose([
            transforms.Resize(self.img_resize),
            transforms.ToTensor(),
        ])
        self.num_frames = num_frames
        self.pad_zeros = pad_zeros

        self.total_frames = len(self.df)
        # BUG FIX: the original `ceil(a // b)` floored before ceil-ing, which
        # silently dropped a trailing partial clip. Use true ceiling division.
        self._length = ceil(self.total_frames / num_frames)

    def _get_action(self, i):
        """Merge row i's 'key' and 'mouse' dict literals into one action dict,
        dropping the bookkeeping fields 'time' and 'i'."""
        target_row = self.df.iloc[i]
        assert isinstance(target_row['key'], str)
        assert isinstance(target_row['mouse'], str)
        action: dict = ast.literal_eval(target_row['key'])
        _m_action = ast.literal_eval(target_row['mouse'])
        action.update(**_m_action)
        # pop with default: no KeyError when a field is absent.
        action.pop('time', None)
        action.pop('i', None)
        return action

    def __len__(self):
        return self._length

    def __getitem__(self, i):
        """Return (video, mask, action, reward, rtg, info) for clip i."""
        num_frames = self.num_frames
        idx = i * num_frames

        end_i = min(self.total_frames, idx + num_frames)
        # Full-length window ending at end_i; clamp to 0 for the short leading
        # clip (end_i < num_frames), which gets padded below.
        start_i = 0 if end_i < num_frames else end_i - num_frames

        img_path_ls = self.file_list[start_i: end_i]
        video = [Image.open(img_path).convert('RGB') for img_path in img_path_ls]

        last_index = end_i - 1
        last_row = self.df.iloc[last_index]
        rows = self.df.iloc[start_i: end_i]
        file_names = rows['file_name'].tolist()

        assert file_names, 'file_names为空?'  # "file_names is empty?"
        while len(file_names) < num_frames:
            file_names.append('')

        # Reward is summed over the clip; rtg is taken from the clip's last frame.
        reward = rows['reward'].sum()
        rtg = last_row['rtg']

        _action = self._get_action(last_index)
        action = np.array([_action.get(k) for k in self.action_keys])

        info = last_row['game_state'], last_row['extinct_enemy'], last_row['remain_enemy']
        info = torch.LongTensor(info)

        if self.transform:
            video = [self.transform(image) for image in video]
        video = torch.stack(video)  # (f, c, h, w)

        if end_i < num_frames:
            # Front-pad the short leading clip up to num_frames frames.
            # (renamed loop variable: the original shadowed the parameter `i`)
            img_1 = video[0]
            for _ in range(num_frames - end_i):
                img_i = torch.zeros(img_1.shape) if self.pad_zeros else img_1
                video = torch.cat((img_i.unsqueeze(0), video))
            assert len(video) == num_frames, \
                f"video's length must equal num_frames! len(video)[{len(video)} != {num_frames}"

        # NOTE(review): video.shape[:2] is (frames, channels), so this mask is
        # (f, c) rather than the (f,) a per-frame mask would suggest; current
        # consumers rebuild the mask themselves — confirm before relying on it.
        mask = torch.ones(*video.shape[:2]).bool()
        return video, mask, action, reward, rtg, info


class MySequenceDataset(Dataset):
    """One item per recording directory: builds a single trajectory dict
    (observations / actions / rewards / rtg) from every frame of that recording.
    """

    def __init__(self, operation_name, state_feature_extractor, num_frames=4):
        # Each sub-directory of the operation dir is one recorded play-through.
        operation_dir_path = os.path.join(DefaultArgs.record_path.dir_path__screen_image, operation_name)
        self.img_dir_names = os.listdir(operation_dir_path)
        self.length = len(self.img_dir_names)

        # BUG FIX: the original never stored operation_name and read the
        # module-level globals `operation_name` / `num_frames` inside
        # __getitem__ (NameError when imported outside __main__). Store the
        # argument and take num_frames as a backward-compatible parameter
        # (default 4 matches the previous __main__ usage).
        self.operation_name = operation_name
        self.num_frames = num_frames

        self.state_feature_extractor = state_feature_extractor
        self.device = self.state_feature_extractor.device

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        img_dir_name = self.img_dir_names[i]

        # batch_size=-1 loads the whole recording as one batch, so the loop
        # below runs at most once and returns the full trajectory.
        data_preprocessing = DataPreprocessing(self.operation_name, img_dir_name,
                                               num_frames=self.num_frames, batch_size=-1)
        data_loader, data_shape, record_analysis = data_preprocessing.get_data_loader()  # data shape: b f c h w
        print(f'img_dir_name: {img_dir_name} --- data_shape(b f c h w): {data_shape}')

        for batch_data in data_loader:
            videos, mask, actions, rewards, rtg, infos = batch_data

            print(f'shape --- videos, action, reward, rtg, info: {[list(obj.numpy().shape) for obj in [videos, actions, rewards, rtg, infos]]}')

            # NOTE(review): feature extraction is currently disabled; raw video
            # tensors are used as observations — confirm before re-enabling.
            observations = videos
            trajectory = {
                'rewards': rewards,
                'actions': actions,
                'observations': observations,
                'rtg': rtg,
                'img_dir_name': img_dir_name,
            }
            return trajectory


class DataPreprocessing:
    """Builds a DataLoader of (video, mask, action, reward, rtg, info) clips
    for one recording directory.

    batch_size == -1 means "one batch containing the whole dataset".
    """

    def __init__(self, operation_name, img_dir_name, num_frames=3, batch_size=3, img_resize=(224, 224), shuffle=False):
        self.operation_name = operation_name
        self.img_dir_name = img_dir_name
        self.num_frames = num_frames

        self.img_resize = img_resize
        self.shuffle = shuffle

        self.batch_size = batch_size

    def get_data_loader(self):
        """Return (data_loader, data_shape, record_analysis); data shape: b f c h w."""
        # Imported lazily, matching the original (likely to avoid an import cycle).
        from tools.my_reward_analysis import RewardAnalysis

        # BUG FIX: the original read the module-level globals `operation_name`,
        # `img_dir_name` and `num_frames` here instead of the instance
        # attributes stored in __init__ (NameError outside __main__).
        record_analysis = RewardAnalysis(operation_name=self.operation_name,
                                         img_dir_name=self.img_dir_name)

        lazy_loading_dataset = LazyLoadingDataset(record_analysis=record_analysis,
                                                  num_frames=self.num_frames)

        batch_size = self.batch_size
        if batch_size == -1:
            # "Full batch" mode: one batch covering every clip.
            batch_size = len(lazy_loading_dataset)
        data_loader = DataLoader(lazy_loading_dataset, batch_size=batch_size, shuffle=self.shuffle)

        data_shape = [batch_size, self.num_frames, 3, *self.img_resize]

        return data_loader, data_shape, record_analysis


class StateFeatureExtractModel(torch.nn.Module):
    """Wraps a TimeSformer and exposes its first (CLS-position) hidden-state
    token as a per-clip feature vector."""

    def __init__(self,
                 num_frames=3,
                 dim=128,
                 image_size=224,
                 patch_size=16,
                 num_classes=1,
                 depth=4,
                 heads=4,
                 dim_head=32,
                 attn_dropout=0.1,
                 ff_dropout=0.1,
                 get_last_hidden_outputs=True,
                 device=None
                 ):
        super().__init__()
        # Collect the backbone configuration once, then build the model.
        tsf_kwargs = dict(
            num_frames=num_frames,
            dim=dim,
            image_size=image_size,
            patch_size=patch_size,
            num_classes=num_classes,
            depth=depth,
            heads=heads,
            dim_head=dim_head,
            attn_dropout=attn_dropout,
            ff_dropout=ff_dropout,
            get_last_hidden_outputs=get_last_hidden_outputs,
        )
        self.tsf = TimeSformer(**tsf_kwargs)
        # Default to CUDA when available; callers read this to place inputs.
        self.device = device if device is not None else torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

    def forward(self, video, mask=None):
        """video: (b, f, c, h, w); returns the token-0 features of the last
        hidden output, shape (b, dim)."""
        if mask is None:
            # No mask supplied: attend to every frame of every clip.
            mask = torch.ones(*video.shape[:2]).bool().to(video.device)
        hidden = self.tsf(video, mask=mask)
        return hidden[:, 0]


# class MyDecisionTransformerConfig(DecisionTransformerConfig):
#     def __init__(self, state_feature_extractor, *args, **kwargs):
#         self.state_feature_extractor = state_feature_extractor
#         super().__init__(*args, **kwargs)


class MyDecisionTransformerModel(DecisionTransformerModel):
    """DecisionTransformer whose raw video states are first encoded by an
    optional state-feature extractor (e.g. StateFeatureExtractModel)."""

    def __init__(self, state_feature_extractor, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # May be None, in which case states are fed straight to the transformer.
        self.state_feature_extractor = state_feature_extractor

    def forward(self, states, mask=None, *args, **kwargs) -> Union[Tuple[torch.FloatTensor], DecisionTransformerOutput]:
        """Encode `states` (video batch) into feature vectors, then delegate to
        the standard DecisionTransformer forward pass."""
        # Explicit None check instead of truthiness: an nn.Module is always
        # truthy, so the original test could never distinguish "disabled".
        if self.state_feature_extractor is not None:
            states = self.state_feature_extractor(states, mask)
        # (leftover debug print of states.shape removed)
        ret = super().forward(states, *args, **kwargs)
        return ret


class MyTrainer:
    """Minimal training-loop driver: runs the model over the data loader for a
    fixed number of epochs and reports mean wall time per epoch/batch-size."""

    def __init__(self, model, data_loader, mask, train_epoch, device=None):
        # BUG FIX: the original only assigned self.device when no device was
        # passed, so an explicitly supplied device was dropped and train()
        # crashed with AttributeError.
        if device is None:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.device = device
        self.model = model
        self.data_loader = data_loader
        self.train_epoch = train_epoch
        self.mask = mask

        # Total number of source frames, used to size the progress bar.
        self.total_data = len(data_loader.dataset.file_list)
        self.batch_size = data_loader.batch_size

    def train(self):
        # Third-party timing/progress helpers, imported lazily like the original.
        from bdtime import tt
        from tqdm import tqdm

        device = self.device
        model = self.model.to(device)

        tt.__init__()  # reset the timer
        for epoch in range(self.train_epoch):
            tq_i = tqdm(total=self.total_data)
            tq_i.desc = f'epoch [{epoch + 1}/{self.train_epoch}]'
            # BUG FIX: iterate the loader stored on the instance; the original
            # read a module-level global `data_loader`.
            for batch_data in self.data_loader:
                video, mask, action, reward, rtg, info = batch_data
                video = video.to(device)
                # The model rebuilds a full mask itself when mask is None.
                mask = None
                last_hidden_output = model(video, mask=mask)
                tq_i.update(video.shape[0])

        print(f'[per epoch and batch_size] mean cost_time: {tt.now() / self.train_epoch / self.data_loader.batch_size: g}')


if __name__ == '__main__':
    # Debug / smoke-test driver. NOTE(review): the exit() calls below are
    # deliberate stops — everything after the first exit() is currently dead
    # code kept for manual experimentation.
    operation_name = 'cao_shen__0'
    img_dir_name = '0'
    num_frames = 4
    batch_size = 2
    # batch_size = -1

    state_dim = 128
    state_feature_extractor = StateFeatureExtractModel(num_frames=num_frames, dim=state_dim)

    # Exercise only the sequence-dataset path (loads one full recording).
    seq_dataset = MySequenceDataset(operation_name=operation_name, state_feature_extractor=state_feature_extractor)
    seq_dataset[3]
    # Debug stop #1: nothing past this line executes.
    exit()

    # --- dead code below (kept intentionally for manual debugging) ---
    data_preprocessing = DataPreprocessing(operation_name, img_dir_name, num_frames=num_frames, batch_size=batch_size)
    data_loader, data_shape, record_analysis = data_preprocessing.get_data_loader()  # data shape: b f c h w
    print(f'--- data_shape(b f c h w): {data_shape}')

    # Inspect a single batch of the clip loader.
    i = 0
    for batch_data in data_loader:
        video, mask, action, reward, rtg, info = batch_data
        # video, mask, action, reward, rtg, info, file_names = batch_data
        # [fs[1] for fs in file_names]
        reward = list(np.round(reward.numpy(), 3))
        rtg = list(np.round(rtg.numpy(), 3))
        print(i, video.shape, '---', reward, '---', rtg)
        # print(i, video.shape,'---', file_names, '---', reward, '---', rtg)

        i += 1

        break

    # Debug stop #2.
    exit()

    # region # --- test state_feature_extractor
    # mask = torch.ones(data_shape[0], data_shape[1]).bool()
    # # mask.shape
    # video = torch.ones(data_shape)
    # # video.shape
    # features = state_feature_extractor(video, mask)
    # features = state_feature_extractor(video)
    # features.shape
    # endregion

    config = DecisionTransformerConfig(
        state_dim=state_dim,
        act_dim=4,
    )

    model = MyDecisionTransformerModel(state_feature_extractor=state_feature_extractor, config=config)
    # NOTE(review): the next line overrides the decision-transformer model with
    # the bare feature extractor — looks like a debug substitution; confirm.
    model = state_feature_extractor

    train_epoch = 3
    trainer = MyTrainer(model=model, data_loader=data_loader, mask=None, train_epoch=train_epoch)
    trainer.train()