import numpy as np
import torch
from torch.utils.data import IterableDataset, DataLoader
# import extract, utils
from dataset import extract, utils
from pathlib import Path
from tqdm import tqdm

class AMASS(IterableDataset):
    """
    Streaming AMASS motion dataset.

    Pairs each sketch-render video under ``<data_path>/SketchRenders`` with its
    SMPL-X pose file under ``<data_path>/SMPLX`` and yields overlapping motion
    windows (positions + quaternions) together with the axis-angle pose and
    rendered image of each window's first and last frame.

    Args:
        data_path: root directory containing ``SketchRenders`` and ``SMPLX``.
        mode: split selector, one of ``"train"`` / ``"valid"`` / ``"test"``.
        window: number of frames per yielded motion clip.
        offset: stride (in frames) between consecutive windows.
        flip: if True, ``update_data`` additionally returns a temporally
            reversed copy of the data dictionary.
    """

    def __init__(
        self, data_path, mode="train", window=50, offset=20, flip=False
    ):
        # basic settings
        self.window = window
        self.offset = offset
        self.flip = flip

        self.files_sketch = utils.list_all_files(Path(data_path) / 'SketchRenders')
        n = len(self.files_sketch)
        if mode == "train":
            # first 80% of the files
            self.files_sketch = self.files_sketch[:int(n * 0.8)]
        elif mode == "valid":
            # the 80%-90% slice
            self.files_sketch = self.files_sketch[int(n * 0.8):int(n * 0.9)]
        elif mode == "test":
            # NOTE(review): this takes the FIRST 30% of files, which overlaps
            # the train split (data leakage). The commented-out line below
            # suggests the intended split was the last 10% — confirm whether
            # this is a leftover debugging shortcut before release.
            self.files_sketch = self.files_sketch[:int(n * 0.3)]
            # self.files_sketch = self.files_sketch[int(n * 0.9):]

        # in-betweening statistics; real values are filled in by update_data()
        self.position_std = 1
        self.velocity_std = 1
        self.start_sequence_length = 24
        self.cur_sequence_length = 24
        self.phase_std = None
        self.phase_mn = None

        self.x_mean = None
        self.x_std = None

    def parse_file(self):
        """Yield ``(hmr_data, X, Q)`` windows for every sketch video.

        ``X`` / ``Q`` are the window's local positions / quaternions;
        ``hmr_data`` holds the axis-angle pose and rendered image of the
        window's start ("star") and end ("tar") frames.
        """
        for video_path in self.files_sketch:
            # The paired SMPL-X file mirrors the video path with a different
            # top-level folder and a _stageii.npz suffix.
            npz_path = video_path.replace('SketchRenders', 'SMPLX')
            npz_path = npz_path.replace('.mp4', '_stageii.npz')
            print(video_path)
            anim = extract.read_data(Path(video_path), Path(npz_path))
            i = 0
            # Slide a fixed-size window over the sequence with stride `offset`.
            while i + self.window < anim.pos.shape[0]:
                # Axis angles of the first and last frame of this window.
                A = np.asarray([[anim.axis[i]], [anim.axis[i + self.window]]])
                # Rendered images of the first and last frame of this window.
                I = np.asarray(anim.video[[anim.index[i], anim.index[i + self.window]]])
                X = np.asarray(anim.pos[i: i + self.window])
                Q = np.asarray(anim.quats[i: i + self.window])
                hmr_data = {
                    "star": {
                        # axis angle of 24 joints (24*3 = 72) — start frame
                        "axis": A[0],
                        # rendered image of the start frame
                        "img": I[0],
                    },
                    "tar": {
                        # axis angle of 24 joints (24*3 = 72) — end frame
                        "axis": A[1],
                        # rendered image of the end frame
                        "img": I[1],
                    },
                }
                i += self.offset
                yield (hmr_data, X, Q)

    def __iter__(self):
        """IterableDataset protocol: iterate windows via parse_file()."""
        return self.parse_file()

    def update_data(self, X, Q):
        """
        Compute normalization statistics and assemble the training dict.

        Args:
            X: local joint positions — assumed (n, seq_len, J, 3) from the
               FK call below; TODO confirm against extract.update_amass_dataset.
            Q: local joint quaternions — assumed (n, seq_len, J, 4).

        Returns:
            ``data`` dict, or ``(data, iv_data)`` where ``iv_data`` is a
            temporally reversed copy, when ``self.flip`` is set.

        Side effects:
            Sets ``self.parents``, ``self.x_mean``, ``self.x_std``,
            ``self.position_std`` and ``self.velocity_std``.
        """
        X, Q, contacts_l, contacts_r, self.parents = extract.update_amass_dataset(X, Q)

        # Forward kinematics → global quaternions/positions,
        # shapes n * seq_len * J * 4 and n * seq_len * J * 3.
        global_quaternion, global_position = utils.quat_fk(Q, X, self.parents)
        global_quaternion = torch.from_numpy(global_quaternion).to(torch.float32)
        global_position = torch.from_numpy(global_position).to(torch.float32)

        X = torch.from_numpy(X).to(torch.float32)
        Q = torch.from_numpy(Q).to(torch.float32)
        contacts_l = torch.from_numpy(contacts_l).to(torch.float32)
        contacts_r = torch.from_numpy(contacts_r).to(torch.float32)

        # (n, J*3, seq_len) view used by the channel-wise statistics below.
        # Hoisted: the original recomputed this reshape+permute three times.
        flat = global_position.reshape(
            [global_position.shape[0], global_position.shape[1], -1]
        ).permute([0, 2, 1])
        self.x_mean = torch.mean(flat, dim=(0, 2), keepdim=True).to(torch.float32)
        self.x_std = torch.std(flat, dim=(0, 2), keepdim=True).to(torch.float32)

        # Global position stats. Identical to x_std by construction; kept as
        # a separate attribute because other code reads position_std.
        self.position_std = self.x_std.clone()

        # Global velocity std: stitch the overlapping windows back into one
        # long trajectory. Consecutive windows advance by `offset` frames, so
        # only the last `offset` frames of each later window are new.
        # (Was hard-coded -20; generalized to the configured stride, which is
        # identical behavior at the default offset=20.)
        gp = global_position.reshape(global_position.shape[0], global_position.shape[1], -1).numpy()
        all_position = []
        all_position.extend(gp[0, :])
        for i in range(gp.shape[0] - 1):
            all_position.extend(gp[i + 1, -self.offset:])
        all_position = torch.Tensor(np.array(all_position))
        all_velocity = all_position[1:] - all_position[:-1]
        self.velocity_std = torch.std(all_velocity, dim=0).to(torch.float32)

        data = {
            # 1. local quaternion vector (J * 4d)
            "local_quaternion": Q,
            # 2. global root velocity vector (3d): frame-to-frame root delta
            "root_velocity": global_position[:, 1:, 0, :]
            - global_position[:, :-1, 0, :],
            # 3. contact information vector (4d): left + right foot contacts
            "contact": torch.cat([contacts_l, contacts_r], -1),
            # 4. global root position offset: last-frame root position
            "root_position_offset": global_position[:, -1, 0, :],
            # 6. target: last frame (quaternion)
            "target": Q[:, -1, :, :],
            # 7. root position trajectory
            "root_position": global_position[:, :, 0, :],
            # 8. global positions of all joints
            "global_position": global_position[:, :, :, :],
            # 9. global quaternions of all joints
            "global_quaternion": global_quaternion[:, :, :, :],
        }

        # flip temporal: build a reversed copy of the same dictionary.
        if self.flip:
            # Negative strides need a contiguous copy before torch conversion;
            # all tensors are already float32 so dtype is preserved.
            Q = torch.from_numpy(np.ascontiguousarray(Q.numpy()[:, ::-1, :, :]))
            contacts_l = torch.from_numpy(np.ascontiguousarray(contacts_l.numpy()[:, ::-1, :]))
            contacts_r = torch.from_numpy(np.ascontiguousarray(contacts_r.numpy()[:, ::-1, :]))
            global_position = torch.from_numpy(np.ascontiguousarray(global_position.numpy()[:, ::-1, :, :]))
            # phase = phase[:, ::-1, :]

            iv_data = {
                # 1. local quaternion vector (J * 4d), time-reversed
                "local_quaternion": Q,
                # 2. global root velocity vector (3d)
                "root_velocity": global_position[:, 1:, 0, :]
                - global_position[:, :-1, 0, :],
                # 3. contact information vector (4d)
                "contact": torch.cat([contacts_l, contacts_r], -1),
                # 4. global root position offset: last-frame root position
                "root_position_offset": global_position[:, -1, 0, :],
                # 6. target: last frame (quaternion)
                "target": Q[:, -1, :, :],
                # 7. root position trajectory
                "root_position": global_position[:, :, 0, :],
                # 8. global positions of all joints
                # NOTE(review): unlike `data`, no "global_quaternion" key here —
                # possibly an oversight; confirm with downstream consumers.
                "global_position": global_position[:, :, :, :],
            }

            return data, iv_data
        return data