import os
import glob
import pdb
import numpy as np
from pathlib import Path
from scipy.io import loadmat,savemat

# Feature toggles for the merge passes below ("RBGFACE" spelling kept: other
# tooling may reference these names).
COMBINE_COEFF_RBGFACE = True   # merge rgbface/coeff/*.txt      -> video/coeff_rgbface2.npy
COMBINE_LAMDK_RBGFACE = False  # merge rgbface/landmark_* *.txt -> video/landmark_rgbface_{proj,crop}.npy


if __name__ == "__main__":
    # Walk the preprocessed LRW tree and, per video, stack the per-frame
    # text outputs of the RGBFace pipeline into single .npy arrays:
    #   - 3DMM coefficients  -> video/coeff_rgbface2.npy           (T, 61)
    #   - projected/cropped landmarks -> video/landmark_rgbface_*.npy (T, 68, 2)

    # Root of the preprocessed dataset; one sub-directory per word id.
    path_data = Path("/mnt/data/DATA/LRW/processing")

    # for each word id
    v_ids = sorted(path_data.glob("*"))
    print(f"Have {len(v_ids)} dir files, \n {v_ids[:3]}")

    for i, ipath_id in enumerate(v_ids):
        id_name = ipath_id.stem

        dir_video = path_data.joinpath(id_name)
        # NOTE: "video_orignal" (sic) matches the on-disk filename — do not "fix".
        v_dir_videos = sorted(dir_video.glob("*/*/video/video_orignal.mp4"))

        print(f"\nid= {id_name}, Have {len(v_dir_videos)} video files, \n {v_dir_videos[:3]}")

        # for each video
        for j, jpath_videos in enumerate(v_dir_videos):

            # BUGFIX: was `(j+1) % 100 == 100`, which is never true (a value
            # mod 100 lies in 0..99), so progress was never reported.
            if (j + 1) % 100 == 0:
                print(f"processing {j+1}/{len(v_dir_videos)}")

            # .../<word>/<split>/<clip>/ — the per-video data directory.
            jpath_data = jpath_videos.parent.parent

            # Frames end in a 4-digit index; sort numerically, not lexically.
            jpath_frames = jpath_data.joinpath("frames")
            v_frames = sorted(jpath_frames.glob("*.jpg"), key=lambda x: int(x.stem[-4:]))

            # --- combine 3DMM coefficients of RGBFaceNet ---
            if COMBINE_COEFF_RBGFACE:
                jpath_coeff = jpath_data.joinpath("rgbface/coeff")
                jpath_coeff_save = jpath_data.joinpath("video/coeff_rgbface2.npy")
                jpath_coeff_save.parent.mkdir(parents=True, exist_ok=True)

                v_coeff = sorted(jpath_coeff.glob("*.txt"))

                if len(v_coeff) != len(v_frames):
                    # Incomplete reconstruction output for this clip: skip it.
                    print(f"\t ERROR && Continue: This video missing coeff{len(v_coeff)}/{len(v_frames)}, {str(jpath_coeff)} ")
                    continue
                # elif jpath_coeff_save.exists():
                #     continue
                else:
                    # Per-frame vector layout (see slicing below):
                    # expression-55 + rotation-3 + translation-3 = 61 values.
                    coeff_all = np.zeros((len(v_frames), 61), np.float32)

                    for k, iframe in enumerate(v_frames):
                        fid = int(iframe.stem[-4:])

                        # Coefficient file is keyed by the frame index, not by
                        # list position, so a missing frame cannot shift data.
                        kpath_coeff = jpath_coeff.joinpath(f"frame-{fid:04d}.txt")

                        kk_coeff = np.loadtxt(str(kpath_coeff)).reshape(-1)

                        # Full vector: identity-100 | expression-55 | rot-3, trans-3, scale-1.
                        kk_coeff_expr = kk_coeff[100:155]  # (55,) expression
                        kk_coeff_rts = kk_coeff[155:161]   # (6,)  rotation-3 + translation-3
                        coeff_all[k, :] = np.r_[kk_coeff_expr, kk_coeff_rts]  # (61,)

                    np.save(str(jpath_coeff_save), coeff_all)

            # --- combine landmarks of RGBFaceNet ---
            if COMBINE_LAMDK_RBGFACE:
                jpath_ladmk_proj = jpath_data.joinpath("rgbface/landmark_proj")
                jpath_ladmk_crop = jpath_data.joinpath("rgbface/landmark_crop")

                jpath_proj_save = jpath_data.joinpath("video/landmark_rgbface_proj.npy")
                jpath_crop_save = jpath_data.joinpath("video/landmark_rgbface_crop.npy")
                jpath_crop_save.parent.mkdir(parents=True, exist_ok=True)

                v_ladmk_proj = sorted(jpath_ladmk_proj.glob("*.txt"))
                v_ladmk_crop = sorted(jpath_ladmk_crop.glob("*.txt"))

                if len(v_ladmk_proj) != len(v_frames) or len(v_ladmk_crop) != len(v_frames):
                    # Incomplete landmark output for this clip: skip it.
                    print(f"\t ERROR && Continue: This video missing landmark, {str(jpath_ladmk_proj)} ")
                    continue
                elif jpath_proj_save.exists() and jpath_crop_save.exists():
                    # Both outputs already exist: skip (unlike the coeff pass,
                    # this pass is resumable).
                    continue
                else:
                    # 68 facial landmarks, (x, y) each.
                    ladmk_proj_all = np.zeros((len(v_frames), 68, 2), np.float32)
                    ladmk_crop_all = np.zeros((len(v_frames), 68, 2), np.float32)

                    for k, iframe in enumerate(v_frames):
                        fid = int(iframe.stem[-4:])

                        kpath_proj = jpath_ladmk_proj.joinpath(f"frame-{fid:04d}.txt")
                        kpath_crop = jpath_ladmk_crop.joinpath(f"frame-{fid:04d}.txt")

                        ladmk_proj_all[k, :, :] = np.loadtxt(str(kpath_proj))  # (68, 2)
                        ladmk_crop_all[k, :, :] = np.loadtxt(str(kpath_crop))  # (68, 2)

                    np.save(str(jpath_proj_save), ladmk_proj_all)
                    np.save(str(jpath_crop_save), ladmk_crop_all)