'''
Author: daniel
Date: 2024-07-26 19:02:12
LastEditTime: 2024-07-29 16:52:44
LastEditors: daniel
Description: 
FilePath: /data/utils/entity/data_processor.py
have a nice day
'''


import os 
import sys

sys.path.append('/root/mall_stat/data')
sys.path.append('/root/mall_stat/data/utils')
import time
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import cv2


from os.path import join,split,exists,dirname
from tqdm import tqdm 
import shutil
import json 

from my_utils.utils import * 

import random



class DataProcessor:
    """Load, filter and index per-day staff images for one mall site.

    Assumed directory layout (confirm against the actual data dump):
        <data_root>/<site>/<date>/score.json   image name -> body-quality score
        <data_root>/<site>/<date>/feats.json   image name -> feature vector
        <data_root>/<site>/<date>/<name>.jpg   the cropped images themselves
    A parallel tree obtained by replacing 'images' with 'extra_labels' holds
    the per-day tid->pid trajectory files (see __get_vtp_file_name__).

    Filtered results are cached as JSON under <data_root>/cache/<site>/ so
    repeated runs skip the expensive filtering passes.
    """

    def __init__(self, data_root, site):
        self.data_root = data_root
        self.site = site
        # every sub-directory of <data_root>/<site> is one date partition
        self.dates = os.listdir(join(data_root, site))

        #!=========================================score==================================================
        # cap on samples kept per day; days above it are randomly subsampled
        maximum_num_4_each_day = 1e4
        self.scores = self.__get_filter_score__(site, maximum_num_4_each_day)

        #!===================================feat=========================================================
        #todo feature loading is currently disabled
        # self.feats = self.__get_filter_feat__(self.site)

        #!=====================================pids, pid to tid and tid to pid ===========================
        self.tid2pid, self.pid = self.__get_pid_map__(site)

        #* pid 2 tid
        self.pid2tid = self.__get_pid2tid__()

        #!=====================================imgs names=================================================
        #* image names are exactly the score keys that survived filtering
        self.imgs = {d: list(self.scores[d].keys()) for d in self.dates}
        #!================================================================================================
        print('tracklet number is ', self.__get_tracklet_num__())

        assert len(self.imgs) == len(self.scores)
        self.length_list = [len(self.imgs[d]) for d in self.dates]
        #!================================================================================================

    def __get_pid2tid__(self):
        """Invert the per-day tid->pid map into pid -> [tid, ...] (cached).

        Returns a dict  date -> {pid: [tids]}.
        NOTE(review): pids reloaded from the JSON cache are strings even if
        they were ints when first built — pre-existing quirk, kept as-is.
        """
        cache_path = join(self.data_root, 'cache', self.site, 'pid2tid.json')

        if exists(cache_path):
            with open(cache_path) as f:
                return json.load(f)

        pid2tid = {}
        for d in self.dates:
            # one O(T) inversion pass instead of scanning every tid once
            # per pid (the original was O(P*T) per day)
            day_map = {}
            for tid, ppid in tqdm(self.tid2pid[d].items()):
                day_map.setdefault(ppid, []).append(tid)
            # keep an (empty) entry for pids without any tracklet, matching
            # the original per-pid scan's output
            for ppid in self.pid[d]:
                day_map.setdefault(ppid, [])
            pid2tid[d] = day_map

        # ensure the cache directory exists before writing
        make_dir(dirname(cache_path))
        with open(cache_path, 'w') as f:
            json.dump(pid2tid, f)
        print('pid2tid is saved at: \t', cache_path)

        return pid2tid

    def __get_tracklet_num__(self):
        """Total number of kept images (== tracklets) across all dates."""
        return sum(len(self.imgs[d]) for d in self.dates)

    def __get_filter_feat__(self, site):
        """Load per-day feature dicts restricted to the filtered scores.

        Returns date -> {img_name: feat}; the result is cached as JSON.
        """
        staff_feat = {}

        #* get body quality
        cached_dir = join(self.data_root, 'cache', self.site)
        make_dir(cached_dir)
        cached_file_name = join(cached_dir, 'feat.json')

        if exists(cached_file_name):
            with open(cached_file_name) as f:
                staff_feat = json.load(f)
        else:
            for d in tqdm(self.dates):
                staff_feat[d] = load_json(join(self.data_root, site, d, 'feats.json'))
                #* keep only the images that survived score filtering
                staff_feat[d] = {k: staff_feat[d][k] for k in self.scores[d]}

            with open(cached_file_name, 'w') as f:
                json.dump(staff_feat, f)
            print('staff_feat is saved at %s' % (cached_file_name))

        return staff_feat

    def __get_filter_score__(self, site, maximum_num_4_each_day):
        """Load per-day score.json, subsample down to the daily cap and drop
        unreadable images.

        Returns date -> {img_name: score}; the result is cached as JSON.
        """
        filtered_scores = {}

        cached_dir = join(self.data_root, 'cache', self.site)
        make_dir(cached_dir)
        cached_file_name = join(cached_dir, 'score.json')

        counter = 0
        if exists(cached_file_name):
            with open(cached_file_name) as f:
                filtered_scores = json.load(f)
            counter = sum(len(filtered_scores[d]) for d in self.dates)
        else:
            for d in tqdm(self.dates):
                filtered_scores[d] = load_json(join(self.data_root, site, d, 'score.json'))

                #todo step 2: filter by the acceptable maximum number
                if len(filtered_scores[d]) > maximum_num_4_each_day:
                    selected_keys = random.sample(list(filtered_scores[d].keys()), int(maximum_num_4_each_day))
                    filtered_scores[d] = {k: filtered_scores[d][k] for k in selected_keys}
                    print('the size becomes %d, as it is large than threshold \t' % (len(filtered_scores[d])))

                #todo step 3: drop images PIL cannot open
                if len(filtered_scores[d]) > 0:
                    filtered_scores[d] = self.filter_error_img(filtered_scores[d], d)

                print('the final filterd size %s is %d \t' % (d, len(filtered_scores[d])))
                counter += len(filtered_scores[d])

            with open(cached_file_name, 'w') as f:
                json.dump(filtered_scores, f)

        print('the sample number after filtering is %d' % (counter))

        return filtered_scores

    def __get_filter_score_by_body_quality__(self, site, body_quality_threahold_min,
    body_quality_threahold_max, maximum_num_4_each_day):
        """Like __get_filter_score__ but additionally keeps only images whose
        body-quality score lies in [min, max).  Cached per threshold pair.
        """
        def get_metainfo(path):
            # staff_score / feats loading is disabled; only scores are used
            body_qualities = load_json(join(path, 'score.json'))
            return body_qualities, None, None

        scores = {}
        staff_scores = {}
        staff_feat = {}
        filtered_scores = {}

        #* get body quality
        cached_dir = join(self.data_root, 'cache', self.site)
        make_dir(cached_dir)
        cached_file_name = join(cached_dir, 'score#%.2f_%.2f.json' % (body_quality_threahold_min, body_quality_threahold_max))

        counter = 0
        if exists(cached_file_name):
            with open(cached_file_name) as f:
                filtered_scores = json.load(f)
            counter = sum(len(filtered_scores[d]) for d in self.dates)
        else:
            for d in tqdm(self.dates):
                scores[d], staff_scores[d], staff_feat[d] = get_metainfo(join(self.data_root, site, d))

                #todo step 1: keep scores inside [min, max)
                filtered_scores[d] = {k: v for k, v in scores[d].items()
                                      if body_quality_threahold_max > v >= body_quality_threahold_min}
                print('original size for %s is %d \t after filtered by body quality, it becomes %d \t' % (d, len(scores[d]), len(filtered_scores[d])))

                #todo step 2: filter by the acceptable maximum number
                if len(filtered_scores[d]) > maximum_num_4_each_day:
                    selected_keys = random.sample(list(filtered_scores[d].keys()), int(maximum_num_4_each_day))
                    filtered_scores[d] = {k: filtered_scores[d][k] for k in selected_keys}
                    print('the size becomes %d, as it is large than threshold \t' % (len(filtered_scores[d])))

                #todo step 3: drop images PIL cannot open
                if len(filtered_scores[d]) > 0:
                    filtered_scores[d] = self.filter_error_img(filtered_scores[d], d)

                print('the final filterd size %s is %d \t' % (d, len(filtered_scores[d])))
                counter += len(filtered_scores[d])

            with open(cached_file_name, 'w') as f:
                json.dump(filtered_scores, f)

        print('the filter sample number after filter by threshold of %f and %f is %d' % (body_quality_threahold_min, body_quality_threahold_max, counter))

        return filtered_scores

    def filter_error_img(self, scores, d):
        """Drop entries whose .jpg cannot be opened by PIL.

        scores: {img_name: score} for date d; mutated in place and returned.
        Raises FileNotFoundError if a listed image file is missing (the
        original used a bare assert, which -O silently strips).
        """
        cache_error_root = join(self.data_root, 'error_dir')
        make_dir(cache_error_root)

        error_img = []
        for iidx, k in enumerate(tqdm(scores)):
            img_file_name = join(self.data_root, self.site, d, k + '.jpg')  #! date
            if not exists(img_file_name):
                raise FileNotFoundError(img_file_name)
            try:
                # context manager closes the file handle — the original
                # leaked it.  Image.open only parses the header; call
                # img.verify() inside the block if full-decode checking
                # is ever wanted (would reject more files).
                with Image.open(img_file_name):
                    pass
            except Exception:
                error_img.append(k)
            if iidx % 1000 == 0 and iidx > 0:
                print('proceed the %d-th image, so far, the number of error image is %d, error rate is %f' % (iidx, len(error_img), len(error_img) / iidx))

        error_img_number = len(error_img)
        print(" the total number of image is %d \t error image number is %d \t error rate is %f \t " % (len(scores), \
            error_img_number, error_img_number / len(scores)), end='\t')

        #todo: drop the broken images from the score dict
        for error_k in error_img:
            scores.pop(error_k)

        return scores

    def __get_vtp_file_name__(self, site, d):
        """Return the whole-day trajectory (tid->pid) JSON path for site/date d.

        The site id 'A_B_C' is split into three path components inside the
        parallel 'extra_labels' tree.
        """
        file_name = "%s/%s/%s/prod/data/prod/customer/%s/%s/%s/events/lite2full/%s/trajectory_vtp_whole_day/realtime.vtp.json" % ( \
            self.data_root.replace('images', 'extra_labels'), site, d, *site.split('_'), d,)

        return file_name

    def __get_pid_map__(self, site):
        """Load the per-day tid->pid map restricted to the filtered images.

        Returns (tid2pid, pid): tid2pid is date -> {tid: pid} (JSON-cached);
        pid is date -> set of distinct pids for that day.
        """
        cache_path = join(self.data_root, 'cache', self.site, 'tid2pid.json')

        if exists(cache_path):
            with open(cache_path) as f:
                filtered_tid2pid = json.load(f)
        else:
            extra_label_root = self.data_root.replace('images', 'extra_labels')
            print('data_root: \t ', self.data_root, '\t new data_root for extra label:', extra_label_root)
            tid2pid = {}
            for d in tqdm(self.dates):
                file_name = self.__get_vtp_file_name__(site, d)
                assert exists(file_name)
                with open(file_name) as f:
                    tid2pid[d] = json.load(f)

            #* keep only the tids that survived score filtering
            filtered_tid2pid = {}
            for d in tqdm(self.dates):
                filtered_tid2pid[d] = {k: tid2pid[d][k] for k in self.scores[d]}

            # ensure the cache directory exists before writing
            make_dir(dirname(cache_path))
            with open(cache_path, 'w') as f:
                json.dump(filtered_tid2pid, f)

            print('filtered_tid2pid is saved at %s' % (cache_path))

        pid = {}
        counter = 0
        for d in self.dates:
            pid[d] = set(filtered_tid2pid[d].values())
            counter += len(pid[d])

        print('pid number is %d' % (counter))
        return filtered_tid2pid, pid

    def __len__(self):
        """Number of date partitions (NOT the total image count)."""
        return len(self.imgs)

    def __filter_sample_by_multiple_threshold_range__(self):
        """Filter samples per 0.1-wide quality band and merge the bands.

        Returns date -> {img_name: score} merged over all bands.
        """
        threshold_range = np.arange(0.1, 1.02, 0.1).tolist()
        maximum_num_4_each_day = 1e3
        threshold_samples = {}
        for i in range(len(threshold_range) - 1):
            # bugfix: the original referenced an undefined global `site`
            # here (NameError); the instance's site is the intended value
            threshold_samples[threshold_range[i]] = self.__get_filter_score_by_body_quality__(
                self.site, threshold_range[i], threshold_range[i + 1], maximum_num_4_each_day)

        sample_nums = sum(len(v[d]) for v in threshold_samples.values() for d in self.dates)
        print('total sample number for this data is ', sample_nums)

        merged_scores = {}
        for v in threshold_samples.values():
            for d in self.dates:
                merged_scores.setdefault(d, {}).update(v[d])
        return merged_scores



if __name__ == "__main__":
    # smoke test: build the processor for one hard-coded site
    print(" hello world")
    data_root = '/root/mall_stat/data/mall_staff_full/images'
    site = 'CR_zhongshan_wxhpoc'
    DataProcessor(data_root, site)
