import torch
from torch import optim
import os
from torch.utils.data import DataLoader,ConcatDataset
from tqdm import tqdm
from colorama import Fore
from torch import nn
from torch.cuda.amp import autocast,GradScaler

from configs.demo import GetConfig
from datalist import Market1501,MSMT,DukeMTMC,collate_fn
from models import ImageEncoder
from utils import SupConLoss,CenterLoss,CircleLoss


class CLIPREIDTrainStage1:
    '''
    Stage 1 of CLIP-ReID training: optimize ONLY the prompt learner while the
    CLIP text/image encoders stay frozen. The objective is a symmetric
    supervised contrastive loss between projected image features and the
    text features produced from the learned prompts.
    '''
    def __init__(self) -> None:
        self.args = GetConfig()
        print('-'*10,f'{self.args.project_name}','-'*10)

        self.use_cuda = self.args.use_cuda and torch.cuda.is_available()
        self.device = torch.device('cuda' if self.use_cuda else 'cpu')

        if self.use_cuda:
            # NOTE(review): benchmark=True lets cuDNN pick potentially
            # non-deterministic algorithms, which undermines
            # deterministic=True — confirm which of the two is intended.
            torch.backends.cudnn.benchmark       = True
            torch.backends.cudnn.deterministic   = True
            torch.backends.cuda.matmul.allow_tf32= True
            torch.backends.cudnn.allow_tf32      = True

        # DataLoader worker processes only help when the GPU is consuming
        # batches; on CPU keep the single-process default.
        kwargs = {'num_workers': os.cpu_count()} if self.use_cuda else {}

        train_market1501 = Market1501(
                                    begin_pid        = 0,
                                    pre_process      = None,
                                    data_dir         = self.args.train_dir_market1501_dir,
                                    extra_annotations= None,
                                    mode             = 'train',
                                    color_space      = 'RGB'
                                )
        # Number of distinct person identities in the training split.
        num_class = len(train_market1501.person_id2label)

        # ConcatDataset kept so additional datasets can be appended later.
        train_dataset = ConcatDataset([train_market1501])

        self.train_dataloader = DataLoader(
            train_dataset,
            batch_size=self.args.train_batch_size,
            shuffle=True,
            collate_fn=None,
            **kwargs
        )

        print('total ids num:',num_class)

        # Move to the target device BEFORE wrapping in DataParallel, which
        # expects parameters on the primary device. (No-op if ImageEncoder
        # already places itself — TODO confirm.)
        self.model = ImageEncoder(num_class=num_class).to(self.device)

        # Freeze everything except the prompt learner.
        self.model.text_encoder.requires_grad_(False)
        self.model.image_encoder.requires_grad_(False)
        self.model.prompt_learner.requires_grad_(True)
        self.model.text_encoder.eval()
        self.model.image_encoder.eval()

        if self.use_cuda:
            self.model = torch.nn.DataParallel(self.model)

        if self.args.resume:
            print('load weight from pretrained_weight file')
            checkpoint = torch.load(self.args.prompt_learner_pretrain_weight,map_location='cpu')
            prompt_learner = self._core().prompt_learner
            model_dict = prompt_learner.state_dict()
            model_dict.update(checkpoint['model_state_dict'])
            prompt_learner.load_state_dict(model_dict)
            print('Finished loading the weight')

        self.optimizer = optim.AdamW(
            params=self._core().prompt_learner.parameters(),
            lr=self.args.lr,
            weight_decay=1e-4,
        )

        self.loss = SupConLoss(self.device)

        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=50, gamma=0.1)

        # Loss scaling is only meaningful (and warning-free) with CUDA AMP.
        self.scaler = GradScaler(enabled=self.use_cuda)

    def _core(self):
        '''Return the underlying model, unwrapping DataParallel when present.

        The original code accessed ``self.model.module`` unconditionally,
        which crashes on CPU where no DataParallel wrapper exists.
        '''
        return self.model.module if isinstance(self.model, nn.DataParallel) else self.model

    def work(self):
        '''Run the full stage-1 schedule: train, checkpoint and step the LR
        scheduler once per epoch.'''
        # range(1, epochs) skipped the final epoch; run all `epochs` epochs
        # so the count matches the "epoch/epochs" progress display.
        for epoch in range(1, self.args.epochs + 1):
            self.train(epoch)
            self.save_checkpoint(
                save_path=os.path.join(self.args.save_path,self.args.project_name,'stage1',str(epoch)+'.pth'),
                epoch=epoch,
                )
            self.scheduler.step()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        print('finished model training')

    def train(self,epoch):
        '''Train the prompt learner for one epoch with a symmetric
        text<->image supervised contrastive loss.'''
        self.model.train()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        pbar = tqdm(
                    self.train_dataloader,
                    desc=f'Train Epoch {epoch}/{self.args.epochs}',
                    bar_format='{l_bar}%s{bar}%s{r_bar}' % (Fore.BLUE, Fore.RESET)
                    )
        loss_total = 0
        loss_num_count = 0
        # Batch tuple: image, person_id, camera_id, video_id, image_path.
        for image,person_id,_,_,_ in pbar:
            self.optimizer.zero_grad()

            image = image.to(self.device)
            person_id = person_id.to(self.device)
            with autocast():
                # Image features come from the frozen encoder; detach so no
                # graph is kept for them.
                with torch.no_grad():
                    image_features_proj = self.model(x=image,label=person_id,get_image=True).detach()
                text_features = self.model(x=image,label=person_id,get_text=True)

                loss_t2i = self.loss(text_features,image_features_proj,person_id,person_id)
                loss_i2t = self.loss(image_features_proj,text_features,person_id,person_id)
                # Symmetric contrastive objective, equal weights.
                loss = loss_t2i + loss_i2t

            self.scaler.scale(loss).backward()
            self.scaler.step(self.optimizer)
            self.scaler.update()

            loss_total += loss.item()
            loss_num_count += 1

            pbar.set_description(
                f'Epoch: {epoch}/{self.args.epochs}' +
                f' | lr: {self.optimizer.param_groups[0]["lr"]:.4f}' +
                f' | Loss: {loss_total/loss_num_count:.4f}' 
            )

    def save_checkpoint(self,save_path,epoch):
        '''Persist only the prompt-learner weights (the rest is frozen).'''
        print(save_path)
        os.makedirs(os.path.dirname(save_path),exist_ok=True)
        torch.save(
            {
                'epoch': epoch,
                'model_state_dict': self._core().prompt_learner.state_dict(),
            }, save_path
        )


class CLIPREIDTrainStage2:
    '''
    Stage 2 of CLIP-ReID training: optimize the image encoder while the text
    encoder and the (stage-1-trained) prompt learner stay frozen. Per-id text
    features are precomputed once; the training loss combines image-to-text
    cross-entropy, id classification, center loss and circle loss.
    '''
    def __init__(self) -> None:
        self.args = GetConfig()
        print('-'*10,f'{self.args.project_name}','-'*10)

        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        self.use_cuda = use_cuda
        self.device = torch.device('cuda' if use_cuda else 'cpu')

        if use_cuda:
            # NOTE(review): benchmark=True lets cuDNN pick potentially
            # non-deterministic algorithms, which undermines
            # deterministic=True — confirm which of the two is intended.
            torch.backends.cudnn.benchmark       = True
            torch.backends.cudnn.deterministic   = True
            torch.backends.cuda.matmul.allow_tf32= True
            torch.backends.cudnn.allow_tf32      = True

        # Worker processes only pay off when the GPU consumes batches.
        kwargs = {'num_workers': os.cpu_count()} if use_cuda else {}

        train_dataset = Market1501(
                                    begin_pid        = 0,
                                    pre_process      = None,
                                    data_dir         = self.args.train_dir,
                                    extra_annotations= None,
                                    mode             = 'train',
                                    color_space      = 'RGB'
                                )

        self.train_dataloader = DataLoader(
            train_dataset,
            batch_size=self.args.train_batch_size,
            shuffle=True,
            collate_fn=None,
            **kwargs
        )

        # Number of distinct person identities in the training split.
        num_class = len(train_dataset.person_id2label)

        print('total ids num:',num_class)

        # Move to the target device BEFORE wrapping in DataParallel, which
        # expects parameters on the primary device. (No-op if ImageEncoder
        # already places itself — TODO confirm.)
        self.model = ImageEncoder(num_class=num_class).to(self.device)
        # Freeze the text side; only the image encoder is trained here.
        self.model.text_encoder.requires_grad_(False)
        self.model.prompt_learner.requires_grad_(False)
        self.model.text_encoder.eval()
        self.model.prompt_learner.eval()

        if use_cuda:
            self.model = torch.nn.DataParallel(self.model)

        # Stage-1 prompt weights are always required for stage 2; take the
        # path from the config instead of a hard-coded debug checkpoint.
        checkpoint = torch.load(self.args.prompt_learner_pretrain_weight,map_location='cpu')
        # Strip any 'module.' prefix left over from DataParallel saving.
        prompt_state = {k.replace('module.',''): v
                        for k,v in checkpoint['model_state_dict'].items()}
        self._core().prompt_learner.load_state_dict(prompt_state,strict=True)

        if self.args.resume:
            print('load weight from pretrained_weight file')
            checkpoint = torch.load(self.args.image_encoder_pretrain_weight,map_location='cpu')
            encoder_state = {k.replace('module.',''): v
                             for k,v in checkpoint['model_state_dict'].items()}
            self._core().image_encoder.load_state_dict(encoder_state,strict=True)
            print('Finished loading the weight')

        self.center_loss = CenterLoss(num_classes=num_class,feat_dim=512,use_gpu=use_cuda,use_amp=True)
        self.loss = nn.CrossEntropyLoss()
        # NOTE(review): stage 2 uses a fixed 1e-5 encoder lr rather than
        # self.args.lr — confirm this is intentional.
        self.optimizer = optim.AdamW(
            lr = 1e-5,
            params = self._core().image_encoder.parameters(),
            weight_decay = 1e-4,
        )
        self.optimizer_center = optim.AdamW(
            lr = self.args.center_lr,
            params = self.center_loss.parameters(),
            weight_decay = 1e-4,
        )

        # Precompute one text feature per identity from the frozen prompts;
        # done one label at a time to keep memory bounded.
        labels = torch.Tensor(list(train_dataset.person_id2label.values())).long()
        text_features = []
        with torch.no_grad():
            pbar = tqdm(range(len(labels)), desc="制作text features", colour='green')
            for i in pbar:
                text_feature = self.model(x=None,label=labels[i].unsqueeze(0),get_text=True)
                text_features.append(text_feature)
        self.text_features = torch.cat(text_features).detach()
        print(self.text_features.shape)

        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=50, gamma=0.1)
        # Was hard-coded to 751 (Market1501's id count); use the actual
        # number of classes so other datasets work too.
        self.circle_loss = CircleLoss(in_features=512,num_classes=num_class)
        # Loss scaling is only meaningful (and warning-free) with CUDA AMP.
        self.scaler = GradScaler(enabled=use_cuda)

    def _core(self):
        '''Return the underlying model, unwrapping DataParallel when present.

        The original code accessed ``self.model.module`` unconditionally,
        which crashes on CPU where no DataParallel wrapper exists.
        '''
        return self.model.module if isinstance(self.model, nn.DataParallel) else self.model

    def work(self):
        '''Run the full stage-2 schedule: train, checkpoint and step the LR
        scheduler once per epoch.'''
        # range(1, epochs) skipped the final epoch; run all `epochs` epochs
        # so the count matches the "epoch/epochs" progress display.
        for epoch in range(1, self.args.epochs + 1):
            self.train(epoch)
            self.save_checkpoint(
                save_path=os.path.join(self.args.save_path,self.args.project_name,'stage2',str(epoch)+'.pth'),
                epoch=epoch,
            )
            self.scheduler.step()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        print('finished model training')

    def train(self,epoch):
        '''Train the image encoder for one epoch with the combined
        i2t / id / center / circle loss.'''
        self._core().image_encoder.train()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()

        pbar = tqdm(
                    self.train_dataloader,
                    desc=f'Train Epoch {epoch}/{self.args.epochs}',
                    bar_format='{l_bar}%s{bar}%s{r_bar}' % (Fore.BLUE, Fore.RESET)
                    )

        # Batch tuple: image, person_id, camera_id, video_id, image_path.
        for image,person_id,_,_,_ in pbar:
            self.optimizer.zero_grad()
            self.optimizer_center.zero_grad()

            image = image.to(self.device)
            person_id = person_id.to(self.device)

            with autocast():
                score,feat,image_features = self.model(x=image,label=person_id,cam_label=None,view_label=None)
                # Similarity of each image against every identity's
                # precomputed text feature.
                logits = image_features @ self.text_features.t()

                loss_i2t = self.loss(logits,person_id)
                # `score` is a list of classifier outputs; sum their CE.
                loss_id = sum(self.loss(s,person_id) for s in score)
                loss_center = self.center_loss(image_features,person_id)
                loss_circle = self.circle_loss(image_features,person_id)
                loss = loss_i2t + loss_id + loss_circle + loss_center
            self.scaler.scale(loss).backward()
            self.scaler.step(self.optimizer)
            self.scaler.step(self.optimizer_center)
            self.scaler.update()

            pbar.set_description(
                f'Epoch: {epoch}/{self.args.epochs}' +
                f' | lr: {self.optimizer.param_groups[0]["lr"]:.4f}' +
                f' | Loss: {loss.item():.4f}' 
            )

    @torch.no_grad()
    def test(self,epoch):
        # Evaluation not implemented yet.
        self.model.eval()
        pass

    def save_checkpoint(self,save_path,epoch):
        '''Persist only the image-encoder weights (the trained part).'''
        print(save_path)
        os.makedirs(os.path.dirname(save_path),exist_ok=True)
        torch.save(
            {
                'epoch': epoch,
                'model_state_dict': self._core().image_encoder.state_dict(),
            }, save_path
        )
        

if __name__ == "__main__":
    # Entry point: run stage-2 (image-encoder) training end to end.
    trainer = CLIPREIDTrainStage2()
    trainer.work()
        


