import argparse
import torch
import torch.nn as nn
import torch_npu
from torch.utils.data import DataLoader
import torchvision
from torchvision.transforms import Compose, ToTensor, Resize
from torch import optim
import numpy as np
from torch.hub import tqdm

from einops.layers.torch import Rearrange

# NPU profiler configuration: plain-text export, Level1 detail, with the
# optional collectors (L2 cache, op attrs, msprof_tx, GC detection) disabled.
experimental_config = torch_npu.profiler._ExperimentalConfig(
    export_type=[
        torch_npu.profiler.ExportType.Text
        ],
    profiler_level=torch_npu.profiler.ProfilerLevel.Level1,
    msprof_tx=False,
    aic_metrics=torch_npu.profiler.AiCMetrics.AiCoreNone,
    l2_cache=False,
    op_attr=False,
    data_simplification=False,
    record_op_args=False,
    gc_detect_threshold=None
)

# Module-level profiler driven by TrainEval.train_fn (prof.start/step/stop).
# Schedule: skip the first step, then record 3 active steps exactly once;
# traces are written to ./result_opt1 for TensorBoard.
prof = torch_npu.profiler.profile(
    activities=[
        torch_npu.profiler.ProfilerActivity.CPU,
        torch_npu.profiler.ProfilerActivity.NPU
        ],
    schedule=torch_npu.profiler.schedule(wait=0, warmup=0, active=3, repeat=1, skip_first=1),
	on_trace_ready=torch_npu.profiler.tensorboard_trace_handler("./result_opt1"),
    record_shapes=False,
    profile_memory=False,
    with_stack=False,
    with_modules=False,
    with_flops=False,
    experimental_config=experimental_config)




class PatchExtractor(nn.Module):
    """Split a batch of images into flattened, non-overlapping patches.

    Input:  (batch, channels, height, width)
    Output: (batch, num_patches, patch_size * patch_size * channels)

    Fix: removed the unused einops ``Rearrange`` member (its invocation was
    commented out, so it was dead code constructed on every instantiation).
    NOTE(review): the unfold path flattens each patch in (c, p1, p2) order,
    whereas the removed Rearrange pattern specified (p1, p2, c); the two differ
    only by a fixed permutation, which the downstream Linear projection absorbs.
    """

    def __init__(self, patch_size=16):
        super().__init__()
        self.patch_size = patch_size

    def forward(self, input_data):
        batch_size, channels, height, width = input_data.size()
        assert height % self.patch_size == 0 and width % self.patch_size == 0, \
            f"Input height ({height}) and width ({width}) must be divisible by patch size ({self.patch_size})"

        num_patches_h = height // self.patch_size
        num_patches_w = width // self.patch_size
        num_patches = num_patches_h * num_patches_w

        # unfold over H then W -> (b, c, nh, nw, p, p); the permute moves the
        # channel axis behind the patch grid so the final view flattens each
        # patch contiguously as (c, p1, p2).
        patches = input_data.unfold(2, self.patch_size, self.patch_size). \
            unfold(3, self.patch_size, self.patch_size). \
            permute(0, 2, 3, 1, 4, 5). \
            contiguous(). \
            view(batch_size, num_patches, -1)

        # Expected shape with default settings (224x224 RGB, patch 16): (b, 196, 768)
        return patches


class InputEmbedding(nn.Module):
    """Map an image batch to the transformer input sequence.

    Pipeline: patchify -> LayerNorm -> linear projection -> LayerNorm,
    then prepend a learnable class token and add learnable position embeddings.

    Input:  (batch, n_channels, img_size, img_size)
    Output: (batch, num_patches + 1, latent_size)
    """

    def __init__(self, args):
        super(InputEmbedding, self).__init__()
        self.patch_size = args.patch_size
        self.n_channels = args.n_channels
        self.latent_size = args.latent_size
        self.device = args.device
        self.batch_size = args.batch_size
        # Flattened patch length: p * p * c (e.g. 16*16*3 = 768).
        self.input_size = self.patch_size * self.patch_size * self.n_channels

        # Fix: build the patch extractor once here instead of re-instantiating
        # an nn.Module on every forward() call (it holds no parameters, so the
        # state_dict is unchanged).
        self.patchify = PatchExtractor(patch_size=self.patch_size)

        # Pre-projection norm over raw patch pixels.
        self.norm1 = nn.LayerNorm(self.input_size)
        # Linear projection into the transformer latent space.
        self.LinearProjection = nn.Linear(self.input_size, self.latent_size)
        # Post-projection norm.
        self.norm2 = nn.LayerNorm(self.latent_size)

        # Learnable class token, broadcast over the batch in forward().
        self.class_token = nn.Parameter(torch.randn(1, 1, self.latent_size).to(self.device))
        # One position embedding per patch plus one for the class token.
        num_patches = int(args.img_size / self.patch_size) * int(args.img_size / self.patch_size)
        self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, self.latent_size).to(self.device))

    def forward(self, input_data):
        input_data = input_data.to(self.device)

        # (b, c, h, w) -> (b, num_patches, input_size)
        patches = self.patchify(input_data)

        # Norm -> project -> norm: (b, num_patches, latent_size)
        linear_projection = self.norm2(self.LinearProjection(self.norm1(patches)))

        # Prepend the class token: (b, num_patches + 1, latent_size)
        b, n, _ = linear_projection.shape
        class_tokens = self.class_token.expand(b, -1, -1)
        linear_projection = torch.cat((class_tokens, linear_projection), dim=1)

        # Slice to n + 1 so smaller inputs than img_size still line up.
        pos_embed = self.pos_embedding[:, :n + 1, :]
        linear_projection = linear_projection + pos_embed

        return linear_projection

class EncoderBlock(nn.Module):
    """Pre-norm transformer encoder layer: self-attention + MLP, each residual.

    NOTE(review): a single LayerNorm instance is reused before both the
    attention and the MLP sub-layers; standard ViT blocks use two separate
    norms. Kept as-is to preserve behavior and checkpoint layout.
    """

    def __init__(self, args):
        super(EncoderBlock, self).__init__()

        self.latent_size = args.latent_size
        self.num_heads = args.num_heads
        self.dropout = args.dropout
        self.norm = nn.LayerNorm(self.latent_size)
        # batch_first=True keeps tensors in (batch, seq, dim) layout throughout.
        self.attention = nn.MultiheadAttention(self.latent_size, self.num_heads, dropout=self.dropout, batch_first=True)
        self.enc_MLP = nn.Sequential(
            nn.Linear(self.latent_size, self.latent_size * 4),
            nn.GELU(),
            nn.Dropout(self.dropout),
            nn.Linear(self.latent_size * 4, self.latent_size),
            nn.Dropout(self.dropout)
        )

    def forward(self, emb_patches):
        # Attention sub-layer with residual connection.
        normed = self.norm(emb_patches)
        attn_out, _ = self.attention(normed, normed, normed)
        residual = emb_patches + attn_out
        # MLP sub-layer with residual connection.
        mlp_out = self.enc_MLP(self.norm(residual))
        return residual + mlp_out

class ViT(nn.Module):
    """Vision Transformer classifier.

    Flow: input embedding -> num_encoders encoder blocks -> MLP head applied
    to the class token only. Output: (batch, num_classes) logits.
    """

    def __init__(self, args):
        super(ViT, self).__init__()

        self.num_encoders = args.num_encoders
        self.latent_size = args.latent_size
        self.num_classes = args.num_classes
        self.dropout = args.dropout

        self.embedding = InputEmbedding(args)
        # Encoder Stack: independent, identically-configured layers.
        self.encoders = nn.ModuleList([EncoderBlock(args) for _ in range(self.num_encoders)])
        self.MLPHead = nn.Sequential(
            nn.LayerNorm(self.latent_size),
            nn.Linear(self.latent_size, self.latent_size),
            nn.Linear(self.latent_size, self.num_classes),
        )

    def forward(self, test_input):
        # (batch, C, H, W) -> (batch, num_patches + 1, latent_size)
        tokens = self.embedding(test_input)

        # Run the token sequence through every encoder block in order.
        for encoder in self.encoders:
            tokens = encoder(tokens)

        # Classify from the class token (sequence position 0) only.
        class_token_embed = tokens[:, 0]
        return self.MLPHead(class_token_embed)


class TrainEval:
    """Run the training/validation loop and checkpoint the best model.

    Training iterations are wrapped by the module-level NPU profiler ``prof``
    (start/step/stop). The best weights (lowest validation loss) are saved to
    ``best-weights.pt``.
    """

    def __init__(self, args, model, train_dataloader, val_dataloader, optimizer, criterion, device):
        self.model = model
        self.train_dataloader = train_dataloader
        self.val_dataloader = val_dataloader
        self.optimizer = optimizer
        self.criterion = criterion
        # NOTE(review): hard-coded to a single epoch — args.epochs is ignored,
        # presumably to keep the profiling run short (the profiler schedule is
        # one-shot). Confirm before reusing for full training.
        self.epoch = 1
        self.device = device
        self.args = args

    def train_fn(self, current_epoch):
        """Train for one epoch; returns the mean loss over all batches."""
        self.model.train()
        total_loss = 0.0
        tk = tqdm(self.train_dataloader, desc="EPOCH" + "[TRAIN]" + str(current_epoch + 1) + "/" + str(self.epoch))
        prof.start()
        for t, data in enumerate(tk):
            images, labels = data
            images, labels = images.to(self.device), labels.to(self.device)
            self.optimizer.zero_grad()
            logits = self.model(images)

            loss = self.criterion(logits, labels)
            loss.backward()
            self.optimizer.step()

            total_loss += loss.item()
            # Advance the profiler schedule once per training iteration.
            prof.step()
            tk.set_postfix({"Loss": "%6f" % float(total_loss / (t + 1))})
            if self.args.dry_run:
                break
        prof.stop()
        return total_loss / len(self.train_dataloader)

    def eval_fn(self, current_epoch):
        """Validate for one epoch; returns the mean loss and shows accuracy."""
        self.model.eval()
        total_loss = 0.0
        tk = tqdm(self.val_dataloader, desc="EPOCH" + "[VALID]" + str(current_epoch + 1) + "/" + str(self.epoch))

        correct = 0
        total = 0

        # Fix: disable autograd during validation — the original built (and
        # kept) computation graphs for every batch, wasting memory and compute.
        with torch.no_grad():
            for t, data in enumerate(tk):
                images, labels = data
                images, labels = images.to(self.device), labels.to(self.device)

                logits = self.model(images)

                # Predicted class = argmax over the class logits.
                predicted = torch.argmax(logits, dim=1)

                # Accumulate running accuracy.
                correct += (predicted == labels).sum().item()
                total += labels.size(0)

                loss = self.criterion(logits, labels)

                total_loss += loss.item()
                tk.set_postfix({"Loss": "%6f" % float(total_loss / (t + 1)), "Acc": "%6f" % float(correct / total)})
                if self.args.dry_run:
                    break

        return total_loss / len(self.val_dataloader)

    def train(self):
        """Full loop over self.epoch epochs, checkpointing on val-loss improvement."""
        best_valid_loss = np.inf
        best_train_loss = np.inf
        print(f'total epochs {self.epoch}')
        for i in range(self.epoch):
            train_loss = self.train_fn(i)
            val_loss = self.eval_fn(i)

            if val_loss < best_valid_loss:
                torch.save(self.model.state_dict(), "best-weights.pt")
                print("Saved Best Weights")
                best_valid_loss = val_loss
                best_train_loss = train_loss
        print(f"Training Loss : {best_train_loss}")
        print(f"Valid Loss : {best_valid_loss}")


def main():
    """Parse CLI args, build CIFAR-10 loaders and the ViT model, and train on NPU."""
    parser = argparse.ArgumentParser(description='Vision Transformer in PyTorch')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--patch-size', type=int, default=16,
                        help='patch size for images (default : 16)')
    parser.add_argument('--latent-size', type=int, default=768,
                        help='latent size (default : 768)')
    parser.add_argument('--n-channels', type=int, default=3,
                        help='number of channels in images (default : 3 for RGB)')
    parser.add_argument('--num-heads', type=int, default=12,
                        help='(default : 12)')
    parser.add_argument('--num-encoders', type=int, default=12,
                        help='number of encoders (default : 12)')
    # Fix: dropout is fractional — type=int would truncate any CLI value to 0.
    parser.add_argument('--dropout', type=float, default=0.1,
                        help='dropout value (default : 0.1)')
    parser.add_argument('--img-size', type=int, default=224,
                        help='image size to be reshaped to (default : 224)')
    parser.add_argument('--num-classes', type=int, default=10,
                        help='number of classes in dataset (default : 10 for CIFAR10)')
    parser.add_argument('--epochs', type=int, default=100,
                        help='number of epochs (default : 100)')
    parser.add_argument('--lr', type=float, default=1e-4,
                        help='base learning rate (default : 1e-4)')
    # Fix: weight decay is fractional — type=int would reject e.g. 0.03.
    parser.add_argument('--weight-decay', type=float, default=3e-2,
                        help='weight decay value (default : 0.03)')
    parser.add_argument('--batch-size', type=int, default=64,
                        help='batch size (default : 64)')
    parser.add_argument('--dry-run', action='store_true', default=False,
                        help='quickly check a single pass')
    args = parser.parse_args()

    # Single Ascend NPU run. NOTE(review): --no-cuda is currently ignored.
    device = torch.device("npu:0")

    transforms = Compose([
        Resize((args.img_size, args.img_size)),
        ToTensor()
    ])
    train_data = torchvision.datasets.CIFAR10(root='./cifar10', train=True, download=True, transform=transforms)
    valid_data = torchvision.datasets.CIFAR10(root='./cifar10', train=False, download=True, transform=transforms)
    train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=True)
    valid_loader = DataLoader(valid_data, batch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=True)
    # Model modules read their target device from args.
    args.device = device

    model = ViT(args).to(device)
    print(args)

    # NPU fused AdamW (replaces torch.optim.AdamW for better NPU throughput).
    # NOTE(review): args.weight_decay is not passed to the optimizer — confirm intended.
    optimizer = torch_npu.optim.NpuFusedAdamW(model.parameters(), lr=args.lr)

    criterion = nn.CrossEntropyLoss()

    TrainEval(args, model, train_loader, valid_loader, optimizer, criterion, device).train()


if __name__ == "__main__":
    print("---------")
    # NPU runtime settings applied before training starts.
    # Presumably forces standard (non-private) tensor formats on the NPU — confirm
    # against the torch_npu docs for this CANN version.
    torch.npu.config.allow_internal_format = False
    # Run operators eagerly without JIT compilation on the NPU.
    torch.npu.set_compile_mode(jit_compile=False)
    main()



