import torch
import os
import sys

# Make the repository root (three directory levels above this test file)
# importable so the local `stensor` package shadows any installed copy.
# (Fix: `os` was previously imported twice.)
stensor_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../'))
sys.path.append(stensor_dir)
print("sys.path: ", sys.path)

def test_case():
    """Smoke-test distributed GPT pretraining for one optimizer step.

    Each rank trains on a fixed, rank-specific batch (rank 0 -> batch 0,
    rank 1 -> batch 1), runs a single forward/backward/step, and prints the
    loss plus timing.

    NOTE(review): this function reads the module-level globals ``rank`` and
    ``group`` that the ``__main__`` block creates before calling it; calling
    it any other way raises NameError — consider passing them as parameters.
    """
    import time

    import pandas as pd
    from transformers import AutoTokenizer

    # Only the names actually used below are imported (the original pulled in
    # numpy, pytest, and several unused stensor symbols, and imported pandas
    # twice).
    from stensor.model import GPTConfig, GPTModel, PretrainDataset
    from stensor import Tensor, Config, DistrubutedSGD
    from stensor.dataset import DataLoader

    tokenizer = AutoTokenizer.from_pretrained(r'./tests/resource/gpt/tokenizer')

    config = GPTConfig(batch_size=1, num_layers=8)
    df = pd.read_csv(r"./tests/resource/pretrain_data_100.csv")
    train_ds = PretrainDataset(df, tokenizer, max_length=config.max_seq_len)
    # shuffle=False keeps batch order deterministic so both ranks cache the
    # same batch 0 / batch 1 below.
    train_loader = DataLoader(train_ds, batch_size=config.batch_size, shuffle=False)
    print("dataset length: ", len(train_ds))

    device = "gpu" if Config.gpu_enable else "cpu"
    model = GPTModel(config)
    model.load_weights(r'../ckpt/pretrain_512_data_100.npz')
    model.to(device)
    model.train()
    optimizer = DistrubutedSGD(model.parameters(), group=group, lr=0.01)

    print("total_parameters_count: ", model.total_parameters_count())
    print("==============")
    for n, p in model.names_and_parameters():
        print(n, p.shape, p.dtype, p.device)
    print("==============")
    for n, m in model.names_and_submodules():
        print(n, m)

    accumulation_steps = 1
    epochs = 1
    start_time = time.time()

    # Cache the first two batches so each rank trains on a fixed input.
    # Assumes the loader yields at least two batches (100 rows / batch_size 1
    # here) — otherwise X2/Y2/loss_mask2 would be undefined for rank 1.
    for epoch in range(epochs):
        for step, (X, Y, loss_mask) in enumerate(train_loader):
            if step == 0:
                X1, Y1, loss_mask1 = Tensor(X).to(device), Tensor(Y).to(device), Tensor(loss_mask).to(device)
            if step == 1:
                X2, Y2, loss_mask2 = Tensor(X).to(device), Tensor(Y).to(device), Tensor(loss_mask).to(device)
            if step == 2:
                break

    for epoch in range(epochs):
        for step, (X, Y, loss_mask) in enumerate(train_loader):
            X, Y, loss_mask = Tensor(X).to(device), Tensor(Y).to(device), Tensor(loss_mask).to(device)

            # Each rank forwards its own cached batch; DistrubutedSGD is
            # presumably responsible for all-reducing gradients across
            # `group` on step() — confirm against stensor.
            if rank == 0:
                _, loss = model(X1, Y1, loss_mask=loss_mask1)
            if rank == 1:
                _, loss = model(X2, Y2, loss_mask=loss_mask2)
            loss.backward()

            # Update weights every `accumulation_steps` micro-batches.
            if (step + 1) % accumulation_steps == 0:
                optimizer.step()
                optimizer.zero_grad()

            # NOTE(review): `loss.item` is formatted with `:.6f`, so it must
            # be a numeric attribute/property, not a method — verify against
            # stensor.Tensor (torch's equivalent is the method `.item()`).
            print(f"rank:{torch.distributed.get_rank()} out of {torch.distributed.get_world_size()} worlds." \
                    f"epoch {epoch}, step {step}, loss: {loss.item:.6f}, lr: {optimizer.lr:.6f}, "\
                    f"cost time: {time.time() - start_time:.6f}")

            break  # a single training step is enough for this smoke test

        # Flush leftover gradients if the step count does not divide evenly
        # by accumulation_steps.
        if (step + 1) % accumulation_steps != 0:
            optimizer.step()
            optimizer.zero_grad()



if __name__ == '__main__':
    # Rank / world size are injected by the launcher (e.g. torchrun) via env.
    rank = int(os.getenv('RANK','0'))
    world_size = int(os.getenv('WORLD_SIZE','1'))

    # NCCL backend: requires one CUDA device per rank.
    torch.distributed.init_process_group(rank=rank, world_size=world_size, backend='nccl')
    # NOTE(review): the group is hard-coded to ranks [0, 1] while world_size
    # comes from the environment — confirm this test is strictly 2-GPU only.
    ranks = [0,1]
    group = torch.distributed.new_group(ranks)
    torch.cuda.set_device(rank)
    # Sanity-check collective communication before training: after all_reduce
    # every rank should see a tensor filled with world_size.
    tensor = torch.ones((3,4), dtype=torch.long, device='cuda')
    #tensor = Tensor(cp.ones((3,4)), device='gpu')
    torch.distributed.all_reduce(tensor)
    print(f'rank:{rank} {tensor}')
    print(f'Hello World from rank:{torch.distributed.get_rank()} out of {torch.distributed.get_world_size()} worlds.')
    # test_case() reads the module globals `rank` and `group` set above.
    test_case()