import torch
from torch.utils.data import Dataset, DataLoader
from torch import nn
from src import *
from argparse import ArgumentParser
import json
import os
class TranslateDataset(Dataset):
    """Dataset of (english, chinese) sentence pairs read from a JSON-lines file.

    Each line is expected to be a JSON object with "english" and "chinese"
    keys; malformed or incomplete lines are skipped with a diagnostic.
    """

    def __init__(self, path, tokenizer):
        # Zero-argument super(): the original `super(Dataset, self).__init__()`
        # started the MRO lookup *after* Dataset, skipping its initializer.
        super().__init__()
        self.tokenizer = tokenizer
        self.data = []
        with open(path, 'r', encoding='utf8') as file:
            for line_no, line in enumerate(file, 1):
                try:
                    content = json.loads(line)
                    self.data.append((content["english"], content["chinese"]))
                except (json.JSONDecodeError, KeyError):
                    # Narrow except: the original bare `except:` also swallowed
                    # KeyboardInterrupt/SystemExit and hid which line was bad.
                    print(f'broken line {line_no}')

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # Tokenize lazily, per access; returns a (tokenized_en, tokenized_zh) pair.
        return self.tokenizer(self.data[idx][0]), self.tokenizer(self.data[idx][1])
    
def collate_fn(batch, num_classes):
    """Collate a batch of token-id items into one-hot tensors for the model.

    Args:
        batch: list of 4-tuples (x_ids, x_valid_len, y_ids, y_valid_len),
            where x_ids/y_ids are equal-length sequences of integer token ids.
            NOTE(review): TranslateDataset.__getitem__ returns 2-tuples — the
            tokenizer presumably yields (ids, valid_len) pairs that get
            flattened before reaching here; verify against the caller.
        num_classes: vocabulary size used as the one-hot depth.

    Returns:
        dict with keys 'x'/'y' (one-hot LongTensors of shape
        [batch, seq_len, num_classes]) and 'x_valid_len'/'y_valid_len'
        (float32 tensors of shape [batch]).
    """
    x = [item[0] for item in batch]
    x_valid_len = [item[1] for item in batch]
    y = [item[2] for item in batch]
    y_valid_len = [item[3] for item in batch]

    x_valid_len_tensor = torch.tensor(x_valid_len, dtype=torch.float32)
    y_valid_len_tensor = torch.tensor(y_valid_len, dtype=torch.float32)

    # one_hot only accepts integer tensors; the original float32 tensors made
    # nn.functional.one_hot raise a RuntimeError on the very first batch.
    x_tensor = torch.tensor(x, dtype=torch.long)
    y_tensor = torch.tensor(y, dtype=torch.long)
    x_one_hot = nn.functional.one_hot(x_tensor, num_classes=num_classes)
    y_one_hot = nn.functional.one_hot(y_tensor, num_classes=num_classes)

    return {
        'x': x_one_hot,
        'y': y_one_hot,
        'x_valid_len': x_valid_len_tensor,
        'y_valid_len': y_valid_len_tensor
    }
    
    
def main(args):
    """Train and evaluate NaiveTransformerTranslater on data under args.data_path.

    Expects `train.txt` and `test.txt` (JSON lines) plus `config.json`
    (with a 'vocab size' entry) in that directory. Prints per-epoch
    train/test losses.
    """
    train_path = os.path.join(args.data_path, 'train.txt')
    test_path = os.path.join(args.data_path, 'test.txt')
    config_path = os.path.join(args.data_path, 'config.json')

    # Load dataset metadata first: vocab_size drives both the model and the
    # one-hot encoding. (The original built the model with a hard-coded
    # vocab_size=5248 before the config was even read.)
    with open(config_path, 'r', encoding='utf-8') as file:
        content = json.load(file)
        vocab_size = content['vocab size']

    model = NaiveTransformerTranslater(n_encoder=12, n_decoder=12, hidden_dim=512,
                                       num_heads=8, vocab_size=vocab_size)

    # Build datasets and loaders.
    tokenizer = AutoTokenizer(len=args.seq_len)
    train_dataset = TranslateDataset(path=train_path, tokenizer=tokenizer)
    # The original omitted the tokenizer here, which raised a TypeError.
    test_dataset = TranslateDataset(path=test_path, tokenizer=tokenizer)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.num_workers,
                              collate_fn=lambda batch: collate_fn(batch, vocab_size))
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.num_workers,
                             collate_fn=lambda batch: collate_fn(batch, vocab_size))

    # Training setup.
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    loss_fn = nn.CrossEntropyLoss()
    model.to(device)

    # Training loop.
    for e in range(args.num_epoch):
        model.train()
        train_loss = 0.0
        test_loss = 0.0
        for data in train_loader:
            # Move the collated batch to the training device.
            x = data['x'].to(device)
            y = data['y'].to(device)
            x_valid_len = data['x_valid_len'].to(device)
            y_valid_len = data['y_valid_len'].to(device)

            optimizer.zero_grad()
            predict = model(x, y, x_valid_len, y_valid_len)
            loss = loss_fn(predict, y)
            loss.backward()
            optimizer.step()
            # Average of per-batch mean losses. (The original divided by the
            # dataset size, under-reporting by a factor of batch_size.)
            train_loss += loss.item() / len(train_loader)

        # Evaluation pass: eval mode disables dropout etc., and we must
        # iterate the *loader* — the original iterated the raw dataset,
        # which yields tuples with no 'x'/'y' keys and skips collation.
        model.eval()
        with torch.no_grad():
            for data in test_loader:
                x = data['x'].to(device)
                y = data['y'].to(device)
                x_valid_len = data['x_valid_len'].to(device)
                y_valid_len = data['y_valid_len'].to(device)

                predict = model(x, y, x_valid_len, y_valid_len)
                loss = loss_fn(predict, y)
                test_loss += loss.item() / len(test_loader)
        print(f"epoch = {e} train_loss = {train_loss} test_loss = {test_loss}")

if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--data_path', default='data', type=str, help='训练文件夹地址')
    parser.add_argument('--num_epoch', default=5, type=int)
    # type=int was missing on the three arguments below: any value supplied on
    # the command line would arrive as str and break DataLoader / the tokenizer.
    parser.add_argument('--batch_size', default=1, type=int)
    parser.add_argument('--lr', default=1e-4, type=float)
    parser.add_argument('--precision', default=torch.float32, help='训练精度')
    parser.add_argument('--num_workers', default=4, type=int, help='加载数据集的线程数')
    parser.add_argument('--seq_len', default=250, type=int, help='序列最大长度')
    args = parser.parse_args()
    main(args)

