import math
import sys
import pickle
import time
from torch.utils.data import Dataset, DataLoader, TensorDataset
from model import CLSTM
import numpy as np
from typing import List, Tuple, Dict, Set, Union
from tqdm import tqdm
import torch
import torch.nn.utils
from pathlib import Path
from config import data_dir, batch_size, clip_grad, output_dir, hidden_size, lr, epoches, \
                src_path_standard, tgt_path_standard

def train(src_path, tgt_path):
    """Train the CLSTM model on pre-serialized tensor data.

    Args:
        src_path: path to the source tensor file (e.g. "./src_data.pkl"),
            loadable with ``torch.load``.
        tgt_path: path to the matching target tensor file.

    Side effects:
        Prints progress to stderr every ``log_every`` iterations and
        overwrites "model_dict_clstm.pkl" with the model state dict after
        every epoch.
    """
    # NOTE(review): despite the .pkl suffix these appear to be torch-serialized
    # tensors (loaded with torch.load), not raw pickle files.
    src_data = torch.load(src_path)
    tgt_data = torch.load(tgt_path)

    dataset = TensorDataset(src_data, tgt_data)
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"using device: {device}")

    model = CLSTM(hidden_size, device)
    model.train()
    model = model.to(device)

    # Uniform weight initialization in [-0.1, 0.1].
    uniform_init = 0.1
    for p in model.parameters():
        p.data.uniform_(-uniform_init, uniform_init)

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    begin_time = time.time()
    train_iter = 0
    report_loss = 0.0
    report_examples = 0
    log_every = 10

    # Bounded loop instead of `while True` + break: the original looped
    # forever if `epoches` < 1.
    for epoch in range(1, int(epoches) + 1):
        for src_d, tgt_d in data_loader:
            train_iter += 1
            # Fix: move the batch onto the model's device; without this the
            # forward pass mixes CPU tensors with a CUDA model when a GPU
            # is available.
            src_d = src_d.to(device)
            tgt_d = tgt_d.to(device)

            optimizer.zero_grad()
            loss = model(src_d, tgt_d)
            batch_loss = loss.item()
            loss.backward()
            # Clip gradient norm to stabilize training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), clip_grad)
            optimizer.step()

            report_loss += batch_loss
            # Fix: use the actual batch size — the last batch of an epoch may
            # be smaller than `batch_size`, which skewed the reported average.
            report_examples += src_d.size(0)

            if train_iter % log_every == 0:
                print('epoch %d, iter %d, avg. loss %.5f ' \
                      'time elapsed %.2f sec' % (epoch, train_iter,
                                                report_loss / report_examples,
                                                time.time() - begin_time), file=sys.stderr)

                report_loss = 0.0
                report_examples = 0

        # Save a checkpoint once per epoch (overwrites the previous one).
        torch.save(model.state_dict(), "model_dict_clstm.pkl")

    print('reached maximum number of epochs!')
            
    
if __name__ == "__main__":
    # Entry point: train on the standard dataset paths declared in config.
    train(src_path=src_path_standard, tgt_path=tgt_path_standard)