import json
import os.path
import re

import torch
from attr.validators import max_len
from datashader import examples
from sqlalchemy.sql.functions import random
from torch import nn, candidate
import torch.utils.data.dataset
from torch.utils.data import Dataset

import dltools
import random

from NLP_selfAtttention import batch_size


# Build the BERT input format: tokens plus segment ids
def get_tokens_and_segments(token_a, token_b=None):
    """Concatenate one or two token sequences with <cls>/<sep> markers.

    Returns (tokens, segments) where segments holds 0 for the first
    sentence (including <cls> and its trailing <sep>) and 1 for the
    second sentence (including its trailing <sep>).
    """
    out_tokens = ["<cls>"] + token_a + ["<sep>"]
    out_segments = [0] * len(out_tokens)
    if token_b is not None:
        second_part = token_b + ["<sep>"]
        out_tokens = out_tokens + second_part
        out_segments = out_segments + [1] * len(second_part)
    return out_tokens, out_segments


# Smoke-test the input builder on dummy token ids, then peek at a random tensor.
demo_pair = get_tokens_and_segments([1, 2, 3], [4, 5, 6])
print(demo_pair)

print(f"torch.randn(1, 100, 20):{torch.randn(1, 100, 20)}")


class BERTEncoder(nn.Module):
    """BERT encoder: token + segment + learned positional embeddings
    followed by a stack of transformer encoder blocks."""

    def __init__(self, vocab_size, num_hiddens, norm_shape, ffn_num_hiddens, num_heads, num_layers, dropout,
                 max_len=1000, key_size=768, query_size=768, value_size=768, **kwargs):
        super().__init__(**kwargs)
        self.token_embedding = nn.Embedding(vocab_size, num_hiddens)
        # Only two segment ids exist: sentence A (0) and sentence B (1).
        self.segment_embedding = nn.Embedding(2, num_hiddens)
        self.blks = nn.Sequential()
        for layer_idx in range(num_layers):
            blk = dltools.EncoderBlock(key_size, query_size, value_size, num_hiddens, norm_shape,
                                       num_hiddens, ffn_num_hiddens, num_heads, dropout, use_bias=True)
            self.blks.add_module(str(layer_idx), blk)

        # Unlike the sinusoidal transformer, BERT learns its positional embedding.
        self.pos_embedding = nn.Parameter(torch.randn(1, max_len, num_hiddens))

    def forward(self, tokens, segments, valid_lens):
        # Input representation = token + segment + positional embeddings,
        # with positions sliced down to the actual sequence length.
        seq_len = tokens.shape[1]
        X = self.token_embedding(tokens) + self.segment_embedding(segments)
        X = X + self.pos_embedding.data[:, :seq_len, :]
        for blk in self.blks:
            X = blk(X, valid_lens)
        return X


# Hyper-parameters for a demo encoder (BERT-base-like widths, 2 layers).
vocab_size = 10000
num_hiddens = 768
ffn_num_hiddens = 1024
num_heads = 4
norm_shape = [768]
ffn_num_input = 768
num_layers = 2
dropout = 0.1
encoder_1 = BERTEncoder(vocab_size, num_hiddens, norm_shape, ffn_num_hiddens, num_heads, num_layers, dropout)

# A toy batch: two 8-token sequences with their segment ids.
tokens = torch.randint(0, vocab_size, (2, 8))
segments = torch.tensor([[0, 0, 0, 0, 1, 1, 1, 1], [0, 0, 0, 0, 0, 1, 1, 1]])
encoder_1_X = encoder_1(tokens, segments, None)
print(f"encoder_1_X.shape:{encoder_1_X.shape}")


# Masked Language Modeling (MLM) pretraining task
class MaskLM(nn.Module):
    """Masked-language-model head: gathers the hidden states at the masked
    positions and scores each of them against the whole vocabulary."""

    def __init__(self, vocab_size, num_hiddens, num_inputs=768, **kwargs):
        super().__init__(**kwargs)
        # With an int argument, LayerNorm normalizes over the last dimension;
        # a shape list would make it normalize over the flattened trailing dims.
        self.mlp = nn.Sequential(
            nn.Linear(num_inputs, num_hiddens),
            nn.ReLU(),
            nn.LayerNorm(num_hiddens),
            nn.Linear(num_hiddens, vocab_size)
        )

    def forward(self, X, pred_positions):
        """X: (batch, seq, num_inputs); pred_positions: (batch, num_preds).

        Returns logits of shape (batch, num_preds, vocab_size).
        """
        n_preds = pred_positions.shape[1]
        flat_positions = pred_positions.reshape(-1)
        n_batch = X.shape[0]
        # Row index per prediction: [0, 1] -> [0, 0, 0, 1, 1, 1] for 3 preds/row.
        rows = torch.repeat_interleave(torch.arange(0, n_batch), n_preds)
        # Gather the encoded vectors at the prediction positions; thanks to
        # self-attention each position already carries whole-sentence context.
        picked = X[rows, flat_positions]
        picked = picked.reshape((n_batch, n_preds, -1))
        return self.mlp(picked)


# Fancy-indexing demo: pick (row, position) pairs out of the encoded batch.
temp = encoder_1_X[[1, 0, 1], [1, 1, 1]]
print(f"temp:{temp}")

# Run the MLM head on three prediction positions per sequence.
loss = nn.CrossEntropyLoss()
mlm = MaskLM(vocab_size, num_hiddens)
mlm_positions = torch.tensor([[1, 2, 5], [3, 6, 7]])
mlm_Y = torch.tensor([[891, 156, 8897], [1001, 8976, 7749]])
mlm_Y_hat = mlm(encoder_1_X, mlm_positions)
print(f"mlm_Y_hat.shape:{mlm_Y_hat.shape}")

mlm_l = loss(mlm_Y_hat.reshape(-1, vocab_size), mlm_Y.reshape(-1))
# CrossEntropyLoss wants the class dimension (vocab_size) second: (N, C, ...).
# Either merge batch and positions into one leading dim (done here) or permute:
# mlm_l = loss(mlm_Y_hat.permute(0,2,1), mlm_Y)
print(f"mlm_l.shape:{mlm_l.shape}")


# Next sentence prediction (NSP) task
class NextSentencePred(nn.Module):
    """Binary classifier head for the next-sentence-prediction task."""

    def __init__(self, num_inputs, **kwargs):
        super().__init__(**kwargs)
        # Two classes: "is the next sentence" vs "is not".
        self.output = nn.Linear(num_inputs, 2)

    def forward(self, X):
        """X: (batch_size, num_inputs) -> logits of shape (batch_size, 2)."""
        return self.output(X)


# Demo of the NSP head on the flattened encoder output. (The full model
# instead feeds only the <cls> position through a pooling layer.)
encoder_1_X = torch.flatten(encoder_1_X, start_dim=1)
nsp = NextSentencePred(encoder_1_X.shape[-1])
nsp_Y_hat = nsp(encoder_1_X)
print(f"nsp_Y_hat.shape:{nsp_Y_hat.shape}")

nsp_Y = torch.tensor([0, 1])
nsp_l = loss(nsp_Y_hat, nsp_Y)


# Putting it all together: the full BERT pretraining model
class BERTModel(nn.Module):
    """Full BERT pretraining model: encoder plus the MLM and NSP heads."""

    def __init__(self, vocab_size, num_hiddens, norm_shape, ffn_num_hiddens, num_heads, num_layers, dropout,
                 max_len=1000, key_size=768, query_size=768, value_size=768,
                 hid_in_features=768, mlm_in_features=768, nsp_in_features=768, **kwargs):
        super().__init__(**kwargs)
        self.encoder = BERTEncoder(vocab_size, num_hiddens, norm_shape, ffn_num_hiddens, num_heads, num_layers, dropout,
                                   max_len, key_size, query_size, value_size)
        # Pooler: projects the <cls> hidden state before the NSP head.
        self.hidden = nn.Sequential(nn.Linear(hid_in_features, num_hiddens), nn.Tanh())
        self.mlm = MaskLM(vocab_size, num_hiddens, mlm_in_features)
        self.nsp = NextSentencePred(nsp_in_features)

    def forward(self, tokens, segments, valid_lens=None, pred_positions=None):
        """Return (encoded_X, mlm_Y_hat or None, nsp_Y_hat)."""
        encoded = self.encoder(tokens, segments, valid_lens)
        # MLM logits are produced only during pretraining, when positions are given.
        mlm_logits = self.mlm(encoded, pred_positions) if pred_positions is not None else None
        # With self-attention every position sees the whole sequence, so
        # position 0 (<cls>) already summarizes the pair — use it for NSP.
        nsp_logits = self.nsp(self.hidden(encoded[:, 0, :]))
        return encoded, mlm_logits, nsp_logits


# random.shuffle permutes only the first (outermost) dimension;
# the inner lists stay intact.
testArrar = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
random.shuffle(testArrar)
print(testArrar)

# List "+" concatenates and "*" repeats.
test1 = [1, 2, 3]
test2 = [1 for _ in range(6)]
test3 = [*test1, *test2]
print(f"test2:{test2}")
print(f"test3:{test3}")


def _read_wiki(data_dir, text_name):
    file_name = os.path.join(data_dir, text_name)
    with open(file_name, "r", encoding="UTF-8") as f:
        lines = f.readlines()

    paragraphs = [line.lower().strip().split(" . ") for line in lines if line if " . " in line]
    random.shuffle(paragraphs)
    return paragraphs


# res = _read_wiki("/Users/weiliang/Documents/learn/人工智能/NLP/data/wikitext-2", "wiki.train.tokens")
# print(res)

# Generate data for the next-sentence-prediction (NSP) task
def _get_next_sentence(sentence, next_sentence, paragraphs):
    if random.random() < 0.5:
        is_next = True
    else:
        is_next = False
        next_sentence = random.choice(random.choice(paragraphs))
    return sentence, next_sentence, is_next


def _get_nsp_data_from_paragraph(paragraph, paragraphs, vocab, max_len):
    """Build NSP examples from the consecutive sentence pairs of one paragraph.

    Pairs whose combined length (plus the <cls> and two <sep> specials)
    would exceed max_len are skipped.
    Returns a list of (tokens, segments, is_next) triples.
    """
    examples = []
    for idx in range(len(paragraph) - 1):
        sent_a, sent_b, is_next = _get_next_sentence(paragraph[idx], paragraph[idx + 1], paragraphs)
        # +3 accounts for one <cls> and two <sep> special tokens.
        if len(sent_a) + len(sent_b) + 3 > max_len:
            continue
        tokens, segments = get_tokens_and_segments(sent_a, sent_b)
        examples.append((tokens, segments, is_next))
    return examples


# Generate data for the masked-language-model (MLM) task
def _replace_mlm_tokens(tokens, candidate_pred_positions, vocab):
    # 创建副本进行替换
    mlm_input_tokens = [token for token in tokens]
    pred_positions_and_labels = []
    for mlm_pred_position in candidate_pred_positions:
        # 80% 替换成<mask>
        r_num = random.random()
        if r_num < 0.8:
            masked_token = "<mask>"
        elif r_num < 0.9:
            masked_token = tokens[mlm_pred_position]
        else:
            masked_token = random.choice(vocab.idx_to_token)

        mlm_input_tokens[mlm_pred_position] = masked_token
        pred_positions_and_labels.append((mlm_pred_position, tokens[mlm_pred_position]))
    return mlm_input_tokens, pred_positions_and_labels


def _get_mlm_data_from_tokens(tokens, vocab):
    """Create one MLM training example from a token sequence.

    Returns (input token ids, sorted prediction positions, label token ids).
    """
    candidate_pred_positions = []
    for i, token in enumerate(tokens):
        # Special tokens are never masked or predicted.
        # BUG FIX: the original compared against the bare string "sep", so
        # "<sep>" tokens could be selected for masking; match "<sep>" instead.
        if token in ["<cls>", "<sep>"]:
            continue
        candidate_pred_positions.append(i)
    # BERT masks/predicts 15% of the tokens, with a minimum of one.
    num_mlm_preds = max(1, round(len(tokens) * 0.15))
    # Shuffle so the masked subset is chosen uniformly at random.
    random.shuffle(candidate_pred_positions)
    mlm_input_tokens, pred_positions_and_labels = _replace_mlm_tokens(
        tokens, candidate_pred_positions[:num_mlm_preds], vocab)
    pred_positions_and_labels = sorted(pred_positions_and_labels, key=lambda x: x[0])
    pred_positions = [pos for pos, _ in pred_positions_and_labels]
    pred_labels = [label for _, label in pred_positions_and_labels]
    return vocab[mlm_input_tokens], pred_positions, vocab[pred_labels]


# Convert the text into a padded pretraining dataset
def _pad_bert_inputs(examples, max_len, vocab):
    max_num_mlm_preds = round(max_len * 0.15)
    all_tokens_ids, all_segments, valid_lens = [], [], []
    all_pred_positions, all_mlm_weights, all_mlm_labels = [], [], []
    nsp_labels = []
    for (token_ids, pred_positions, pred_label_ids, segments, is_next) in examples:
        pad_size = max_len - len(token_ids)
        all_tokens_ids.append(torch.tensor(token_ids + [vocab["<pad>"]] * pad_size, dtype=torch.long))
        all_segments.append(torch.tensor(segments + [0] * pad_size, dtype=torch.long))
        # valid_lens 不包含<pad>的计数
        valid_lens.append(torch.tensor(len(token_ids), dtype=torch.float32))
        # pred_positions的长度是句子原本长度15%， max_num_mlm_preds是句子填充后长度的15%
        mlm_pad_size = max_num_mlm_preds - len(pred_positions)
        all_pred_positions.append(torch.tensor(pred_positions + [0] * mlm_pad_size, dtype=torch.long))
        all_mlm_weights.append(torch.tensor([1.0] * len(pred_positions) + [0.0] * mlm_pad_size, dtype=torch.float32))
        all_mlm_labels.append(torch.tensor(pred_label_ids + [0] * mlm_pad_size, dtype=torch.long))
        nsp_labels.append(torch.tensor(is_next, dtype=torch.long))
    return all_tokens_ids, all_segments, valid_lens, all_pred_positions, all_mlm_weights, all_mlm_labels, nsp_labels


class _WikiTextDataset(Dataset):
    """Dataset turning WikiText paragraphs into padded BERT pretraining
    examples (NSP sentence pairs plus MLM masking)."""

    def __init__(self, paragraphs, max_len):
        # paragraphs: list of paragraphs, each a list of sentence strings.
        paragraphs = [dltools.tokenize(paragraph, "word") for paragraph in paragraphs]
        sentences = [sentence for paragraph in paragraphs for sentence in paragraph]
        # BUG FIX: the reserved separator token was misspelled "<seq>", so the
        # "<sep>" tokens produced by get_tokens_and_segments were never
        # reserved in the vocabulary; reserve "<sep>" instead.
        self.vocab = dltools.Vocab(sentences, min_freq=5, reserved_tokens=["<pad>", "<mask>", "<cls>", "<sep>"])

        # Build the next-sentence-prediction (nsp) examples.
        examples = []
        for paragraph in paragraphs:
            # extend flattens each paragraph's example list into the overall list
            # (append would add the list itself as a single element).
            examples.extend(_get_nsp_data_from_paragraph(paragraph, paragraphs, self.vocab, max_len))

        # Add masked-language-model (mlm) data to every example.
        examples = [_get_mlm_data_from_tokens(_tokens, self.vocab) + (_segments, _is_next) for
                    (_tokens, _segments, _is_next) in examples]

        # Pad everything to max_len.
        self.all_tokens_ids, self.all_segments, self.valid_lens, self.all_pred_positions, self.all_mlm_weights, self.all_mlm_labels, self.nsp_labels = _pad_bert_inputs(
            examples, max_len, self.vocab)

    def __getitem__(self, idx):
        return self.all_tokens_ids[idx], self.all_segments[idx], self.valid_lens[idx], self.all_pred_positions[idx], \
            self.all_mlm_weights[idx], self.all_mlm_labels[idx], self.nsp_labels[idx]

    def __len__(self):
        return len(self.all_tokens_ids)


def load_data_wiki(batch_size, max_len):
    """Load the WikiText-2 pretraining data; return (DataLoader, vocab)."""
    # NOTE(review): the data directory is hard-coded relative to the CWD.
    paragraphs = _read_wiki("./MNIST/wikitext-2", "wiki.train.tokens")
    train_set = _WikiTextDataset(paragraphs, max_len)
    train_iter = torch.utils.data.DataLoader(
        train_set, batch_size, shuffle=True,
        num_workers=dltools.get_dataloader_workers())
    return train_iter, train_set.vocab


batch_size, max_len = 1, 64

train_iter, vocab = load_data_wiki(batch_size, max_len)
# Inspect a single mini-batch from the pretraining pipeline.
first_batch = next(iter(train_iter))
field_names = ["X_tokens", "X_segments", "X_valid_len", "X_pred_positions",
               "X_mlm_weights", "X_mlm_labels", "X_nsp_label"]
for field_name, field in zip(field_names, first_batch):
    print(f"{field_name}.shape:{field.shape}")
    print(f"{field_name}:{field}")

print(f"len(vocab):{len(vocab)}")

# Indexing sanity check: slice out position 0 of every sequence.
probe = torch.ones((3, 4, 5))
print(probe[:, 0, :])

# A small BERT (128 hidden units, 2 heads, 2 layers) for quick pretraining.
net_1 = BERTModel(len(vocab), num_hiddens=128, norm_shape=128, ffn_num_hiddens=128,
                  num_heads=2, num_layers=2, dropout=0.1,
                  key_size=128, query_size=128, value_size=128,
                  hid_in_features=128, mlm_in_features=128, nsp_in_features=128)

# NOTE(review): "divice" is a typo for "device" but is referenced throughout
# the rest of the file, so the name is kept.
divice = dltools.try_gpu()
loss = nn.CrossEntropyLoss(reduction="none")


def _get_bert_batch_loss_bert(loss, vocab_size, mlm_Y_hat, nsp_Y_hat,
                              mlm_weights_X, mlm_Y, nsp_Y):
    # print(f"----------")
    # 计算屏蔽语言模型(mlm)损失
    mlm_l = loss(mlm_Y_hat.reshape(-1, vocab_size), mlm_Y.reshape(-1))
    # print(f"mlm_l 1:{mlm_l}")
    mlm_l = mlm_l * mlm_weights_X.reshape(-1)
    # print(f"mlm_l 2:{mlm_l}")
    mlm_l = mlm_l.sum() / (mlm_weights_X.sum() + 1e-8)
    # print(f"mlm_l 3:{mlm_l}")
    # 计算下一句预测任务的损失
    nsp_l = loss(nsp_Y_hat, nsp_Y).sum()
    # print(f"nsp_l:{nsp_l}")
    l = mlm_l + nsp_l
    return mlm_l, nsp_l, l


def train_bert(train_iter, net, loss, vocab_size, device, num_steps):
    """Pretrain BERT for num_steps mini-batches, logging losses every 10 steps."""
    net = net.to(device)
    optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
    step, timer = 0, dltools.Timer()
    # Accumulates: MLM loss, NSP loss, sentence-pair count, batch count.
    metric = dltools.Accumulator(4)
    for tokens_X, segments_X, valid_lens_X, pred_positions_X, mlm_weights_X, mlm_Y, nsp_Y in train_iter:
        # Move the whole batch to the training device.
        (tokens_X, segments_X, valid_lens_X, pred_positions_X,
         mlm_weights_X, mlm_Y, nsp_Y) = [t.to(device) for t in (
            tokens_X, segments_X, valid_lens_X, pred_positions_X,
            mlm_weights_X, mlm_Y, nsp_Y)]
        optimizer.zero_grad()
        timer.start()
        _, mlm_Y_hat, nsp_Y_hat = net(tokens_X, segments_X, valid_lens_X.reshape(-1), pred_positions_X)
        mlm_l, nsp_l, total = _get_bert_batch_loss_bert(
            loss, vocab_size, mlm_Y_hat, nsp_Y_hat, mlm_weights_X, mlm_Y, nsp_Y)
        total.backward()
        optimizer.step()
        timer.stop()
        step += 1
        metric.add(mlm_l, nsp_l, tokens_X.shape[0], 1)
        if step % 10 == 0:
            print(f"step:{step} mlm_l:{metric[0] / metric[3]:.3f}, nsp_l:{metric[1] / metric[3]:.3f}")
        if step >= num_steps:
            break

    print(f"{metric[2] / timer.sum():.1f} sentence pairs/sec")


train_bert(train_iter, net_1, loss, len(vocab), divice, 100)


def get_bert_encoding(net, tokens_a, tokens_b=None):
    """Encode one sentence (or a pair) with the trained BERT and return the
    per-token representations, shape (1, seq_len, num_hiddens).

    NOTE(review): relies on the module-level `vocab` and `divice` globals.
    """
    tokens, segments = get_tokens_and_segments(tokens_a, tokens_b)
    token_ids = torch.tensor(vocab[tokens]).unsqueeze(0).to(divice)
    segment_ids = torch.tensor(segments).unsqueeze(0).to(divice)
    valid_len = torch.tensor([len(tokens)]).to(divice)
    encoded_X, _, _ = net(token_ids, segment_ids, valid_len)
    return encoded_X


# Contextual embeddings: the same word "crane" gets a different vector
# depending on the sentence it appears in.
tokens_a = ["a", "crane", "is", "flying"]
encoded_text_a = get_bert_encoding(net_1, tokens_a)
# Sequence: "<cls>", "a", "crane", "is", "flying", "<sep>" -> index 2 is "crane".
encoded_text_a_crane = encoded_text_a[:, 2, :]
print(f"encoded_text_a_crane.shape:{encoded_text_a_crane.shape}")
print(f"encoded_text_a_crane:{encoded_text_a_crane}")

tokens_a_1, tokens_b_2 = ["a", "crane", "driver", "came"], ["he", "just", "left"]
encoded_pair = get_bert_encoding(net_1, tokens_a_1, tokens_b_2)
encoded_pair_crane = encoded_pair[:, 2, :]
print(f"encoded_pair_crane.shape:{encoded_pair_crane.shape}")
print(f"encoded_pair_crane:{encoded_pair_crane}")


def load_pretrained_model():
    """Build a small BERT and load the pretrained weights from disk.

    Returns (model, vocab). The architecture must match the checkpoint:
    256 hidden units, 4 heads, 2 layers, max_len 512.
    """
    data_dir = "./MNIST/bert.small.torch/"
    vocab = dltools.Vocab()
    # BUG FIX: the original used json.load(open(...)), which leaks the file
    # handle; a with-block closes it deterministically.
    with open(os.path.join(data_dir, "vocab.json"), encoding="UTF-8") as f:
        vocab.idx_to_token = json.load(f)
    vocab.token_to_idx = {token: idx for idx, token in enumerate(vocab.idx_to_token)}
    bert_1 = BERTModel(vocab_size=len(vocab), num_hiddens=256, norm_shape=256, ffn_num_hiddens=512, num_heads=4,
                       num_layers=2, dropout=0.1,
                       max_len=512, key_size=256, query_size=256, value_size=256,
                       hid_in_features=256, mlm_in_features=256, nsp_in_features=256)
    # map_location lets the checkpoint load on CPU-only machines; the caller
    # moves the model to the training device afterwards.
    state = torch.load(os.path.join(data_dir, "pretrained.params"), map_location=torch.device("cpu"))
    bert_1.load_state_dict(state)
    return bert_1, vocab


bert_1, vocab_1 = load_pretrained_model()

# Stanford Natural Language Inference (SNLI)
class SNLIBertDataSet(torch.utils.data.Dataset):
    """SNLI dataset formatted as BERT premise/hypothesis pairs.

    Each item is (token ids, segment ids, valid length, label), with token
    ids padded by <pad> up to max_len.
    """

    def __init__(self, dataSet, max_len, vocab=None):
        self.vocab = vocab
        self.max_len = max_len
        premises, hypothesis, labels = dataSet
        self.labels = torch.tensor(labels)
        pair_tokens = []
        for i, premise in enumerate(premises):
            pair_tokens.append([premise.lower().split(), hypothesis[i].lower().split()])
        self._preprocess(pair_tokens)

    def _preprocess(self, all_premise_hypotheses_tokens):
        all_token_ids = []
        all_segments = []
        valid_lens = []
        for p_tokens, h_tokens in all_premise_hypotheses_tokens:
            # Trim the pair in place so it fits alongside the 3 special tokens.
            self._truncate_pair_of_tokens(p_tokens, h_tokens)
            tokens, segments = get_tokens_and_segments(p_tokens, h_tokens)
            n_valid = len(tokens)
            n_pad = self.max_len - n_valid
            all_token_ids.append(self.vocab[tokens] + [self.vocab["<pad>"]] * n_pad)
            all_segments.append(segments + [0] * n_pad)
            valid_lens.append(n_valid)
        self.all_token_ids = torch.tensor(all_token_ids, dtype=torch.long)
        self.all_segments = torch.tensor(all_segments, dtype=torch.long)
        self.valid_lens = torch.tensor(valid_lens)
        print(f"read:{len(all_token_ids)} examples")

    def __getitem__(self, item):
        return self.all_token_ids[item], self.all_segments[item], self.valid_lens[item], self.labels[item]

    def __len__(self):
        return len(self.all_token_ids)

    def _truncate_pair_of_tokens(self, p_tokens, h_tokens):
        # Drop tokens from the longer side until <cls> + pair + 2x <sep> fits.
        while len(p_tokens) + len(h_tokens) > self.max_len - 3:
            longer = p_tokens if len(p_tokens) > len(h_tokens) else h_tokens
            longer.pop()


def read_snli(data_dir, is_train=True):
    """Read SNLI premises, hypotheses and labels from the tab-separated files.

    BUG FIX: both branches of the original ternary selected
    "snli_1.0_dev.txt", so is_train had no effect; now the train split is
    read for training and the test split otherwise.
    """
    def extract_text(s):
        # Strip the binary-parse parentheses and collapse repeated spaces.
        s = re.sub("\\(", "", s)
        s = re.sub("\\)", "", s)
        s = re.sub("\\s{2,}", " ", s)
        return s.strip()

    label_set = {"entailment": 0, "contradiction": 1, "neutral": 2}
    file_name = os.path.join(data_dir, "snli_1.0_train.txt" if is_train else "snli_1.0_test.txt")
    with open(file_name, "r") as file:
        # Skip the header row; columns: label, premise parse, hypothesis parse, ...
        rows = [row.split("\t") for row in file.readlines()[1:]]
    # Rows with an unresolved label ("-") are dropped.
    premises = [extract_text(row[1]) for row in rows if row[0] in label_set]
    hypothesis = [extract_text(row[2]) for row in rows if row[0] in label_set]
    labels = [label_set[row[0]] for row in rows if row[0] in label_set]
    return premises, hypothesis, labels


batch_size, max_len = 64, 128
train_set_1 = SNLIBertDataSet(read_snli("./MNIST/snli_1.0"), max_len, vocab_1)
test_set_1 = SNLIBertDataSet(read_snli("./MNIST/snli_1.0", False), max_len, vocab_1)
train_iter1 = torch.utils.data.DataLoader(train_set_1, batch_size, shuffle=True)
test_iter1 = torch.utils.data.DataLoader(test_set_1, batch_size, shuffle=False)

# Pull (and discard) one batch to smoke-test the fine-tuning pipeline.
_probe_batch = next(iter(train_iter1))
# print(f"temp:{_probe_batch}")
class BERTClassifier(nn.Module):
    """3-way SNLI classifier on top of a pretrained BERT.

    Reuses the pretrained encoder and pooler ("hidden") layers; only the
    final 256 -> 3 projection is trained from scratch.
    """

    def __init__(self, bert):
        super().__init__()
        self.encoder = bert.encoder
        self.hidden = bert.hidden
        self.output = nn.Linear(256, 3)

    def forward(self, tokens_X, segments_X, valid_lens_X):
        encoded = self.encoder(tokens_X, segments_X, valid_lens_X)
        # Classify from the <cls> position (index 0) representation.
        cls_state = encoded[:, 0, :]
        return self.output(self.hidden(cls_state))

# Fine-tuning setup: fresh classifier head, small-LR Adam, mean-reduced CE loss.
net_3 = BERTClassifier(bert_1)
lr, num_epochs=1e-4, 5
trainer = torch.optim.Adam(net_3.parameters(), lr=lr)
loss_fn = nn.CrossEntropyLoss()

# Train (fine-tune) the classification model
def train_fine_tune(net, train_iter, test_iter, loss_fn, trainer, num_epochs, device):
    """Fine-tune the classifier on SNLI.

    loss_fn must be CrossEntropyLoss with the default mean reduction.
    Prints the per-example loss and training accuracy after each epoch.
    """
    timer, num_batches = dltools.Timer(), len(train_iter)
    net = net.to(device)
    for epoch in range(num_epochs):
        # Accumulates: summed loss, correct predictions, example count.
        metric = dltools.Accumulator(3)
        net.train()
        for i, (token_ids, segment, valid_len, label) in enumerate(train_iter):
            timer.start()
            token_ids = token_ids.to(device)
            segment = segment.to(device)
            valid_len = valid_len.to(device)
            label = label.to(device)
            trainer.zero_grad()
            pred = net(token_ids, segment, valid_len)
            l = loss_fn(pred, label)
            l.backward()
            trainer.step()
            # BUG FIX: l is the batch-MEAN loss, so accumulating it raw and
            # then dividing by the sample count under-reported the loss by a
            # factor of ~batch_size; weight it by the batch size first.
            metric.add(float(l) * len(label), dltools.accuracy(pred, label), len(label))
        net.eval()
        # test_acc = dltools.evaluate_accuracy_gpu(net, test_iter, device)
        print(f"loss:{metric[0]/metric[2]:.3f} train acc:{metric[1]/metric[2]:.3f}")
        # print(f"test_acc:{test_acc}")
        print("------------------")

train_fine_tune(net_3, train_iter1, test_iter1, loss_fn, trainer, num_epochs, divice)