import time
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext.datasets import IMDB
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pad_sequence
from torchtext.data.functional import to_map_style_dataset
from torchtext.transforms import VocabTransform, ToTensor


# 1-negative 2-positive
def collate_batch(batch):
    """Collate (label, text) pairs into fixed-width tensors.

    Returns ``(labels, texts)`` where ``labels`` is a 1-D tensor of class
    ids and ``texts`` is a ``(batch, MAX_SEQ_LEN)`` tensor of token indices,
    right-padded with the ``<pad>`` index or truncated as needed.
    """
    labels, sequences = [], []
    for raw_label, raw_text in batch:
        labels.append(label_transform(raw_label))
        # Map the raw string to a sequence of vocabulary indices.
        sequences.append(torch.tensor(text_transform(raw_text)))

    # Pad every sequence up to the longest one in this batch.
    padded = pad_sequence(sequences, padding_value=vocab["<pad>"], batch_first=True)

    width = padded.size(1)
    if width > MAX_SEQ_LEN:
        # Longer than the model's positional table: keep the first MAX_SEQ_LEN tokens.
        padded = padded[:, :MAX_SEQ_LEN]
    elif width < MAX_SEQ_LEN:
        # Shorter: extend on the right with the pad index.
        padded = torch.nn.functional.pad(
            padded, (0, MAX_SEQ_LEN - width), value=vocab["<pad>"]
        )

    return torch.tensor(labels), padded

class TransformerModel(nn.Module):
    """Transformer encoder for sequence classification.

    Embeds token indices, adds a learned positional encoding, runs a stack
    of Transformer encoder layers, mean-pools over the time dimension, and
    projects to class logits.
    """

    def __init__(self, vocab_size, embed_dim, num_heads, num_layers, hidden_dim, num_classes, max_seq_len, dropout=0.5):
        super(TransformerModel, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)
        # Learned positional encoding, one vector per position up to max_seq_len.
        self.positional_encoding = nn.Parameter(torch.zeros(1, max_seq_len, embed_dim))
        # BUGFIX: the original constructed a full nn.Transformer (encoder AND
        # decoder) and called transformer(x, x), running a decoder with
        # cross-attention for a plain classification task — doubling the
        # parameter count for no benefit. An encoder stack is sufficient.
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=embed_dim, nhead=num_heads,
            dim_feedforward=hidden_dim, dropout=dropout,
        )
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
        self.dropout = nn.Dropout(dropout)
        self.max_seq_len = max_seq_len
        self.fc = nn.Linear(embed_dim, num_classes)

    def forward(self, x):
        """Map (batch, seq_len) token indices to (batch, num_classes) logits."""
        seq_len = x.size(1)
        if seq_len > self.max_seq_len:
            # Truncate inputs longer than the positional-encoding table.
            x = x[:, :self.max_seq_len]
            seq_len = self.max_seq_len

        x = self.embedding(x) + self.positional_encoding[:, :seq_len, :]
        x = self.dropout(x)
        x = x.permute(1, 0, 2)  # encoder expects (seq_len, batch, embed_dim)
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # back to (batch, seq_len, embed_dim)
        x = x.mean(dim=1)       # global average pooling over time
        # NOTE(review): pad positions still attend and contribute to the mean;
        # passing src_key_padding_mask would be a further improvement.
        return self.fc(x)


# Vocabulary-construction helper
def yield_tokens(data_iter):
    """Yield the token list of every (label, text) sample; labels are ignored."""
    for _unused_label, raw_text in data_iter:
        yield tokenizer(raw_text)
# Batch size and per-sample preprocessing transforms

# Mini-batch size for the DataLoader.
BATCH_SIZE = 64


# PEP 8 (E731): named lambdas replaced with def statements.
def text_transform(x):
    """Convert a raw review string into a list of vocabulary indices."""
    return vocab(tokenizer(x))


def label_transform(x):
    """Map IMDB labels {1: negative, 2: positive} to class ids {0, 1}."""
    return x - 1

if __name__ == '__main__':
    # basic_english tokenizer: lowercases and splits on whitespace/punctuation.
    tokenizer = get_tokenizer("basic_english")

    print('加载 IMDB 数据集，数据集的路径为：C:\\Users\\Administrator\\.cache\\torch\\text\\datasets\\IMDB\\aclImdb_v1\\{train,test}')
    train_iter = IMDB(split='train')

    # Convert the iterable dataset to map-style so it can be traversed twice
    # (once for vocabulary building, once per training epoch).
    train_dataset = to_map_style_dataset(train_iter)

    vocab = build_vocab_from_iterator(yield_tokens(train_dataset), specials=["<unk>", "<pad>"])
    # Out-of-vocabulary tokens map to <unk>.
    vocab.set_default_index(vocab["<unk>"])

    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate_batch)

    # Model hyper-parameters.
    VOCAB_SIZE = len(vocab)
    # BUGFIX: original did print('VOCAB_SIZE:{}', VOCAB_SIZE) — a '{}' template
    # passed alongside the value without .format(); use an f-string.
    print(f'VOCAB_SIZE: {VOCAB_SIZE}')
    EMBED_DIM = 128
    NUM_HEADS = 8
    NUM_LAYERS = 2
    HIDDEN_DIM = 512
    MAX_SEQ_LEN = 512
    NUM_CLASSES = 2
    DROPOUT = 0.5

    NUM_EPOCHS = 1

    start_time = time.time()
    model = TransformerModel(VOCAB_SIZE, EMBED_DIM, NUM_HEADS, NUM_LAYERS, HIDDEN_DIM, NUM_CLASSES, MAX_SEQ_LEN, DROPOUT)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=1e-4, weight_decay=1e-5)

    print('start training...')
    for epoch in range(NUM_EPOCHS):
        model.train()
        batch_num = 1
        for labels, texts in train_loader:
            optimizer.zero_grad()
            outputs = model(texts)
            loss = criterion(outputs, labels)
            # BUGFIX: the original printed outputs.item() here, which raises
            # RuntimeError for any batch with more than one element and
            # crashed training on the first batch; the per-batch loss is
            # reported below instead.
            loss.backward()
            optimizer.step()
            cost_time = time.time() - start_time
            print(f'epoch [{epoch:>2}/{NUM_EPOCHS:>2}], batch num [{batch_num:>5}] time: {cost_time:>10.2f}, valid loss {loss.item():>10.6f}')
            batch_num += 1
        print(f'Epoch {epoch + 1}/{NUM_EPOCHS}, Loss: {loss.item()}')
        # NOTE(review): this pickles the entire model object; saving
        # model.state_dict() is the more portable PyTorch convention.
        torch.save(model, 'my_first_transformer_from_deepseek_{}.pth'.format(epoch))