import torch
import torch.nn as nn
import torchvision
from torchvision import transforms, datasets
import os, PIL, pathlib, warnings

# Silence library deprecation warnings (torchtext emits several on import).
warnings.filterwarnings('ignore')

# BUG FIX: the device string was misspelled 'cua'; torch.device('cua') raises
# RuntimeError, so a GPU was never usable. Use 'cuda'.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

print(device)

# AG_NEWS: 4-class news-topic classification dataset.
# AG_NEWS() returns (train, test) iterables of (label, text) pairs,
# where label is in 1..4 and text is the raw article string.
from torchtext.datasets import AG_NEWS
train_iter, test_iter = AG_NEWS()

print(train_iter)


from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator

# Tokenizer: basic_english lower-cases and splits on whitespace/punctuation.
tokenizer = get_tokenizer('basic_english')

def yield_tokens(data_iter):
    """Yield the token list of every text sample in the iterator."""
    for _, sample_text in data_iter:
        yield tokenizer(sample_text)

# Build the vocabulary from the training split; '<unk>' is reserved for
# out-of-vocabulary tokens and used as the fallback lookup index.
vocab = build_vocab_from_iterator(yield_tokens(train_iter), specials=['<unk>'])
vocab.set_default_index(vocab['<unk>'])

print(vocab(['here', 'is', 'an', 'example']))

def text_pipeline(x):
    """Map raw text to a list of vocabulary indices."""
    return vocab(tokenizer(x))

def label_pipeline(x):
    """Map the dataset's 1-based label to a 0-based int."""
    return int(x) - 1

print(text_pipeline('here is the an example'))

print(label_pipeline('10'))

from torch.utils.data import DataLoader
def collate_batch(batch):
    """Collate (label, text) pairs into EmbeddingBag-style flat tensors.

    Returns (labels, flat_token_ids, offsets), all moved to `device`.
    `offsets` holds the start index of each sample inside the flattened
    token-id tensor, as expected by nn.EmbeddingBag.
    """
    label_list, text_list, offset = [], [], [0]

    for(_label, _text) in batch:
        # 0-based integer class label
        label_list.append(label_pipeline(_label))

        # token ids of one text sample
        processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
        text_list.append(processed_text)

        # per-sample length; turned into start offsets below
        offset.append(processed_text.size(0))

    label_list = torch.tensor(label_list, dtype=torch.int64)
    # BUG FIX: variable-length 1-D tensors must be concatenated into one flat
    # tensor; torch.tensor(list_of_tensors) raises ValueError.
    text_list = torch.cat(text_list)
    # Drop the trailing length and cumulative-sum to get start offsets.
    offset = torch.tensor(offset[:-1]).cumsum(dim=0)

    return label_list.to(device), text_list.to(device), offset.to(device)

# Batches of 8 raw samples, flattened into EmbeddingBag format by collate_batch.
dataloader = DataLoader(train_iter, batch_size=8, shuffle=False,
                        collate_fn=collate_batch)

from torch import nn
class TextClassificationModel(nn.Module):
    """Bag-of-embeddings text classifier: EmbeddingBag -> Linear.

    Args:
        vocab_size: size of the vocabulary (embedding rows).
        embed_dim: embedding dimension.
        num_class: number of output classes.
    """
    def __init__(self, vocab_size, embed_dim, num_class):
        super(TextClassificationModel, self).__init__()
        self.embedding = nn.EmbeddingBag(
            vocab_size # vocabulary size
            ,embed_dim # embedding dimension
            ,sparse=False
        )
        self.fc = nn.Linear(embed_dim, num_class)
        self.init_weights()

    def init_weights(self):
        """Uniform init in [-0.5, 0.5] for weights; zero the fc bias."""
        initrange = 0.5
        self.embedding.weight.data.uniform_(-initrange, initrange)
        # BUG FIX: 'unifor_' was a typo and raised AttributeError on init.
        self.fc.weight.data.uniform_(-initrange, initrange)
        self.fc.bias.data.zero_()

    def forward(self, text, offset):
        # text: flat 1-D tensor of token ids for the whole batch;
        # offset: start index of each sample inside `text`.
        embedded = self.embedding(text, offset)
        return self.fc(embedded)


# Number of target classes = number of distinct labels in the training set.
num_class = len({label for (label, text) in train_iter})
vocab_size = len(vocab)

em_size = 64  # embedding dimension
model = TextClassificationModel(vocab_size, em_size, num_class).to(device)

import time
def train(dataloader, optimizer, criterion, epoch):
    """Run one training epoch over `dataloader`.

    Logs running accuracy/loss every `log_interval` batches, then resets
    the running statistics and the timer. Uses the module-level `model`.
    """
    model.train()
    running_acc = running_loss = seen = 0
    log_interval = 500
    tick = time.time()

    for batch_idx, (label, text, offset) in enumerate(dataloader):
        logits = model(text, offset)

        optimizer.zero_grad()            # clear accumulated gradients
        loss = criterion(logits, label)  # gap between prediction and target
        loss.backward()                  # back-propagate
        optimizer.step()                 # update parameters

        # accumulate running accuracy / loss
        running_acc += (logits.argmax(1) == label).sum().item()
        running_loss += loss.item()
        seen += label.size(0)

        if batch_idx % log_interval == 0 and batch_idx > 0:
            print('epoch {:1d},  {:4d}/{:4d} batches , train_acc {:4.3f} train_loss {:4.5f}'.format(
                epoch, batch_idx, len(dataloader),
                running_acc / seen, running_loss / seen)
            )
            running_acc = running_loss = seen = 0
            tick = time.time()

def evaluate(dataloader, criterion):
    """Return (accuracy, mean loss) over `dataloader`, gradients disabled."""
    model.eval()
    correct = loss_sum = count = 0
    with torch.no_grad():
        for label, text, offsets in dataloader:
            logits = model(text, offsets)
            loss_sum += criterion(logits, label).item()
            correct += (logits.argmax(1) == label).sum().item()
            count += label.size(0)

    return correct / count, loss_sum / count

from torch.utils.data.dataset import random_split
from torchtext.data.functional import to_map_style_dataset

# Hyper-parameters
epochs = 10
lr = 5
batch_size = 64

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
# Decay the learning rate by 10x each time it is stepped.
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)
total_accu = None  # best validation accuracy seen so far

# Convert the iterable datapipes to map-style datasets so they can be
# split and shuffled; hold out 5% of the training data for validation.
train_dataset = to_map_style_dataset(train_iter)
test_dataset = to_map_style_dataset(test_iter)
num_train = int(len(train_dataset) * 0.95)
split_train_, split_vaild_ = random_split(
    train_dataset, [num_train, len(train_dataset) - num_train])

train_dataloader = DataLoader(split_train_, batch_size=batch_size,
                              shuffle=True, collate_fn=collate_batch)
vaild_dataloader = DataLoader(split_vaild_, batch_size=batch_size,
                              shuffle=True, collate_fn=collate_batch)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size,
                             shuffle=True, collate_fn=collate_batch)

# BUG FIX: `epochs` is an int and cannot be iterated; use range(1, epochs + 1)
# so epochs are 1-based for logging.
for epoch in range(1, epochs + 1):
    epoch_start_time = time.time()
    train(train_dataloader, optimizer, criterion, epoch)
    val_acc, val_loss = evaluate(vaild_dataloader, criterion)

    # Step the LR scheduler only when validation accuracy stops improving;
    # otherwise record the new best accuracy.
    if total_accu is not None and total_accu > val_acc:
        scheduler.step()
    else:
        total_accu = val_acc

    print('epoch {:1d}, time: {:4.2f}s, valid_acc {:4.3f} valid_loss {:4.3f}'
          .format(epoch,time.time() - epoch_start_time,val_acc, val_loss))

# BUG FIX: evaluate() requires the criterion argument.
test_acc, test_loss = evaluate(test_dataloader, criterion)
print('test accuracy {:8.3f}'.format(test_acc))

