import os

import matplotlib.pyplot as plt
import torch
from torch import nn
import dltools
from NLP_GloVe_fastText import TokenEmbedding


def read_imdb(data_dir, is_train):
    """Read the IMDB review dataset from disk.

    Args:
        data_dir: root directory of the aclImdb dataset (contains
            ``train/`` and ``test/`` subdirectories, each with
            ``pos/`` and ``neg/`` folders of ``.txt`` reviews).
        is_train: if True read the "train" split, otherwise "test".

    Returns:
        (data, labels): list of review strings and a parallel list of
        int labels (1 for positive, 0 for negative).
    """
    data, labels = [], []
    for label in ["pos", "neg"]:
        folder_name = os.path.join(data_dir, "train" if is_train else "test", label)
        for file in os.listdir(folder_name):
            # "r", not "r+": we only read; "r+" needs write permission and
            # would fail on a read-only dataset directory.
            with open(os.path.join(folder_name, file), "r", encoding="utf-8") as f:
                review = f.read().strip()
                data.append(review)
                labels.append(1 if label == "pos" else 0)
    return data, labels


"""

data_dir = r"./MNIST/aclImdb"
train_data, train_labels = read_imdb(data_dir, is_train=True)
print(f"num:{len(train_data)}")

for x, y in zip(train_data[:3], train_labels[:3]):
    print(f"x:{x}, y:{y}")
# 分词
train_tokens = dltools.tokenize(train_data, token="word")
vocab = dltools.Vocab(train_tokens, min_freq=5, reserved_tokens=["<pad>"])
print(f"len(vocab):{len(vocab)}")
dltools.set_figsize()
plt.xlabel("# tokens per review")
plt.ylabel("count")
plt.hist([len(line) for line in train_tokens], bins=range(0, 1000, 50))
# plt.show()

num_steps = 500
train_features = torch.tensor([dltools.truncate_pad(vocab[line], num_steps, vocab["<pad>"]) for line in train_tokens])
print(f"train_features.shape:{train_features.shape}")
train_iter = dltools.load_array((train_features, torch.tensor(train_labels)), 64)

for X, y in train_iter:
    print(f"X.shape:{X.shape}")
    print(f"y.shape:{y.shape}")
    break
"""


def load_data_imdb(data_dir, batch_size, num_steps=500):
    """Build train/test minibatch iterators and the vocabulary for IMDB.

    Args:
        data_dir: root directory of the aclImdb dataset.
        batch_size: minibatch size for both iterators.
        num_steps: every review is truncated/padded to this many tokens.

    Returns:
        (train_iter, test_iter, vocab)
    """
    train_data, train_labels = read_imdb(data_dir, is_train=True)
    test_data, test_labels = read_imdb(data_dir, is_train=False)
    train_tokens = dltools.tokenize(train_data, token="word")
    test_tokens = dltools.tokenize(test_data, token="word")
    # The vocabulary covers both splits so test tokens don't all map to <unk>.
    vocab = dltools.Vocab(train_tokens + test_tokens, min_freq=5, reserved_tokens=["<pad>"])
    train_features = torch.tensor(
        [dltools.truncate_pad(vocab[line], num_steps, vocab["<pad>"]) for line in train_tokens])
    test_features = torch.tensor(
        [dltools.truncate_pad(vocab[line], num_steps, vocab["<pad>"]) for line in test_tokens])
    # Bug fix: batch_size was previously ignored (64 was hard-coded here).
    # NOTE(review): if dltools.load_array supports an is_train flag (as d2l's
    # does), the test iterator should pass is_train=False to disable shuffling.
    train_iter = dltools.load_array((train_features, torch.tensor(train_labels)), batch_size)
    test_iter = dltools.load_array((test_features, torch.tensor(test_labels)), batch_size)
    return train_iter, test_iter, vocab


data_dir = r"./MNIST/aclImdb"
train_iter, test_iter, vocab = load_data_imdb(data_dir, 64, 128)

for X, y in test_iter:
    print(f"X.shape:{X.shape}")
    print(f"y.shape:{y.shape}")
    break

# Quick demo: slice the first and last timesteps out of a
# (batch, steps, features) tensor and concatenate them along the feature
# axis — the same trick BiRNN.forward uses to build its encoding.
test1 = torch.tensor([[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                      [[11, 12, 13], [14, 15, 16], [17, 18, 19]]])
print(f"test1.shape:{test1.shape}")
print(f"test1.shapetest1[:,0]:{test1[:, 0]}")
first_step, last_step = test1[:, 0], test1[:, -1]
print(torch.cat((first_step, last_step), dim=1))


def accuracy(pred: torch.Tensor, labels: torch.Tensor):
    """Element-wise accuracy: the fraction of positions where pred == labels.

    The comparison is done on CPU numpy arrays, so boolean predictions
    (e.g. ``pred > 0.5``) compare correctly against 0/1 integer labels.
    Both tensors must be broadcast-compatible.

    Returns:
        A numpy float in [0, 1].
    """
    # Annotation fix: torch.Tensor is the type; torch.tensor is a factory function.
    return (pred.cpu().numpy() == labels.cpu().numpy()).mean()
# Sanity check for accuracy(): testa and testb agree in 3 of their 6 cells,
# so the expected accuracy is 0.5.
testa = torch.tensor([[1, 0, 1],
                      [1, 1, 1]])
testb = torch.tensor([[1, 1, 1],
                      [0, 1, 0]])
testc = testa - testb  # zero wherever the two tensors agree
testd = testc == 0     # boolean mask of agreements
print(f"testd.sum():{testd.sum()}")
print(f"accuracy:{accuracy(testa, testb)}")


class BiRNN(nn.Module):
    """Bidirectional LSTM sentiment classifier with a sigmoid output.

    The encoder output at the first and last timesteps is concatenated
    (each slice is 2*num_hiddens wide because the LSTM is bidirectional,
    hence the 4*num_hiddens decoder input) and mapped to one probability.
    """

    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, **kwargs):
        super().__init__(**kwargs)
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.encoder = nn.LSTM(embed_size, num_hiddens, num_layers=num_layers,
                               bidirectional=True, batch_first=True)
        # First + last timestep, 2*num_hiddens each -> 4*num_hiddens features in.
        self.decoder = nn.Linear(4 * num_hiddens, 1)
        # Single sigmoid output (paired with BCELoss) instead of a 2-way softmax.
        self.sigmoid = nn.Sigmoid()

    def forward(self, inputs):
        """Map (batch_size, num_steps) token ids to (batch_size, 1) probabilities."""
        embedded = self.embedding(inputs)
        # Lay the LSTM weights out contiguously in memory for efficiency.
        self.encoder.flatten_parameters()
        # outputs: (batch_size, num_steps, 2*num_hiddens) — bidirectional doubles
        # the feature width.
        outputs, _ = self.encoder(embedded)
        encoding = torch.cat((outputs[:, 0], outputs[:, -1]), dim=1)
        return self.sigmoid(self.decoder(encoding))


# Model hyperparameters: 100-d embeddings (must match the GloVe 100d vectors
# loaded below), 100 hidden units per direction, 2 stacked LSTM layers.
embed_size, num_hiddens, num_layers = 100, 100, 2
device = dltools.try_gpu()
net = BiRNN(len(vocab), embed_size, num_hiddens, num_layers)

def init_weights(m: nn.Module):
    """Xavier-initialize layer weights; intended for use with ``net.apply``.

    Linear layers get xavier-uniform weights; for LSTMs every weight tensor
    (``weight_ih_l*`` / ``weight_hh_l*``, but not biases) is xavier-initialized.
    """
    # isinstance is the idiomatic type check (and also covers subclasses).
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight)
    elif isinstance(m, nn.LSTM):
        # Public API instead of the private _flat_weights_names/_parameters.
        for name, param in m.named_parameters():
            if "weight" in name:
                nn.init.xavier_uniform_(param)

# Initialize all layer weights, then overwrite the embedding layer with
# pretrained GloVe vectors and freeze it (no fine-tuning of embeddings).
net.apply(init_weights)
tokenEmbedding = TokenEmbedding(r"./MNIST/glove.6B.100d.txt")
# Look up a pretrained vector for every token in the vocabulary.
embeds = tokenEmbedding[vocab.idx_to_token]
print(f"embeds.shape:{embeds.shape}")
net.embedding.weight.data.copy_(embeds)
net.embedding.weight.requires_grad = False

lr, num_epochs = 0.01, 10
trainer = torch.optim.Adam(net.parameters(), lr=lr)
#loss = nn.CrossEntropyLoss()
# BCELoss expects sigmoid probabilities, matching BiRNN's single-unit output.
loss = nn.BCELoss()
# Difference between CrossEntropyLoss and BCELoss: https://blog.csdn.net/loki2018/article/details/127210390

net = net.to(device)
for epoch in range(num_epochs):
    # Train for one epoch. net.train() is hoisted out of the batch loop —
    # it only toggles module mode and needs to run once per epoch.
    net.train()
    for i, (features, labels) in enumerate(train_iter):
        features = features.to(device)
        # .to(device, dtype) instead of torch.tensor(tensor, ...): avoids the
        # "copy construct" warning and an extra tensor copy.
        labels = labels.to(device=device, dtype=torch.float32)
        trainer.zero_grad()
        pred = net(features)
        l = loss(pred, labels.reshape(pred.shape))
        l.backward()
        trainer.step()
        if i % 10 == 0:
            # The sigmoid output is a probability; threshold at 0.5 to get
            # hard predictions for the running accuracy.
            print(f"epoch:{epoch} i:{i} l:{l} acc:{accuracy(pred > 0.5, labels.reshape(pred.shape)):.3f}")

    # Evaluate on the test split once per epoch.
    net.eval()
    all_preds = []
    all_labels = []
    with torch.no_grad():
        for features, labels in test_iter:
            features = features.to(device)
            labels = labels.to(device)
            pred = net(features)
            all_preds.append(pred > 0.5)
            all_labels.append(labels.reshape(pred.shape))
    # torch.cat concatenates the per-batch tensors directly; the previous
    # torch.tensor(list-of-tensors) round-trip is slow and deprecated.
    print(f"epoch:{epoch} test_acc:{accuracy(torch.cat(all_preds), torch.cat(all_labels)):.3f}")