import csv
import gzip

import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence

# Read the gzip-compressed training CSV and derive the sorted list of
# unique target countries; a country's index in the list is its class id.
trainFilename = "../data/Names/names_train.csv.gz"
with gzip.open(trainFilename, "rt") as f:
    rows = [record for record in csv.reader(f)]
countries = [record[1] for record in rows]
country_list = sorted(set(countries))


def name2list(name: str):
    """Return ([code point of each character in *name*], length of *name*)."""
    codes = list(map(ord, name))
    return codes, len(codes)


def create_tensor(tensor: torch.Tensor):
    """Move *tensor* to the GPU when CUDA is available; otherwise return it unchanged."""
    return tensor.cuda() if torch.cuda.is_available() else tensor


def make_tensors(names):
    """Build a zero-padded LongTensor batch from *names*.

    Returns a tuple of:
      - the padded (batch, max_len) tensor, rows sorted by descending length
        (and moved to the GPU when available);
      - the sorted lengths (CPU LongTensor);
      - the inverse permutation that maps sorted positions back to the
        original input order.
    """
    pairs = [name2list(n) for n in names]
    sequences = [p[0] for p in pairs]
    lengths = torch.LongTensor([p[1] for p in pairs])

    max_len = int(lengths.max())
    padded = torch.zeros(len(sequences), max_len, dtype=torch.long)
    for row, (seq, length) in enumerate(zip(sequences, lengths)):
        padded[row, :length] = torch.LongTensor(seq)

    # pack_padded_sequence expects the batch sorted by descending length.
    lengths, perm_idx = lengths.sort(0, descending=True)
    padded = padded[perm_idx]

    # Record the inverse permutation so predictions can be restored to the
    # original input order.
    inv_idx = torch.empty_like(perm_idx)
    inv_idx[perm_idx] = torch.arange(len(perm_idx))

    return create_tensor(padded), lengths, inv_idx


class RNNClassifier(nn.Module):
    """GRU-based classifier over character sequences (e.g. surnames).

    Args:
        char_num: vocabulary size (number of embedding rows).
        input_size: embedding dimension, i.e. the GRU input size.
        hidden_size: GRU hidden-state size.
        output_size: number of target classes.
        num_layers: number of stacked GRU layers.
        batch_first: forwarded to nn.GRU. NOTE(review): forward() always
            transposes its input from (batch, seq) to (seq, batch), so the
            model is only consistent with batch_first=False.
        bidirectional: if True the final forward/backward hidden states of
            the last layer are concatenated before the linear layer.
    """

    def __init__(self,
                 char_num,
                 input_size,
                 hidden_size,
                 output_size,
                 num_layers=1,
                 batch_first=False,
                 bidirectional=False):
        super(RNNClassifier, self).__init__()
        self.n_directions = 2 if bidirectional else 1
        self.embedding = nn.Embedding(char_num, input_size)
        self.gru = nn.GRU(input_size,
                          hidden_size,
                          num_layers,
                          batch_first=batch_first,
                          bidirectional=bidirectional)
        # The classifier head sees both directions' hidden states when
        # the GRU is bidirectional.
        self.fc = nn.Linear(hidden_size * self.n_directions, output_size)

    def forward(self, input, seq_lengths):
        """Classify a padded batch.

        Args:
            input: (batch, max_len) LongTensor of padded character ids,
                rows sorted by descending true length.
            seq_lengths: 1-D tensor of true lengths, sorted descending.

        Returns:
            (batch, output_size) tensor of unnormalized class scores.
        """
        # (batch, seq) -> (seq, batch) to match the batch_first=False GRU.
        input = input.t()
        embedding = self.embedding(input)
        # pack_padded_sequence requires the lengths tensor to live on the
        # CPU even when the model/input are on the GPU; .cpu() is a no-op
        # when it already does.
        gru_input = pack_padded_sequence(embedding, seq_lengths.cpu())
        output, hidden = self.gru(gru_input)
        if self.n_directions == 2:
            # hidden is (num_layers * 2, batch, hidden); the last two slices
            # are the final layer's forward and backward states.
            hidden_cat = torch.cat([hidden[-2], hidden[-1]], dim=1)
        else:
            hidden_cat = hidden[-1]
        fc_output = self.fc(hidden_cat)
        return fc_output


# Rebuild the classifier with the same hyper-parameters used at training
# time so the saved state dict matches the module structure exactly.
model = RNNClassifier(char_num=128,
                      input_size=128,
                      hidden_size=128,
                      output_size=len(country_list),
                      num_layers=2,
                      batch_first=False,
                      bidirectional=True)

model_path = "../models/name_country_state_dict.pth"
# map_location="cpu" lets a checkpoint saved on a GPU machine load on a
# CPU-only one; the model is moved to the GPU below when available.
model.load_state_dict(torch.load(model_path, map_location="cpu"))
if torch.cuda.is_available():
    model = model.cuda()

model.eval()

names = ["Satoshi", "Giuseppe", "O'Connor", "Zhang", "Ivan"]
seq_tensor, seq_lengths, inv_idx = make_tensors(names)
# Inference only: disable autograd so no graph is built.
with torch.no_grad():
    output = model(seq_tensor, seq_lengths)
_, indices = output.max(dim=1)
# make_tensors sorted the batch by length; inv_idx restores input order
# so predictions line up with `names`.
original_order_indices = indices[inv_idx]
print([country_list[idx] for idx in original_order_indices.cpu().numpy()])
