"""

"""
import torch
import torch.nn.functional as F
import torch.nn as nn
import os
import numpy as np

# Model hyperparameters
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
total_epoch = 150  # 150
max_len = 30  # maximum sequence length; slot label sequences are padded to this
batch = 16
learning_rate = 0.001
DROPOUT = 0  # 0.2, 0.3, 0.4
num_fold = 5  # number of cross-validation folds
embedding_size = 768  # presumably BERT-base hidden size — TODO confirm embedding source
lstm_hidden_size = 200

# Data and output paths
version = 'Bi-Model'
data_path = '../../data/npy'
model_path = '../../result_and_model/slot'
result_path = 'result_and_model'

# Input word vectors (NPY); '0' in fold-split names is replaced by the fold index
x_train_npy = os.path.join(data_path, 'train_x0.npy')
x_dev_npy = os.path.join(data_path, 'dev_x0.npy')
x_test_npy = os.path.join(data_path, 'test_x.npy')

# Input raw sentences (TXT)
x_train_txt = os.path.join(data_path, 'train_x0.txt')
x_dev_txt = os.path.join(data_path, 'dev_x0.txt')
x_test_txt = os.path.join(data_path, 'test_x.txt')

# Intent labels
intent_train_npy = os.path.join(data_path, 'train_intent_y0.npy')
intent_dev_npy = os.path.join(data_path, 'dev_intent_y0.npy')
intent_test_npy = os.path.join(data_path, 'test_intent.npy')

# Slot labels in IOB format
slot_train = os.path.join(data_path, 'train_slot_y0.txt')
slot_dev = os.path.join(data_path, 'dev_slot_y0.txt')
slot_test = os.path.join(data_path, 'test_slot.txt')

slot_vocab_file = os.path.join(data_path, 'slot_vocab.txt')  # all slot tags, one per line
dev_result_file = os.path.join(result_path, 'dev_log.csv')

# =======data2index_ver2===========================


def makeindex(x_npy, intent_npy, slot, x_sentence):
    """
    Convert raw data files into indexed samples.

    :param x_npy: path to an .npy file of precomputed word vectors
    :param intent_npy: path to an .npy file of intent labels
    :param slot: path to a text file of space-separated IOB slot tags, one sentence per line
    :param x_sentence: path to a text file of raw sentences, one per line
    :return: list of [word vectors, actual length, slot-tag ids, intent label] per sample
    """
    train_data = []
    train_x = np.load(x_npy)
    train_intent = np.load(intent_npy)
    train_slot = []
    train_len = []
    num_unknown = 0

    # Map slot tags to ids; tags missing from slot_dict fall back to 'O'.
    # NOTE(review): assumes 'O' and '<PAD>' are present in the slot vocabulary
    # files — a KeyError here means they are not; confirm against the data.
    with open(slot, encoding='utf-8') as f:
        for line in f.readlines():
            line = line.strip()
            sample_slot = []
            for word_slot in line.split(' '):
                try:
                    sample_slot.append(slot_dict[word_slot])
                except KeyError:
                    sample_slot.append(slot_dict['O'])
                    num_unknown += 1

            # Pad every tag sequence to max_len (was a hard-coded 30).
            if len(sample_slot) < max_len:
                sample_slot.extend([slot_dict['<PAD>']] * (max_len - len(sample_slot)))  # slot padding

            train_slot.append(sample_slot)
        print('num_Unknown', num_unknown)

    # Actual sentence lengths.
    # NOTE(review): length is character count + 1 — presumably to account for a
    # [CLS]-style prefix token in the embeddings; confirm against preprocessing.
    with open(x_sentence, encoding='utf-8') as f:
        for line in f.readlines():
            line = line.strip()
            train_len.append(len(line) + 1)

    assert len(train_x) == len(train_slot) == len(train_len) == len(train_intent)
    for i in range(len(train_x)):
        train_data.append([train_x[i], train_len[i], train_slot[i], train_intent[i]])

    return train_data


def get_data(fold):
    """
    Load the training and validation splits for one cross-validation fold.

    :param fold: fold index, substituted for the '0' placeholder in the file names
    :return: (train_data, dev_data), each as produced by makeindex
    """
    tag = str(fold)
    train_paths = (x_train_npy, intent_train_npy, slot_train, x_train_txt)
    dev_paths = (x_dev_npy, intent_dev_npy, slot_dev, x_dev_txt)

    train_data = makeindex(*(p.replace('0', tag) for p in train_paths))
    # Called "test" downstream, but this is really the dev split.
    test_data = makeindex(*(p.replace('0', tag) for p in dev_paths))

    return train_data, test_data


def get_test_data():
    """
    Load the held-out test split.

    :return: test samples as produced by makeindex
    """
    return makeindex(x_test_npy, intent_test_npy, slot_test, x_test_txt)


# =====================================data2index_vecEnd==========================================================

# =====================================make_dict==================================================================

def convert_int(arr):
    """Return ``int(arr)``, or None if *arr* cannot be converted to an int.

    The original bare ``except`` swallowed every exception (including
    KeyboardInterrupt); only conversion failures are caught now.
    """
    try:
        return int(arr)
    except (TypeError, ValueError):
        return None


# Make words dict
# with open(cfg.train_file) as f:
#     for line in f.readlines():
#         line = line.strip().lower().split()
#
#         for index, item in enumerate(line):
#             word = item.split(':')[0]
#             if word == '<=>':
#                 break
#             if convert_int(word) is not None:
#                 words.append('DIGIT' * len(word))
#             else:
#                 words.append(word)

# words_vocab = sorted(set(words))
# word_dict = {'UNK': 0, 'PAD': 1}

# for i, item in enumerate(words_vocab):
#     word_dict[item] = i + 2

# Make slot tag dict: collect every tag seen in the train and dev files,
# preserving first-occurrence order so indices stay stable across runs.
slot_list = []
for _tag_file in (slot_train, slot_dev):
    # encoding added: the vocab is written as utf-8 below, so read the tag
    # sources the same way instead of relying on the locale default.
    with open(_tag_file, encoding='utf-8') as f:
        for line in f.readlines():
            for item in line.strip().split(' '):
                if item not in slot_list:
                    slot_list.append(item)

# Persist the vocabulary, one tag per line.
with open(slot_vocab_file, 'w', encoding='utf-8') as f:
    for item in slot_list:
        f.write(item + '\n')

# Read it back (utf-8, matching the write) to build tag -> index.
slot_dict = {}
with open(slot_vocab_file, encoding='utf-8') as f:
    for i, line in enumerate(f.readlines()):
        slot_dict[line.strip()] = i
print(slot_dict)

# Make intent dict
intent_dict = {}

# =====================================make_dictEnd===============================================
# =====================================模型代码Bi-model============================================


class slot_enc(nn.Module):
    """Slot branch encoder: two-layer bidirectional LSTM over precomputed embeddings."""

    def __init__(self, embedding_size, lstm_hidden_size):
        super(slot_enc, self).__init__()
        # Inputs are precomputed word vectors, so no nn.Embedding layer here.
        self.lstm = nn.LSTM(input_size=embedding_size,
                            hidden_size=lstm_hidden_size,
                            num_layers=2,
                            bidirectional=True,
                            batch_first=True)

    def forward(self, x):
        out = F.dropout(x, DROPOUT)
        out, _ = self.lstm(out)
        return F.dropout(out, DROPOUT)


class slot_dec(nn.Module):
    """Slot decoder: a unidirectional LSTM that emits one tag logit vector per step,
    feeding its previous output back in alongside the encoder features and the
    intent branch's shared memory."""

    def __init__(self, lstm_hidden_size, label_size=len(slot_dict)):
        super(slot_dec, self).__init__()
        # Per-step input: encoder output (2h) + intent share memory (2h) + previous decoder output (h).
        self.lstm = nn.LSTM(input_size=lstm_hidden_size * 5, hidden_size=lstm_hidden_size, num_layers=1)
        self.fc = nn.Linear(lstm_hidden_size, label_size)
        self.hidden_size = lstm_hidden_size

    def forward(self, x, hi):
        """
        :param x: slot-encoder output, (batch, seq_len, 2 * hidden)
        :param hi: intent share memory, (batch, seq_len, 2 * hidden)
        :return: per-token logits from the final linear layer
        """
        batch = x.size(0)
        length = x.size(1)
        dec_init_out = torch.zeros(batch, 1, self.hidden_size).to(device)
        hidden_state = (torch.zeros(1, 1, self.hidden_size).to(device),
                        torch.zeros(1, 1, self.hidden_size).to(device))
        x = torch.cat((x, hi), dim=-1)

        x = x.transpose(1, 0)  # seq_len x batch x feature_size
        x = F.dropout(x, DROPOUT)
        all_out = []
        # NOTE(review): self.lstm is not batch_first, so each (batch, 1, feat)
        # step input is interpreted as seq=batch, batch=1 — confirm intended.
        for i in range(length):
            if i == 0:
                out, hidden_state = self.lstm(torch.cat((x[i].unsqueeze(1), dec_init_out), dim=-1), hidden_state)
            else:
                out, hidden_state = self.lstm(torch.cat((x[i].unsqueeze(1), out), dim=-1), hidden_state)
            all_out.append(out)
        output = torch.cat(all_out, dim=1)
        # BUG FIX: the original applied this dropout to `x`, which is never
        # used again — dead code. The evident intent was to regularize the
        # decoder output before the linear layer. (No-op while DROPOUT == 0.)
        output = F.dropout(output, DROPOUT)
        res = self.fc(output)
        return res


class intent_enc(nn.Module):
    """Intent branch encoder: two-layer bidirectional LSTM over precomputed embeddings."""

    def __init__(self, embedding_size, lstm_hidden_size):
        super(intent_enc, self).__init__()
        # Inputs are precomputed word vectors, so no nn.Embedding layer here.
        self.lstm = nn.LSTM(input_size=embedding_size,
                            hidden_size=lstm_hidden_size,
                            num_layers=2,
                            bidirectional=True,
                            batch_first=True,
                            dropout=DROPOUT)

    def forward(self, x):
        hidden = F.dropout(x, DROPOUT)
        hidden, _ = self.lstm(hidden)
        return F.dropout(hidden, DROPOUT)


class intent_dec(nn.Module):
    """Intent decoder: single-layer LSTM; classifies from the hidden state at
    each sentence's last real (unpadded) position."""

    def __init__(self, lstm_hidden_size, label_size=24):
        super(intent_dec, self).__init__()
        # Per-step input: intent-encoder output (2h) + slot share memory (2h).
        self.lstm = nn.LSTM(input_size=lstm_hidden_size * 4, hidden_size=lstm_hidden_size, batch_first=True,
                            num_layers=1)
        self.fc = nn.Linear(lstm_hidden_size, label_size)

    def forward(self, x, hs, real_len):
        """
        :param x: intent-encoder output, (batch, seq_len, 2 * hidden)
        :param hs: slot share memory, (batch, seq_len, 2 * hidden)
        :param real_len: actual length of each sentence
        :return: intent logits; note the squeeze() collapses the batch axis when batch == 1
        """
        num_samples = x.size()[0]
        lengths = torch.tensor(real_len).to(device)

        features = F.dropout(torch.cat((x, hs), dim=-1), DROPOUT)
        features, _ = self.lstm(features)
        features = F.dropout(features, DROPOUT)

        # Select the hidden state at each sentence's last real token.
        rows = torch.arange(num_samples).long().to(device)
        last_state = features[rows, lengths - 1, :]

        return self.fc(last_state.squeeze())


class Intent(nn.Module):
    """Intent branch: encoder, decoder, and the memory tensor shared with the slot branch."""

    def __init__(self):
        super(Intent, self).__init__()
        self.enc = intent_enc(embedding_size, lstm_hidden_size).to(device)
        self.dec = intent_dec(lstm_hidden_size).to(device)
        # Buffer exchanged with the slot branch; how it is filled is not
        # visible in this file — presumably with encoder outputs per step.
        self.share_memory = torch.zeros((batch, max_len, lstm_hidden_size * 2)).to(device)


class Slot(nn.Module):
    """Slot branch: encoder, decoder, and the memory tensor shared with the intent branch."""

    def __init__(self):
        super(Slot, self).__init__()
        self.enc = slot_enc(embedding_size, lstm_hidden_size).to(device)
        self.dec = slot_dec(lstm_hidden_size).to(device)
        # Buffer exchanged with the intent branch; how it is filled is not
        # visible in this file — presumably with encoder outputs per step.
        self.share_memory = torch.zeros((batch, max_len, lstm_hidden_size * 2)).to(device)


# =====================================模型代码END=====================================================================

# Convert each fold's best checkpoints (full pickled models) into state_dict
# files, which are smaller and independent of the class definitions' module path.
for i in range(num_fold):
    fold = str(i)
    # map_location added so CUDA-saved checkpoints also load on CPU-only hosts.
    # NOTE(review): torch.load of a full model unpickles arbitrary code — only
    # run this on checkpoints from a trusted source.
    slot_model = torch.load(os.path.join('model', fold + 'model_slot_best.ckpt'), map_location=device)
    intent_model = torch.load(os.path.join('model', fold + 'model_intent_best.ckpt'), map_location=device)

    torch.save(slot_model.state_dict(), os.path.join('model', fold + 'model_slot_best.state_dict'))
    torch.save(intent_model.state_dict(), os.path.join('model', fold + 'model_intent_best.state_dict'))
