import torch
import pandas as pd
import numpy as np
from tqdm import tqdm
import os
import torch.nn.functional as F

class RNNClassifier(torch.nn.Module):
    """GRU-based sequence classifier with dot-product attention.

    Input:  LongTensor of token ids, shape (batch_size, seq_len).
    Output: unnormalised class scores, shape (batch_size, output_size).
    """

    def __init__(self, input_size, hidden_size, output_size, n_layers=2, bidirectional=True):
        """
        Args:
            input_size:  vocabulary size for the embedding table.
            hidden_size: GRU hidden dimension (also the embedding dimension).
            output_size: number of output classes.
            n_layers:    number of stacked GRU layers.
            bidirectional: if True, run the GRU in both directions (doubles
                the feature dimension fed to the classifier head).
        """
        super(RNNClassifier, self).__init__()
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.n_directions = 2 if bidirectional else 1  # bidirectional GRU doubles the feature dim

        # Embedding: (seq_len, batch) -> (seq_len, batch, hidden_size)
        self.embedding = torch.nn.Embedding(input_size, hidden_size)
        self.gru = torch.nn.GRU(hidden_size, hidden_size, n_layers, bias=True, bidirectional=bidirectional)
        self.fc = torch.nn.Linear(hidden_size * self.n_directions, output_size)
        self.dropout = torch.nn.Dropout(0.7)

    def attention_net(self, lstm_output, final_state):
        """Attend over per-step GRU outputs using the final hidden state as query.

        Args:
            lstm_output: (batch, seq_len, hidden_size * n_directions)
            final_state: (batch, hidden_size * n_directions)
        Returns:
            context vector, (batch, hidden_size * n_directions)
        """
        # BUGFIX: was hard-coded `self.hidden_size * 2`, which broke the
        # bidirectional=False configuration that __init__ explicitly supports.
        hidden = final_state.view(-1, self.hidden_size * self.n_directions, 1)
        attn_weights = torch.bmm(lstm_output, hidden).squeeze(2)  # (batch, seq_len)
        soft_attn_weights = F.softmax(attn_weights, 1)
        # (batch, feat, seq_len) @ (batch, seq_len, 1) -> (batch, feat, 1) -> (batch, feat)
        context = torch.bmm(lstm_output.transpose(1, 2), soft_attn_weights.unsqueeze(2)).squeeze(2)
        return context

    def _init_hidden(self, batch_size, device=None):
        # Zero initial hidden state: (n_layers * n_directions, batch, hidden_size).
        return torch.zeros(self.n_layers * self.n_directions, batch_size, self.hidden_size, device=device)

    def forward(self, input):
        # (batch, seq) -> (seq, batch): nn.GRU expects time-major input by default.
        input = input.t()
        batch_size = input.size(1)
        # BUGFIX: derive the device from the input instead of hard-coding
        # "cuda:0", so the model also runs on CPU or any other GPU.
        hidden = self._init_hidden(batch_size, device=input.device)
        embedding = self.dropout(self.embedding(input))
        output, hidden = self.gru(embedding, hidden)
        if self.n_directions == 2:
            # NOTE: keep the [-1], [-2] order — saved checkpoints were trained
            # with this concatenation order.
            hidden_cat = torch.cat([hidden[-1], hidden[-2]], dim=1)
        else:
            hidden_cat = hidden[-1]
        output = output.permute(1, 0, 2)  # (batch, seq_len, hidden_size * n_directions)
        attn_output = self.attention_net(output, hidden_cat)
        return self.fc(attn_output)
def load_model(weight_path, device=None):
    """Build an RNNClassifier with the competition hyper-parameters and load weights.

    Args:
        weight_path: path to a saved state_dict (.pth file).
        device: target torch.device; defaults to CUDA when available, else CPU.
            (Previously this read a global `device`, which raised NameError
            when the function was imported outside the __main__ script.)

    Returns:
        The model in eval() mode on the requested device.
    """
    print(weight_path)
    HIDDEN_SIZE = 128
    N_COUNTRY = 29   # number of output classes
    N_LAYER = 2
    N_CHARS = 859    # vocabulary size (858 tokens + pad id 858)
    if device is None:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = RNNClassifier(N_CHARS, HIDDEN_SIZE, N_COUNTRY, N_LAYER)
    # map_location lets CUDA-trained checkpoints load on CPU-only machines.
    model.load_state_dict(torch.load(weight_path, map_location=device))
    model.to(device)
    model.eval()
    return model

@torch.no_grad()
def predict(texts, models=None):
    """Ensemble-predict class probabilities for space-separated token-id strings.

    Each text is truncated/padded to a fixed length of 50 (pad id 858),
    run through every model, and the sigmoid outputs are averaged across
    the ensemble.

    Args:
        texts: iterable of strings like "12 7 845 ...".
        models: list of models to ensemble; defaults to the global
            `model_list` so existing call sites keep working.

    Returns:
        list of strings, one per input, each holding the space-joined
        averaged probabilities.
    """
    if models is None:
        models = model_list  # global fallback preserves the original call sites
    # BUGFIX: derive the device from the models instead of hard-coded .cuda(),
    # so inference also works on CPU.
    device = next(models[0].parameters()).device
    try:
        from tqdm import tqdm as _progress
        iterator = _progress(texts)
    except ImportError:  # run without a progress bar if tqdm is unavailable
        iterator = texts
    pres_all = []
    for text in iterator:
        tokens = [int(i) for i in text.split(' ')]
        # Fixed-length input: truncate to 50 or right-pad with the pad id 858.
        tokens = tokens[:50] if len(tokens) > 50 else tokens + [858] * (50 - len(tokens))
        batch = torch.tensor(tokens, dtype=torch.long, device=device).unsqueeze(0)
        # Running mean of sigmoid outputs over the ensemble.
        pres_fold = None
        for model in models:
            outputs = model(batch).sigmoid().detach().cpu().numpy()[0]
            share = outputs / len(models)
            pres_fold = share if pres_fold is None else pres_fold + share
        pres_all.append(' '.join(str(p) for p in pres_fold))
    return pres_all

if __name__ == "__main__":
    # NOTE: `device` is read as a global by load_model; keep it defined first.
    device = torch.device('cuda')

    # Load the 5 cross-validation fold checkpoints for ensembling.
    model_list = []
    for fold in range(1, 6):
        model_list.append(load_model('../user_data/model_data/fold_' + str(fold) + '_best.pth'))

    test_df = pd.read_csv('../tcdata/medical_nlp_round1_data/test.csv', header=None)
    test_df.columns = ['report_ID', 'description']
    submit = test_df.copy()
    print("test_df:{}".format(test_df.shape))

    # Strip the leading '|' separator and surrounding whitespace from each report.
    new_des = [i.strip('|').strip() for i in test_df['description'].values]
    test_df['description'] = new_des
    sub_id = test_df['report_ID'].values
    print(sub_id[0])

    save_dir = '../prediction_result/'
    os.makedirs(save_dir, exist_ok=True)  # exist_ok avoids the check-then-create race
    pres_all = predict(new_des)

    # Build all lines first and write once — the original `str_w +=` loop was
    # quadratic in the number of rows.
    lines = [sub_id[i] + ',' + '|' + pres_all[i] + '\n' for i in range(len(sub_id))]
    with open(save_dir + 'result.csv', 'w') as f:
        f.write(''.join(lines).strip('\n'))