import os
import torch
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import pandas as pd
import numpy as np
import time
import math
import torch.nn.functional as F

class NameDataset(Dataset):
    """Dataset of space-separated integer token sequences with multi-hot labels.

    Each selected row of ``train_df`` must provide a ``'description'`` column
    (token ids as a space-separated string) and a ``'label'`` column (label ids
    as a space-separated string, possibly empty).

    Args:
        train_df: source DataFrame with 'description' and 'label' columns.
        idx: row indices (e.g. one CV-fold split) selecting the subset to use.
        max_len: sequences are truncated / right-padded to this many tokens.
        pad_idx: token id used for padding (858 == embedding vocab size - 1).
        num_labels: length of the multi-hot label vector.
    """

    def __init__(self, train_df, idx, max_len=50, pad_idx=858, num_labels=29):
        subset = train_df.loc[idx, :].reset_index(drop=True)
        # Extract whole columns at once instead of a per-row .loc loop.
        self.names = subset['description'].tolist()
        self.countries = subset['label'].tolist()
        self.max_len = max_len
        self.pad_idx = pad_idx
        self.num_labels = num_labels
        self.len = len(self.names)
        # "country" naming kept for API compatibility (code was adapted from a
        # name/country tutorial); values here are label strings.
        self.country_list = list(sorted(set(self.countries)))
        self.country_dict = self.getCountryDict()
        self.country_num = len(self.country_list)

    def get_dumm(self, s):
        """Convert a space-separated label-id string into a multi-hot vector.

        An empty string maps to the all-zeros vector.
        """
        vec = [0] * self.num_labels
        # str.split() with no argument drops empty tokens, so '' yields [].
        for tok in s.split():
            vec[int(tok)] = 1
        return vec

    def __getitem__(self, idx):
        """Return (token_ids, multi_hot_label) as fixed-length numpy arrays."""
        tokens = [int(t) for t in self.names[idx].split(' ')]
        if len(tokens) > self.max_len:
            tokens = tokens[:self.max_len]
        else:
            tokens = tokens + [self.pad_idx] * (self.max_len - len(tokens))
        label = self.get_dumm(self.countries[idx])  # to multi-hot vector
        return np.array(tokens), np.array(label)

    def __len__(self):
        return self.len

    def getCountryDict(self):
        """Map each distinct label string to its index in the sorted list."""
        country_dict = dict()
        for i, country_name in enumerate(self.country_list, 0):
            country_dict[country_name] = i
        return country_dict

    def idx2country(self, index):
        """Inverse lookup: index -> label string."""
        return self.country_list[index]

    def getCountriesNum(self):
        """Number of distinct label strings in this split."""
        return self.country_num


#classifier = RNNClassifier(N_CHARS, HIDDEN_SIZE, N_COUNTRY, N_LAYER)
class RNNClassifier(torch.nn.Module):
    """GRU sequence classifier with dot-product attention pooling.

    Input: LongTensor of token ids, shape (batch, seq_len).
    Output: raw logits, shape (batch, output_size) — pair with
    BCEWithLogitsLoss / sigmoid downstream.

    Args:
        input_size: embedding vocabulary size.
        hidden_size: GRU hidden dimension (also the embedding dimension).
        output_size: number of output labels.
        n_layers: number of stacked GRU layers.
        bidirectional: use a bidirectional GRU (default True).
        dropout: dropout probability applied to the embeddings (default 0.7,
            same as the previous hard-coded value).
    """

    def __init__(self, input_size, hidden_size, output_size, n_layers=2,
                 bidirectional=True, dropout=0.7):
        super(RNNClassifier, self).__init__()
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.n_directions = 2 if bidirectional else 1

        # Embedding: (seq_len, batch) -> (seq_len, batch, hidden_size)
        self.embedding = torch.nn.Embedding(input_size, hidden_size)
        self.gru = torch.nn.GRU(hidden_size, hidden_size, n_layers, bias=True,
                                bidirectional=bidirectional)
        self.fc = torch.nn.Linear(hidden_size * self.n_directions, output_size)
        self.dropout = torch.nn.Dropout(dropout)

    def attention_net(self, lstm_output, final_state):
        """Attention-weighted sum of the GRU outputs.

        NOTE(review): the view below hard-codes the bidirectional factor 2, so
        this only works with bidirectional=True — confirm before using the
        unidirectional configuration.
        """
        # hidden: [batch, hidden * 2, 1]
        hidden = final_state.view(-1, self.hidden_size * 2, 1)
        # attn_weights: [batch, seq_len]
        attn_weights = torch.bmm(lstm_output, hidden).squeeze(2)
        soft_attn_weights = F.softmax(attn_weights, 1)
        # context: [batch, hidden * 2]
        context = torch.bmm(lstm_output.transpose(1, 2),
                            soft_attn_weights.unsqueeze(2)).squeeze(2)
        return context

    def _init_hidden(self, batch_size):
        """Zero initial hidden state: [n_layers * n_directions, batch, hidden]."""
        return torch.zeros(self.n_layers * self.n_directions, batch_size,
                           self.hidden_size)

    def forward(self, input):
        # (batch, seq_len) -> (seq_len, batch), the GRU's default layout.
        input = input.t()
        batch_size = input.size(1)
        # BUGFIX: use the input's device instead of hard-coded "cuda:0" so the
        # model also runs on CPU and on any GPU it was moved to.
        hidden = self._init_hidden(batch_size).to(input.device)
        embedding = self.dropout(self.embedding(input))
        # (The original packed-sequence call was already disabled; inputs are
        # fixed-length padded sequences.)
        output, hidden = self.gru(embedding, hidden)
        if self.n_directions == 2:
            # Concatenate the last layer's forward and backward final states.
            hidden_cat = torch.cat([hidden[-1], hidden[-2]], dim=1)
        else:
            hidden_cat = hidden[-1]
        output = output.permute(1, 0, 2)  # [batch, seq_len, hidden * directions]
        attn_output = self.attention_net(output, hidden_cat)
        return self.fc(attn_output)

def trainModel(lr_scheduler):
    """Run the training loop for one CV fold.

    Trains the module-level `classifier` on `trainLoader` for `N_EPOCHS`
    epochs, evaluates after every epoch with `val_model`, and checkpoints the
    weights with the lowest validation log-loss to
    ../user_data/model_data/fold_<k>_best.pth.

    Relies on module-level globals: classifier, optimizer, criterion, device,
    trainLoader, fold, N_EPOCHS, BATCH_SIZE, val_model.

    Args:
        lr_scheduler: per-batch scheduler stepped with a fractional epoch
            value (cosine warm restarts), or None to keep the LR unscheduled.

    Returns:
        float: best (lowest) validation log-loss seen during training.
    """
    #device = torch.device('cuda')
    total_iters=len(trainLoader)
    print('total_iters:{}'.format(total_iters))
    since = time.time()
    best_loss = 1e7
    best_epoch = 0
    # print_interval < 0 disables the per-batch progress printout below.
    print_interval = -1
    #
    iters = len(trainLoader)
    model_save_dir='../user_data/model_data'
    if not os.path.exists(model_save_dir): os.makedirs(model_save_dir)
    def time_since(since):
        # Format the seconds elapsed since `since` as "Xm Ys".
        s = time.time() - since
        m = math.floor(s / 60)
        s -= m * 60
        return '%dm %ds' % (m, s)

    total_loss = 0
    for epoch in range(1, N_EPOCHS + 1):
        classifier.train(True)
        begin_time=time.time()
        print('learning rate:{}'.format(optimizer.param_groups[-1]['lr']))
        print('Fold{} Epoch {}/{}'.format(fold+1,epoch, N_EPOCHS))
        print('-' * 10)
        running_corrects_linear = 0
        count=0
        train_loss = []
        for i, (names, countries) in enumerate(trainLoader, 1):
            # print(type(names), type(countries))
            # print(len(names), countries.shape)
            count+=1
            inputs = names
            target = countries
            inputs = inputs.type(torch.LongTensor).to(device)
            # BCEWithLogitsLoss expects float multi-hot targets.
            target = target.to(device).float()
            output = classifier(inputs)
            # print("Shape:", output.shape, target.shape)
            # Mind the output/target shapes, e.g. Shape: torch.Size([256, 18]) torch.Size([256])
            loss = criterion(output, target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Step the cosine schedule once per batch using a fractional epoch.
            if lr_scheduler != None:
                lr_scheduler.step(epoch + count / iters)
            if print_interval > 0 and (i % print_interval == 0 or output.size()[0] < BATCH_SIZE):
                spend_time = time.time() - begin_time
                print(
                    ' Fold:{} Epoch:{}({}/{}) loss:{:.3f} lr:{:.7f} epoch_Time:{}min:'.format(
                        fold + 1, epoch, count, total_iters,
                        loss.item(), optimizer.param_groups[-1]['lr'],
                        spend_time / count * total_iters // 60 - spend_time // 60))
            #
            train_loss.append(loss.item())
            # lr_scheduler.step()
        val_auc, val_loss = val_model(classifier, criterion)
        print('valLogLoss: {:.4f} valAuc: {:.4f}'.format(val_loss, val_auc))
        model_out_path = model_save_dir + "/" + 'fold_' + str(fold + 1) + '_' + str(epoch) + '.pth'
        best_model_out_path = model_save_dir + "/" + 'fold_' + str(fold + 1) + '_best' + '.pth'
        # save the best model
        if val_loss < best_loss:
            best_loss = val_loss
            best_epoch = epoch
            torch.save(classifier.state_dict(), best_model_out_path)
            print("save best epoch: {} best auc: {} best logloss: {}".format(best_epoch, val_auc, val_loss))
        # save based on epoch interval
        # if epoch % 5  == 0 and epoch>30:
        # torch.save(model.state_dict(), model_out_path)
        #
    print('Fold{} Best logloss: {:.3f} Best epoch:{}'.format(fold + 1, best_loss, best_epoch))
    #time_elapsed = time.time() - since
    # print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    return best_loss


@torch.no_grad()
def val_model(model, criterion):
    """Evaluate `model` on the module-level `val_loader`.

    Args:
        model: classifier producing raw logits; sigmoid is applied here.
        criterion: unused; kept so existing call sites keep working.

    Returns:
        tuple (score, log_loss):
            score: 1 - summed binary cross-entropy divided by a hard-coded
                normalizer (17 * 3000). NOTE(review): this assumes a fixed
                validation size / label count — confirm against the actual
                fold sizes before trusting the absolute value.
            log_loss: sklearn multi-label log loss over all predictions.
    """
    model.eval()
    pres_list = []
    labels_list = []
    for inputs, labels in val_loader:
        inputs = inputs.type(torch.LongTensor)
        labels = labels.type(torch.LongTensor)
        inputs, labels = inputs.cuda(), labels.cuda()
        outputs = model(inputs)
        # Accumulate sigmoid probabilities and ground-truth multi-hot labels.
        pres_list += outputs.sigmoid().detach().cpu().numpy().tolist()
        labels_list += labels.detach().cpu().numpy().tolist()

    lab = np.array(labels_list)
    pred = np.array(pres_list)
    # BUGFIX: clamp BOTH log terms. Previously only log(1 - pred) had the
    # 1e-5 epsilon, so a sigmoid output underflowing to exactly 0 made
    # log(pred) = -inf and poisoned the score.
    b = -np.sum(lab * np.log(pred + 1e-5)
                + (1 - lab) * np.log(1 - pred + 1e-5)) / (17 * 3000)
    log_loss = metrics.log_loss(labels_list, pres_list)
    return 1 - b, log_loss


if __name__ == '__main__':
    # Load the raw competition data: report id, token-id text, and two label
    # columns that are merged into one multi-label string below.
    train_df = pd.read_csv('../tcdata/medical_nlp_round1_data/train.csv', header=None)
    train_df.columns = ['report_ID', 'description', 'label1','label2']
    train_df.drop(['report_ID'], axis=1, inplace=True)
    print("train_df:{}".format(train_df.shape))
    i1 = 0
    # Replace NaN entries of label2 with '' so the string ops below are safe.
    # NOTE(review): chained indexing train_df['label2'][i1] can raise
    # SettingWithCopyWarning and may not write through on some pandas
    # versions; train_df.loc[i1, 'label2'] would be the safe form — verify.
    for i in train_df['label2']:

        if type(i)!=str:
            if math.isnan(i):
                train_df['label2'][i1]=''
        i1=i1+1

    # Strip the leading/trailing '|' separators from the raw fields.
    new_des = [i.strip('|').strip() for i in train_df['description'].values]
    new_label = [i.strip('|').strip() for i in train_df['label1'].values]  # NOTE(review): unused
    b=[]
    # Merge label1 and label2 into one space-separated id string; label2 ids
    # are shifted by +17 so the two label spaces do not collide.
    for i in range(len(train_df)):
        a=train_df['label1'][i].strip('|')
        a1=train_df['label2'][i].strip()
        if a1!='':
            s=''
            # NOTE(review): this walks label2 CHARACTER by character, so it is
            # only correct if every label2 id is a single digit (0-9); an id
            # like "12" would be split into 1 and 2 — confirm against the data.
            # It also assumes `a` keeps a trailing space after strip('|');
            # otherwise the last label1 id and the first shifted id would fuse.
            for e in a1:
                if e!=' ':
                    e1=int(e)+17
                    s=s+str(e1)+' '
            a=a+s
            a=a.rstrip()
        b.append(a)

    train_df['description'] = new_des
    train_df['label'] = b

    # Stratify the 5 folds on the full merged label string.
    folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=2021).split(np.arange(train_df.shape[0]),
                                                                               train_df.label.values)
    kfold_best = []
    for fold, (trn_idx, val_idx) in enumerate(folds):
        device = torch.device("cuda:0")
        HIDDEN_SIZE = 128
        BATCH_SIZE = 16
        val_batch_size = 32
        N_LAYER = 2
        N_CHARS = 859  # embedding vocabulary size (token ids 0..857 + pad id 858)
        N_COUNTRY = 29  # output size: 17 label1 ids + 12 shifted label2 ids (per the +17 offset above)
        N_EPOCHS = 50

        trainSet = NameDataset(train_df,trn_idx)
        trainLoader = DataLoader(trainSet, batch_size=BATCH_SIZE, shuffle=True)
        val_dataset = NameDataset(train_df,val_idx)
        val_loader = DataLoader(val_dataset, batch_size=val_batch_size, shuffle=False)

        classifier = RNNClassifier(N_CHARS, HIDDEN_SIZE, N_COUNTRY, N_LAYER)
        classifier.to(device)

        # Multi-label objective: sigmoid + BCE over the 29-dim multi-hot target.
        criterion = torch.nn.BCEWithLogitsLoss()
        optimizer = torch.optim.AdamW(classifier.parameters(), lr=1e-3, weight_decay=5e-4)
        start = time.time()
        print("Training for %d epochs..." % N_EPOCHS)
        acc_list = []
        # Cosine annealing with warm restarts, stepped per batch in trainModel.
        lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=3, T_mult=2, eta_min=1e-5,
                                                                            last_epoch=-1)
        best_loss=trainModel(lr_scheduler)
        kfold_best.append(best_loss)
    print("local cv:",kfold_best,np.mean(kfold_best))



