import torch
import pandas as pd
import numpy as np
from tqdm import tqdm
import os
@torch.no_grad()
def infer(model_list, result_path, test_data_loader, device='cuda', is_round=False):
    """Run ensemble inference and write averaged sigmoid probabilities to a file.

    Each (model, checkpoint) pair contributes one full pass over the test set;
    per-sample predictions are averaged across all passes and written as
    ``<report_id>|,|<p1 p2 ... pK>`` lines.

    Parameters
    ----------
    model_list : list of [model, ckpt_paths]
        Pairs of a model instance and a list of state-dict checkpoint paths
        to load into it, one inference pass per checkpoint.
    result_path : str
        Output text file path (overwritten).
    test_data_loader : DataLoader
        Yields ``(report_ids, word_vec_arrays)`` batches; must NOT shuffle,
        otherwise ids and predictions from different passes would misalign.
    device : str or torch.device
        Device to run inference on.
    is_round : bool
        If True, round probabilities to 2 decimals before writing.
    """
    result_list = []
    report_id_list = []
    # Total number of samples; used to collect report ids exactly once.
    # (The original compared against len(test_data_loader) — the BATCH count —
    # which truncated or duplicated the id list depending on batch size.)
    num_samples = len(test_data_loader.dataset)
    for model, ckpt_list in model_list:
        for pth_save_path in ckpt_list:
            # map_location lets CPU-only hosts load GPU-saved checkpoints.
            state_dict = torch.load(pth_save_path, map_location=device)
            model.load_state_dict(state_dict)
            model.to(device)
            model.eval()
            pres_list = []
            for test_data in tqdm(test_data_loader):
                report_ids, word_vec_arrays = test_data
                inputs = word_vec_arrays.to(device)
                # Gradients already disabled by the @torch.no_grad() decorator.
                outputs = model(inputs)
                # Multi-label setup: independent sigmoid per class.
                pres_list += outputs.sigmoid().detach().cpu().numpy().tolist()
                # Collect report ids only until we have one per sample
                # (i.e. during the first checkpoint pass).
                if len(report_id_list) < num_samples:
                    report_id_list += list(report_ids)
            result_list.append(pres_list)
    # Shape: (num_checkpoints, num_samples, num_classes).
    result_array = np.array(result_list)

    with open(result_path, 'w') as file:
        for j in range(result_array.shape[1]):
            # Average this sample's predictions over all ensemble members.
            result = np.ma.average(result_array[:, j], axis=0)
            if is_round:
                result = np.round(result, decimals=2)
            result_str = ' '.join(str(x) for x in result)
            file.write(report_id_list[j] + '|,|' + result_str)
            file.write('\n')
    print('Done')


if __name__ == '__main__':
    import time

    from torch.utils.data import DataLoader

    from datasets.words_data import Word_data
    from models.lstm import LSTM
    from models.textCNN import CNN_Text

    # Test split only (is_train=False): dataset yields (report_id, word_vec_array).
    test_data = Word_data(r'data/track1_round1_testB.csv', is_train=False)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # shuffle=False is required so predictions stay aligned with report ids
    # across the multiple inference passes inside infer().
    test_data_loader = DataLoader(test_data, batch_size=32, shuffle=False, num_workers=8)

    embed_num = 859  # vocabulary size for the textCNN embedding layer
    # Each entry pairs one model instance with the checkpoint files to load
    # into it; infer() runs one pass per checkpoint and averages everything.
    # (A previously-constructed spare LSTM instance was removed here — it was
    # never used and only wasted device memory.)
    model_list = [
        [LSTM(num_class=17, need_embedding=True).to(device),
         ['save/lstm_embedding_f4_20210329233954.pth',
          'save/lstm_embedding_f2_20210329230800.pth',
          'save/lstm_embedding_f1_20210329225202.pth',
          'save/lstm_embedding_f0_20210329223556.pth',
          'save/lstm_embedding_f0_20210405210911.pth',
          'save/lstm_embedding_f1_20210405212021.pth',
          'save/lstm_embedding_f2_20210405213425.pth',
          'save/lstm_embedding_f3_20210405215013.pth',
          'save/lstm_embedding_f4_20210405220448.pth',
          'save/lstm_embedding_f0_20210406233920.pth',
          'save/lstm_embedding_f1_20210406233920.pth',
          'save/lstm_embedding_f2_20210406233920.pth',
          'save/lstm_embedding_f3_20210406233920.pth',
          'save/lstm_embedding_f4_20210406233920.pth']
         ], [
            # infer() moves each model to `device` itself, so no .to(device) needed.
            CNN_Text(embed_num),
            [
                'save/textCNN_f0_20210329002915.pth',
                'save/textCNN_f1_20210329013455.pth',
                'save/textCNN_f2_20210329024031.pth',
                'save/textCNN_f3_20210329034607.pth',
                'save/textCNN_f4_20210329045143.pth',
            ]
        ]
    ]
    infer(model_list,
          result_path=r'result/Ensemble_%s.csv' % time.strftime("%Y%m%d%H%M%S", time.localtime()),
          test_data_loader=test_data_loader, device=device)