from models.lstm import LSTM
import torch
import torch.nn as nn
from models.radam import RAdam
from models.focal_loss import FocalLoss
from datasets.words_data import Word_data
from torch.utils.data import DataLoader

from tqdm import tqdm
BATCH_SIZE= 128

# device = torch.device("cpu")
# Use the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Train/val splits carry labels (is_train=True); the test split does not.
train_data = Word_data(r'data/track1_round1_train_20210222.csv', is_train=True)
val_data = Word_data(r'data/track1_round1_val_20210222.csv', is_train=True)
test_data = Word_data(r'data/track1_round1_testA_20210222.csv',is_train=False)
# Only the training loader shuffles; eval/test order must stay stable.
train_data_loader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=True)
val_data_loader = DataLoader(val_data, batch_size=BATCH_SIZE, shuffle=False)
test_data_loader = DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=False)  # load data with DataLoader

# 17 output classes; need_embedding=True — presumably the model owns its
# token-embedding layer (TODO confirm against models/lstm.py).
model = LSTM(num_class=17,need_embedding=True).to(device)
epoch=50
optimizer = RAdam(model.parameters(),lr=0.001)
# Multi-label objective: independent per-class sigmoid + BCE on raw logits.
criterion = nn.BCEWithLogitsLoss()
# Best-checkpoint tracking state for the training loop below.
min_val_loss = float('inf')
min_epoch = 0

# Train for `epoch` epochs; after each epoch, evaluate on the validation set
# and checkpoint the weights whenever the summed validation loss improves.
cur_model_w = model.state_dict()
for ep in range(1,epoch+1):  # `ep`, not `iter`: don't shadow the builtin
    print('------------------%i--------------------'%ep)
    train_loss = 0.0
    model.train()
    for i, training_data in enumerate(tqdm(train_data_loader)):
        report_ids, words_list, labels = training_data
        inputs = words_list.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        output = model(inputs)
        # BCEWithLogitsLoss takes raw logits and float multi-hot targets.
        loss = criterion(output, labels.float())

        loss.backward()
        optimizer.step()
        train_loss += loss.item()
    # Summed (not averaged) per-batch loss over the epoch.
    print(train_loss)

    model.eval()
    val_loss = 0.0
    # `val_batch`, not `val_data`: the original name shadowed the
    # module-level validation dataset after the first epoch.
    for i, val_batch in enumerate(tqdm(val_data_loader)):
        report_ids, word_vec_arrays, labels = val_batch
        inputs = word_vec_arrays.to(device)
        labels = labels.to(device)
        with torch.no_grad():
            output = model(inputs)
            loss = criterion(output, labels.float())

        val_loss += loss.item()

    print(val_loss)
    # Keep the best weights seen so far, both in memory and on disk.
    if val_loss<min_val_loss:
        min_val_loss = val_loss
        min_epoch = ep
        cur_model_w = model.state_dict()
        torch.save(cur_model_w, 'save/lstm_embedding.pth')

# Restore the best checkpoint and re-confirm its validation loss.
state_dict = torch.load('save/lstm_embedding.pth')
model.load_state_dict(state_dict)
print(min_epoch,min_val_loss)
predictions = []
model.eval()
val_loss = 0.0
for batch in tqdm(val_data_loader):
    _report_ids, features, targets = batch
    features = features.to(device)
    targets = targets.to(device)
    with torch.no_grad():
        batch_loss = criterion(model(features), targets.float())
    val_loss += batch_loss.item()

print('Val Loss:',val_loss)
# Run inference on the test set and write one line per report:
# "<report_id>|,|<p1 p2 ... p17>" (space-separated per-class probabilities).
with open('results.csv','w') as file:
    # `test_batch`, not `test_data`: avoid shadowing the module-level dataset.
    for i, test_batch in enumerate(tqdm(test_data_loader)):
        report_ids, word_vec_arrays, = test_batch
        inputs = word_vec_arrays.to(device)
        with torch.no_grad():
            outputs = model(inputs)
        # BUG FIX: the model was trained with BCEWithLogitsLoss, i.e.
        # independent per-class logits, so per-class probabilities come from
        # sigmoid. softmax wrongly forces the 17 labels to sum to 1.
        outputs = torch.sigmoid(outputs)
        results = outputs.cpu().numpy()
        for j,result in enumerate(results):
            result_str = ' '.join(str(x) for x in result)
            file.write(report_ids[j] + '|,|' + result_str)
            file.write('\n')
    print('Done')



#
# for i,(report_id, words), in enumerate(testing_data_list):
#     result = []
#     for j in range(len(label_list[i])):
#         result.append(str(predictions[j][i]))
#     print(report_id+'|,|'+' '.join(result))