import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
from gensim.models import KeyedVectors
import utils
from Dataset.Dataset import *
from utils.doc_tool import *
from utils.array_tool import *
from utils.vis_tool import *
from utils.error_analysis import *


def validation(model, val_iter, val_losses):
    """Evaluate `model` on `val_iter` and append the mean NLL loss to `val_losses`.

    The model is put in eval mode for the pass and always restored to
    train mode afterwards. The device is inferred from the model's own
    parameters rather than read from a module-level global.

    Args:
    - model: network whose forward pass returns log-probabilities
      (suitable input for F.nll_loss).
    - val_iter: iterable of (data, label) batches.
    - val_losses: list that the mean validation loss is appended to.

    Returns:
    - The mean validation loss over all batches (nan if val_iter is empty).
    """
    # Infer the device from the model instead of depending on a global.
    device = next(model.parameters()).device
    model.eval()
    try:
        with torch.no_grad():
            batch_losses = [
                F.nll_loss(model(data.to(device)), label.long().to(device)).item()
                for data, label in val_iter
            ]
        # Guard the empty case explicitly (np.array([]).mean() warns and yields nan).
        mean_loss = float(np.mean(batch_losses)) if batch_losses else float('nan')
        val_losses.append(mean_loss)
    finally:
        # Restore training mode even if evaluation raises.
        model.train()
    return mean_loss
    
def init_network(model, method='kaiming'):
    """
    Initialize the parameters of a pytorch model in place.

    Weight tensors with 2+ dimensions receive the requested scheme;
    1-D weight vectors (e.g. BatchNorm/LayerNorm scale parameters) fall
    back to a plain normal init, because kaiming_normal_/xavier_normal_
    require at least 2 dimensions and raise on vectors. Biases are zeroed.

    Args:
    - model: our pytorch model (modified in place).
    - method: weight initialize method, 'kaiming' or 'xavier'.
    """
    assert method in ['kaiming', 'xavier'], 'Your initialization should be xavier or kaiming'
    for name, w in model.named_parameters():
        if 'weight' in name:
            if w.dim() < 2:
                # kaiming_/xavier_normal_ raise ValueError on 1-D tensors.
                nn.init.normal_(w)
            elif method == 'kaiming':
                nn.init.kaiming_normal_(w)
            else:
                # Only 'xavier' can reach here (guaranteed by the assert above).
                nn.init.xavier_normal_(w)
        elif 'bias' in name:
            nn.init.constant_(w, 0)

# Load the pretrained Chinese word2vec embeddings (plain-text w2v format).
w2v = KeyedVectors.load_word2vec_format('w2v/Lyric_ChineseEmbedding.txt',binary=False)

# Load the raw train/val splits and embed each example through the w2v
# vocabulary. dataset2mat / TextDataset come from the project's Dataset
# module (imported via star-import above) — exact output format lives there.
row_train = np.load('Dataset/data/train.npy')
row_val = np.load('Dataset/data/val.npy')
train_data = TextDataset(dataset2mat(row_train, w2v))
val_data = TextDataset(dataset2mat(row_val, w2v))

# trainval setup: device, model, hyperparameters, data loaders, optimizer.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
windows = [2, 3, 4]  # convolution window sizes (n-gram widths) for TextCNN
kernel_num = 256  # number of feature maps per window size
fc = []  # presumably sizes of extra hidden fc layers (empty = none) — confirm in TextCNN

# TextCNN is a project class brought in by the star-imports above.
model = TextCNN(windows=windows, kernel_num=kernel_num, fc=fc).to(device)
init_network(model, method='kaiming')
print(model)

# training hyperparameters
lr = 1e-4
epochs = 15
batch_size = 128
train_losses = []  # per-batch training losses
val_losses = []    # per-epoch mean validation losses

# NOTE(review): PadCollate(dim=0) presumably pads variable-length sequences
# within a batch along dim 0 — confirm in the project's utils.
train_iter = Data.DataLoader(train_data, batch_size=batch_size, shuffle=True,collate_fn=PadCollate(dim=0))
val_iter = Data.DataLoader(val_data, batch_size=batch_size, shuffle=True, collate_fn=PadCollate(dim=0))

optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.75) # multiply lr by 0.75 each scheduler step
# scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[3, 6, 8], gamma=0.1)
# optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)

# Run one validation pass before training to record the untrained baseline.
validation(model, val_iter, val_losses)

model.train()
for epoch in range(epochs):
    # train one epoch
    for data, label in train_iter:
        data = data.to(device)
        # Cast targets to long, consistent with the validation path;
        # F.nll_loss requires integer class indices.
        label = label.long().to(device)
        logit = model(data)
        loss = F.nll_loss(logit,label)
        train_losses.append(loss.item())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # Decay the learning rate AFTER the epoch's optimizer steps. Since
    # PyTorch 1.1, scheduler.step() must follow optimizer.step(); stepping
    # the scheduler first silently skips the initial learning-rate value.
    scheduler.step()

    # validate once per epoch and report the latest mean loss
    validation(model, val_iter, val_losses)
    print('epoch: %s\t\t test loss: %s' % (epoch, val_losses[-1]))
    
# visualization: plot the recorded training / validation loss curves
# (vis_loss is a project helper from utils.vis_tool).
vis_loss(train_losses, val_losses)

# Final evaluation pass over the validation set: collect predictions and
# ground-truth labels for the project's error-analysis report.
with torch.no_grad():
    model.eval()
    pred = []  # predicted class indices
    gt = []    # ground-truth class indices
    for data, label in val_iter:
        data = data.to(device)
        label = label.long().to(device)
        gt.extend(label.cpu().numpy().tolist())
        logit = model(data)
        # predicted class = argmax over the model's log-probabilities
        pred.extend(logit.argmax(dim=1).cpu().numpy().tolist())
pred = np.array(pred)
gt = np.array(gt)
error_analysis(pred, gt)
