from cmath import nan
from base import base
from metric import Metric
import numpy as np
import os
import torch
from torch.nn import functional as F
from metric import Scaller, Recorder
from utils.torch_utils import time_synchronized

class Train(base):
    """Training/validation driver for the like-prediction model.

    Runs optimization epochs over ``train_loader``, periodically logs
    AUC/logloss on a sliding window of recent batches, validates on
    ``valid_loader`` after each epoch, and optionally checkpoints the model.
    """

    def __init__(self, cfg, model, optimizer, train_loader, valid_loader, metric: Metric, log_writer, device):
        super().__init__(cfg)
        self.device = device
        self.train_loader = train_loader
        self.valid_loader = valid_loader
        self.model = model
        self.metric = metric
        self.optimizer = optimizer

        self.log_writer = log_writer
        self.loss_metric = Scaller()
        # Recorders accumulate per-batch labels/predictions between metric
        # flushes; they are reset whenever metrics are computed.
        self.predict_label_recorder = Recorder()
        self.real_label_recorder = Recorder()
        self.predict_value_recorder = Recorder()

    def set_weight_path(self, time):
        """Create (if missing) and remember the checkpoint directory for this run.

        ``time`` is used as the run-specific sub-directory name.
        """
        self.weight_path = os.path.join(self.train_cfg.get('log_dir'), 'weights', time)
        # exist_ok avoids the exists()/makedirs() race of the original code.
        os.makedirs(self.weight_path, mode=0o755, exist_ok=True)

    def __todevice(self, data):
        """Unpack one loader batch and move feature tensors to ``self.device``.

        Returns ``(u_item_id, content_features, behavior_tensors, Y_finish, Y_like)``.
        NOTE(review): the label slices are returned on the CPU (the original
        did not move them); callers transfer them to the device themselves.
        Column 0 of ``finish_like`` is the "finish" label, column 1 is "like".
        """
        (u_item_id, audio_feature, face_feature, time_feature,
         title_feature, video_feature, context_field_scaler,
         context_field_discrete, behavior_tensor, finish_like) = data
        behavior_discrete, behavior_duration, behavior_time = behavior_tensor

        content = tuple(t.to(self.device) for t in (
            audio_feature, face_feature, time_feature,
            title_feature, video_feature,
            context_field_scaler, context_field_discrete))
        behavior = tuple(t.to(self.device) for t in (
            behavior_discrete, behavior_duration, behavior_time))

        return (u_item_id.to(self.device), content, behavior,
                finish_like[:, 0:1], finish_like[:, 1:])

    def train_one_epoch(self, epoch):
        """Run one optimization pass over ``train_loader``, then validate.

        Logs windowed AUC/logloss every 100 steps and saves a checkpoint if
        ``set_weight_path`` was called beforehand.
        """
        self.model.train()
        steps = 0
        for data in self.train_loader:
            steps += 1
            self.optimizer.zero_grad()
            u_item_id, data_content, data_behavior, Y_finish, Y_like = self.__todevice(data)
            Y_like_target = Y_like.to(self.device)
            out = self.model(u_item_id, data_content, data_behavior)

            loss = self.model.loss(out, Y_like_target.to(torch.float32))
            loss.backward()
            self.optimizer.step()

            # detach() rather than .data: same values, but does not bypass
            # autograd's version tracking.
            y_predict = out.detach().cpu().numpy()
            self.real_label_recorder.update(Y_like)
            self.predict_value_recorder.update(y_predict)

            if steps % 100 == 0:
                Y_hat = np.squeeze(self.real_label_recorder())
                Y_pre_value = self.predict_value_recorder()

                log_loss = self.metric.log_loss(Y_hat, Y_pre_value)
                auc = self.metric.auc(Y_hat, Y_pre_value)
                self.info("epoch: {} \t loss: {:6f} \t auc: {:.5f} \t logloss: {:.5f}".format(epoch, loss.item(), auc, log_loss))

                self.real_label_recorder.reset()
                self.predict_value_recorder.reset()

        # BUGFIX: clear any partial window (< 100 trailing steps) so leftover
        # training records do not contaminate the validation metrics below —
        # valid_one_epoch reuses the same recorder instances.
        self.real_label_recorder.reset()
        self.predict_value_recorder.reset()

        self.valid_one_epoch()
        if hasattr(self, 'weight_path'):
            # NOTE(review): saving the full module pickles the model class;
            # consider state_dict() checkpoints — kept as-is for compatibility.
            torch.save(self.model, os.path.join(self.weight_path, "{:0>3}.pt".format(str(epoch))))

    @torch.no_grad()
    def valid_one_epoch(self):
        """Evaluate on ``valid_loader``; log AUC/logloss and inference speed."""
        self.model.eval()
        seen = 0
        t_infer = 0.0
        for data in self.valid_loader:
            seen += 1
            u_item_id, data_content, data_behavior, Y_finish, Y_like = self.__todevice(data)
            t = time_synchronized()
            out = self.model(u_item_id, data_content, data_behavior)
            t_infer += time_synchronized() - t

            self.real_label_recorder.update(Y_like)
            self.predict_value_recorder.update(out.detach().cpu().numpy())

        if seen == 0:
            # Empty validation loader: nothing to score; also avoids the
            # ZeroDivisionError in the speed report below.
            self.info("validation skipped: empty valid_loader")
            return

        Y_hat = self.real_label_recorder()
        Y_pre_value = self.predict_value_recorder()

        log_loss = self.metric.log_loss(Y_hat, Y_pre_value)
        auc = self.metric.auc(Y_hat, Y_pre_value)
        self.info("auc: {:.5f} \t logloss: {:.5f}".format(auc, log_loss))

        self.real_label_recorder.reset()
        self.predict_value_recorder.reset()

        self.info("Speed: %.1f ms inference per data at batchsize %g" % (t_infer / seen * 1E3, self.train_cfg.get("batch_size")))


