import numpy as np
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch.nn as nn
from net import RegressionNet
from dataset.dataLoader import CreateDataset
import os
import shutil
import logging
import pandas as pd
from vision import plot_loss, plot_output, plot_corr, show_predict_result
from utils import save_equation
from utils import Avg

class Trainer:
    """End-to-end driver for a small regression experiment.

    Owns the dataset, network, optimizer, and the on-disk layout under
    ``results/`` (model checkpoints, plots, logs).  Construction only
    prepares directories, logging, and the dataset; the network itself is
    built lazily by ``train()`` or ``load_model()``.
    """

    def __init__(self, args):
        # args is an argparse-style namespace (season, sheet_name, step_back,
        # device, epochs, in_features, n, varity_all, logger, ...).
        self.args = args

        self.data = CreateDataset(args)
        self._init_dir()
        self._init_logger()

    def _filename(self, step_back=None):
        """Return the shared run identifier ``<season>_<sheet>[_step_back]``.

        Used for model checkpoints, plots, and logs so all artifacts of one
        run share a name.  ``step_back=None`` means "use the current
        ``args.step_back`` flag".
        """
        if step_back is None:
            step_back = self.args.step_back
        suffix = '_step_back' if step_back else ''
        return self.args.season + '_' + self.args.sheet_name + suffix

    def _init_logger(self):
        """Attach a file handler so the run log lands under ``results/``."""
        self.logger = self.args.logger
        add = '_step_back' if self.args.step_back else ''
        # mode="w": each run overwrites the previous log of the same kind.
        handler = logging.FileHandler('results/log%s.txt' % add, mode="w")
        handler.setLevel(logging.INFO)
        handler.setFormatter(logging.Formatter("%(message)s %(asctime)s"))
        self.logger.addHandler(handler)

    def print_args(self):
        """Log every attribute of ``self.args`` as an aligned key/value table."""
        self.logger.info("-------------------  args  ------------------")
        for key, value in vars(self.args).items():
            self.logger.info("%20s:%-15s" % (key, value))
        self.logger.info("-------------------  args  ------------------\n")

    def _init_dir(self):
        """Create the ``results/`` directory tree (idempotent)."""
        for path in ('results/',
                     'results/model/',
                     'results/Img/',
                     'results/Img/loss',
                     'results/Img/output',
                     'results/Img/step_back/',
                     'results/Img/all/'):
            # makedirs(exist_ok=True) replaces the racy exists()+mkdir pattern.
            os.makedirs(path, exist_ok=True)
        self.save_path = 'results/'

    def reset_args(self, args):
        """Swap in a new args namespace (e.g. between seasons/sheets)."""
        self.args = args

    def _init_model(self):
        """Build the network, loss functions, optimizer, and LR scheduler."""
        in_features = self.args.in_features
        if self.args.step_back:
            # The stepwise-regression variant trains on only n selected features.
            in_features = self.args.n
        self.net = RegressionNet(in_features, 1)
        self.net.to(self.args.device)
        self.optimizer = torch.optim.Adam(self.net.parameters())
        self.criterion = nn.MSELoss()
        self.l1_criterion = nn.L1Loss()
        # int(): ReduceLROnPlateau expects an integral patience; epochs/4 is a
        # float under true division.  Flooring keeps the trigger epoch
        # unchanged for integer bad-epoch counts (nbe > 2.5 <=> nbe > 2).
        self.scheduler = ReduceLROnPlateau(self.optimizer,
                                           patience=int(self.args.epochs / 4),
                                           factor=0.5,
                                           verbose=True,
                                           )

    def _load_data(self):
        """Fetch the DataLoader and the feature-selection support mask."""
        self.dataLoader = self.data.getDataLoader(self.args.season, self.args.sheet_name)
        self.support = self.data.get_support()

    def save_model(self):
        """Serialize the network weights to ``results/model/<run>.pt``."""
        filename = self._filename() + '.pt'
        torch.save(self.net.state_dict(), self.save_path + 'model/' + filename)

    def load_model(self):
        """Restore trained weights; with ``varity_all`` also load the stepwise model."""
        self._init_model()
        if self.args.varity_all:
            # Second network: the stepwise-regression model over n features.
            self.net2 = RegressionNet(self.args.n, 1)
            name = self._filename(step_back=True) + '.pt'
            self.net2.load_state_dict(torch.load(self.save_path + 'model/' + name, map_location='cpu'))
            # Temporarily flip step_back so CreateDataset yields the reduced
            # feature set for the comparison loader, then restore the flag.
            saved_flag = self.args.step_back
            self.args.step_back = True
            data2 = CreateDataset(self.args)
            self.dataLoader2 = data2.getDataLoader(self.args.season, self.args.sheet_name)
            self.args.step_back = saved_flag

        filename = self._filename() + '.pt'
        print(filename)
        self.net.load_state_dict(torch.load(self.save_path + 'model/' + filename, map_location="cpu"))
        self._load_data()

    def init_eval(self):
        """Wipe the plot directory of the current mode and recreate the tree."""
        if self.args.step_back:
            shutil.rmtree('results/Img/step_back')
        else:
            shutil.rmtree('results/Img/output')
        self._init_dir()

    def varity(self):
        """Evaluate the loaded model(s), then dump plots and the fitted equation.

        NOTE(review): only the last batch of each loader is kept; with a
        multi-batch loader earlier batches are silently discarded — this
        presumably relies on the loaders yielding one full-dataset batch.
        Confirm against CreateDataset.
        """
        self.load_model()
        # Plot/equation filenames never carry the _step_back suffix here.
        filename = self._filename(step_back=False)

        for date, inputs, targets in self.dataLoader:
            inputs = inputs.to(self.args.device)
            targets = targets.to(self.args.device)
            outputs = self.net(inputs)
        if self.args.varity_all:
            for date, inputs, targets in self.dataLoader2:
                inputs = inputs.to(self.args.device)
                targets = targets.to(self.args.device)
                step_back_outputs = self.net2(inputs)

        l1_loss = self.l1_criterion(targets.squeeze(), outputs.squeeze())
        l2_loss = self.criterion(targets.squeeze(), outputs.squeeze())
        inputs = inputs.detach().cpu().numpy()
        targets = targets.detach().cpu().view(-1, 1).numpy()
        outputs = outputs.detach().cpu()
        inputs_columns, targets_columns = self.data.getcolumns()

        out_dir = "output"  # renamed from `dir`, which shadowed the builtin
        if self.args.step_back:
            out_dir = "step_back"
        elif self.args.varity_all:
            out_dir = 'all'

        if not self.args.varity_all:
            # Correlation heatmap needs the raw feature/target table.
            df = pd.DataFrame(np.concatenate([inputs, targets], axis=1),
                              columns=inputs_columns + targets_columns)
            plot_corr(df, 'results/Img/%s/' % out_dir + filename)
            data = {'output': outputs.squeeze().numpy()}
        else:
            # step_back_outputs still carries grad history, so detach it here.
            data = {'bp': outputs.squeeze().numpy(),
                    '逐步回归': step_back_outputs.detach().squeeze().numpy()}
        plot_output(targets.squeeze(), data, list(date), 'results/Img/%s/' % out_dir + filename)
        show_predict_result(targets.squeeze(), outputs.squeeze().numpy(),
                            'results/Img/%s/%s_scatter.png' % (out_dir, filename))

        save_equation(support=self.support,
                      weights=self.net.state_dict()['net.0.weight'][0],
                      bias=self.net.state_dict()['net.0.bias'][0],
                      sign=filename,
                      l1_loss=l1_loss,
                      l2_loss=l2_loss,
                      path='results/Img/%s/' % out_dir + 'other.csv'
                      )

    def train(self):
        """Full training loop: log args, train, checkpoint, plot the loss curve."""
        self.print_args()
        self._init_model()
        self._load_data()
        losses = []

        for epoch in range(1, self.args.epochs + 1):
            avg = Avg()
            for date, inputs, targets in self.dataLoader:
                inputs = inputs.to(self.args.device)
                targets = targets.to(self.args.device)
                outputs = self.net(inputs)
                # Squeeze BOTH sides: (N,1) vs (N,) would otherwise broadcast
                # to an N x N comparison and silently compute the wrong loss.
                loss = self.criterion(outputs.squeeze(), targets.squeeze())
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                print("iter: %s/%s  [==============]  loss: %-.4f" % (epoch, self.args.epochs, loss.item()))
                losses.append(loss.item())
                avg(loss.item())

            # Plateau scheduler keys off the mean loss of the epoch.
            self.scheduler.step(avg.mean)

        self.logger.info("\n\n\n\n")
        self.save_model()
        plot_loss(losses, 'results/Img/loss/' + self._filename() + '.png')