#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import math
import time
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm


from torch.utils.tensorboard import SummaryWriter
from model import GaussSplattingPde1D
from options import Options
from problem import Poisson_1d

PI = math.pi


class Trainer():
    """Train a 1-D Gaussian-splatting model on a Poisson problem.

    The PDE residual ``u_xx = rhs`` is enforced at random collocation
    points in ``(0, 2*pi)`` and the exact solution is enforced at the two
    boundary points.  Optimization runs an Adam phase (with periodic
    warm restarts) followed by an optional L-BFGS phase; progress is
    logged to TensorBoard.
    """

    def __init__(self, args):
        self.args = args

        # Hyper-parameters taken from the parsed options.
        self.domain = args.domain
        self.case = args.case
        self.problem = Poisson_1d(self.case)
        self.strategy = args.strategy
        self.n_gauss = args.n_gauss
        self.n_pde = args.n_pde
        self.n_bc = args.n_bc
        self.lr = args.lr
        self.epochs_Adam = args.epochs_adam
        self.epochs_lbfgs = args.epochs_lbfgs
        self.device = args.device
        self.lam = args.lam  # weight of the boundary-condition loss term

        # Network
        self.model = GaussSplattingPde1D(args)
        self._name = self.model._name()

        # Criterion
        self.criterion = nn.MSELoss()

        # Optimizers: Adam with exponential LR decay, then L-BFGS.
        self.optim_adam = optim.Adam(self.model.parameters(), lr=self.lr)
        self.optim_lbfgs = optim.LBFGS(self.model.parameters(), max_iter=20)
        self.lr_scheduler_adam = optim.lr_scheduler.ExponentialLR(
            self.optim_adam, gamma=0.85)

        # Training tensors: random collocation points in (0, 2*pi) plus
        # the two Dirichlet boundary points.
        x_pde_train = np.random.rand(self.n_pde, 1) * 2 * PI
        x_bc_train = np.array([[0], [2*PI]])
        rhs_pde_train = self.problem.rhs(x_pde_train)
        rhs_bc_train = self.problem.u_exact(x_bc_train)

        self.x_pde_train = torch.from_numpy(x_pde_train).float().to(self.device)
        self.x_bc_train = torch.from_numpy(x_bc_train).float().to(self.device)
        self.rhs_pde = torch.from_numpy(rhs_pde_train).float().to(self.device)
        self.rhs_bc = torch.from_numpy(rhs_bc_train).float().to(self.device)

        # Uniform test grid used for the validation residual.
        test_data = np.linspace(0, 2 * PI, args.n_valid).reshape((args.n_valid, 1))
        test_data_rhs = self.problem.rhs(test_data)
        self.x_test = torch.from_numpy(test_data).float().to(self.device)
        self.rhs_test = torch.from_numpy(test_data_rhs).float().to(self.device)

        # Uniform (finer) grid used only for plotting predictions.
        plot_data = np.linspace(0, 2 * PI, args.n_plot).reshape((args.n_plot, 1))
        self.x_plot = torch.from_numpy(plot_data).float().to(self.device)

        # Global x-coordinate for the TensorBoard curves.
        self.step = 0

    def train(self):
        """Run the Adam phase then the L-BFGS phase, logging to TensorBoard."""
        tt = time.time()
        best_loss = float('Inf')

        self.model.to(self.device)
        self.writer = SummaryWriter(
            comment=f'_{self._name}')

        print('-------Start Training-------\n',
              f'device: {self.device}',
              f'model:{self._name}')

        for epoch in range(self.epochs_Adam):

            train_loss = self._train_adam(epoch)

            # Warm restart: rebuilding the optimizer and scheduler every
            # 1000 epochs resets Adam's moment estimates and jumps the
            # learning rate back to its initial value.
            if (epoch+1) % 1000 == 0:
                self.optim_adam = optim.Adam(self.model.parameters(), lr=self.lr)
                self.lr_scheduler_adam = optim.lr_scheduler.ExponentialLR(
                    self.optim_adam, gamma=0.85)

            if (epoch+1) == 1:
                print(f'start loss: {train_loss:.4e}')

            if (epoch+1) % 20 == 0:

                self.step += 1

                self.writer.add_scalar('train loss', train_loss, self.step)

                valid_loss = self._valid(self.step)
                if valid_loss < best_loss:
                    best_loss = valid_loss

                print('Adam:',
                      f'{epoch+1}({self.epochs_Adam}+{self.epochs_lbfgs}),',
                      f'train_loss {train_loss:.3e},',
                      f'valid_loss {valid_loss:.3e},',
                      f'lr {self.lr_scheduler_adam.get_last_lr()[0]:.2e},',
                      f'elapsed {time.time()-tt:.3f}s')

                tt = time.time()

        for epoch in range(self.epochs_Adam, self.epochs_Adam+self.epochs_lbfgs):
            train_loss = self._train_lbfgs()

            if (epoch+1) % 10 == 0:
                self.step += 1

                self.writer.add_scalar('train loss', train_loss, self.step)

                valid_loss = self._valid(self.step)
                if valid_loss < best_loss:
                    best_loss = valid_loss

                print(
                    'LBFGS:',
                    f'{epoch+1}({self.epochs_Adam}+{self.epochs_lbfgs})',
                    f'train_loss {train_loss:.3e}',
                    f'valid_loss {valid_loss:.3e}',
                    f'elapsed {time.time()-tt:.3f}s'
                )

                tt = time.time()

        self.writer.close()
        print('-------Train Complete-------')

    def _train_adam(self, epoch):
        """One Adam step on the combined PDE + boundary loss.

        Args:
            epoch: current epoch index; the LR scheduler is stepped every
                100 epochs.

        Returns:
            The scalar training loss as a Python float.
        """
        self.model.train()
        self.optim_adam.zero_grad()

        x = self.x_pde_train

        w = torch.ones_like(x[:, [0]])

        # Second derivative u_xx via two autograd passes; create_graph=True
        # keeps u_x and u_xx differentiable so loss.backward() can reach
        # the model parameters through them.
        x.requires_grad_(True)
        u = self.model(x)
        u_x = torch.autograd.grad(
            u, x, grad_outputs=w, create_graph=True)[0][:, [0]]
        u_xx = torch.autograd.grad(
            u_x, x, grad_outputs=w, create_graph=True)[0][:, [0]]
        x.detach_()  # stop tracking the reused collocation tensor

        res_pde = u_xx
        res_bc = self.model(self.x_bc_train)

        # loss = PDE residual + lam * boundary mismatch
        loss_pde = self.criterion(res_pde, self.rhs_pde)
        loss_bc = self.criterion(res_bc, self.rhs_bc)

        loss = loss_pde + self.lam * loss_bc

        # backward
        loss.backward()
        self.optim_adam.step()

        # Decay the learning rate every 100 epochs.
        if (epoch+1) % 100 == 0:
            self.lr_scheduler_adam.step()

        return loss.item()

    def _train_lbfgs(self):
        """One L-BFGS step on the combined PDE + boundary loss.

        Returns:
            The loss of the first closure evaluation as a Python float.
        """
        self.model.train()

        def closure():
            # L-BFGS re-evaluates this closure several times per step.
            self.optim_lbfgs.zero_grad()

            x = self.x_pde_train

            w = torch.ones_like(x[:, [0]])

            # Same second-derivative computation as in _train_adam.
            x.requires_grad_(True)
            u = self.model(x)
            u_x = torch.autograd.grad(
                u, x, grad_outputs=w, create_graph=True)[0][:, [0]]
            u_xx = torch.autograd.grad(
                u_x, x, grad_outputs=w, create_graph=True)[0][:, [0]]
            x.detach_()

            res_pde = u_xx
            res_bc = self.model(self.x_bc_train)

            loss_pde = self.criterion(res_pde, self.rhs_pde)
            loss_bc = self.criterion(res_bc, self.rhs_bc)

            loss = loss_pde + self.lam * loss_bc

            loss.backward()

            return loss

        # step() returns the loss from its initial closure call, so no
        # extra forward/backward pass is needed just for logging.
        loss = self.optim_lbfgs.step(closure)
        return loss.item()

    def _valid(self, step):
        """Compute the PDE-residual loss on the test grid and log to TensorBoard.

        Logs the validation loss, a prediction-vs-exact figure, and the
        plot-grid error norm.  Returns the validation loss as a float.
        """
        self.model.eval()

        x = self.x_test

        w = torch.ones_like(x[:, [0]])

        # forward; the first grad still needs create_graph so u_xx can be
        # taken, but no backward pass follows, so the second grad does not.
        x.requires_grad_(True)
        u = self.model(x)
        u_x = torch.autograd.grad(
            u, x, grad_outputs=w, create_graph=True)[0][:, [0]]
        u_xx = torch.autograd.grad(
            u_x, x, grad_outputs=w, create_graph=False)[0][:, [0]]
        x.detach_()

        res_pde = u_xx

        # Validation considers only the PDE residual (no boundary term).
        loss_pde = self.criterion(res_pde, self.rhs_test)

        valid_loss = loss_pde.item()
        self.writer.add_scalar('valid loss', valid_loss, step)

        # Plot prediction (red) against the exact solution (blue).
        # add_figure closes the figure by default, so no explicit close
        # is required here.
        fig = plt.figure(figsize=(10, 6))
        ax = fig.add_subplot(1, 1, 1)
        x_np = self.x_plot[:, 0].detach().cpu().numpy()
        u_np = self.model(self.x_plot)[:, 0].detach().cpu().numpy()
        u_exact = self.problem.u_exact(x_np)
        error = np.linalg.norm(u_np - u_exact)

        ax.plot(x_np, u_np, '-ro', markersize=3)
        ax.plot(x_np, u_exact, '-bo', markersize=2)

        self.writer.add_figure('Pred', fig, global_step=step)
        self.writer.add_scalar('Error', error, step)

        return valid_loss

    def plot_gauss_shape(self):
        """Plot the learned Gaussian components over [0, 2*pi]."""
        mean = self.model.mean.detach().cpu().numpy()
        weights = self.model.weights.detach().cpu().numpy()
        std = self.model.std.detach().cpu().numpy()

        x = np.linspace(0, 2 * PI, 500)
        X = x.reshape(500, 1)

        fig = plt.figure(figsize=(10, 6))
        ax = fig.add_subplot(1, 1, 1)

        # Broadcasts the (500, 1) grid against the per-Gaussian parameters,
        # giving one curve per Gaussian.  Assumes mean/weights/std broadcast
        # along the second axis -- confirm against GaussSplattingPde1D.
        target = weights * np.exp(- 0.5 * (X - mean)**2 / std**2)

        ax.plot(x, target)

        plt.show()



if __name__ == '__main__':
    # Parse the command-line options, then override a handful of them
    # for this particular experiment.
    args = Options().parse()

    # Select the compute device from the parsed CUDA flags.
    if args.cuda:
        args.device = torch.device(f"cuda:{args.cuda_id}")
    else:
        args.device = torch.device("cpu")

    # Optimization schedule: Adam only, no L-BFGS phase.
    args.epochs_adam = 50000
    args.epochs_lbfgs = 0
    args.lr = 1e-2

    # Model size, problem case, and discretization sizes.
    args.n_gauss = 50
    args.n_pde = 1000
    args.case = 6
    args.n_valid = 200
    args.n_plot = 500

    trainer = Trainer(args)
    trainer.train()
    trainer.plot_gauss_shape()