# Ablation study on a parametric PDE
# TODO: needs to be redone
# Tests the influence of the iterative linear solver and of the maximal number of iterations 'K'

import sys
sys.path.append('../')

import torch
import numpy as np
import torch.nn.functional as F
from utils import *

from MyPlot import *
from tqdm import tqdm
from torch.utils.data import DataLoader, Dataset
from BaseTrainer import BaseTrainer
from itertools import product
from scipy.sparse.linalg import spsolve
from demo1 import f0


def hard_encode(x, gd):
	"""Embed the Dirichlet boundary: pad `x` with one ring of cells equal to `gd`."""
	return F.pad(x, (1, 1, 1, 1), mode="constant", value=gd)

# OldTrainDs is designed for the problem with random diffusive coefficient
# class OldTrainDs(Dataset):
# 	def __init__(self, GridSize, dtype, device, trainN, area=((0, 0), (1, 1))):
# 		self.GridSize = GridSize
# 		self.dtype = dtype
# 		self.device = device
# 		(left, bottom), (right, top) = area
# 		self.h = (right - left) / (GridSize - 1)
# 		xx, yy = np.meshgrid(
# 			np.linspace(left, right, GridSize), np.linspace(bottom, top, GridSize)
# 		)
# 		self.force = normal(xx, yy, self.h, [(right-left)/2, (top - bottom)/2])
# 		# self.force = f0(xx, yy, k=4)
# 		# self.xx, self.yy = torch.from_numpy(xx), torch.from_numpy(yy)
# 		self.area = area
# 		self.trainN = trainN
		
# 	def __len__(self):
# 		return self.trainN

# 	def __getitem__(self, index):
# 		force = torch.from_numpy(self.force).to(self.dtype).to(self.device)[None, ...]
		
# 		kappa = np.random.uniform(0.1, 2, (self.GridSize, self.GridSize))
# 		kappa = torch.from_numpy(kappa).to(self.dtype).to(self.device)[None, ...]
# 		data = torch.clone(torch.detach(kappa))
# 		return data, force, kappa
	
# class OldValDs(OldTrainDs):
# 	def __getitem__(self, index):
# 		kappa = np.random.uniform(0.1, 2, (self.GridSize, self.GridSize))
# 		A = reaction_A(self.GridSize, kappa).tocsr()
# 		b = reaction_b_dir(self.force, 0, self.h)
# 		ans = spsolve(A, b).reshape(self.GridSize, self.GridSize)
		
# 		force = torch.from_numpy(self.force).to(self.dtype).to(self.device)[None, ...]
		
# 		kappa = torch.from_numpy(kappa).to(self.dtype).to(self.device)[None, ...]
# 		data = torch.clone(torch.detach(kappa))
# 		ans = torch.from_numpy(ans).to(self.dtype).to(self.device)[None, ...]

# 		return data, force, kappa, ans
	
# TrainDs is designed for the problem with parametric diffusive coefficient
# kappa = exp(-\mu (x**2 + y**2)), where \mu \in [0, 1]

class TrainDs(Dataset):
	"""Training dataset for the parametric diffusion problem.

	The diffusion coefficient is kappa(mu) = exp(-mu * (x**2 + y**2)) with
	mu drawn uniformly from [0, 1]; the source term is constant one.
	"""

	def __init__(self, GridSize, dtype, device, trainN, area=((-1, -1), (1, 1))):
		self.GridSize = GridSize
		self.dtype = dtype
		self.device = device
		self.area = area
		self.trainN = trainN

		(left, bottom), (right, top) = area
		# Uniform mesh spacing, computed from the horizontal extent.
		self.h = (right - left) / (GridSize - 1)
		self.xx, self.yy = np.meshgrid(
			np.linspace(left, right, GridSize),
			np.linspace(bottom, top, GridSize),
		)

		# One parameter sample per training item, fixed at construction time.
		self.mus = np.random.uniform(0, 1, trainN)
		# Constant unit forcing on the full grid.
		self.force = np.ones((GridSize, GridSize))

	def __len__(self):
		return self.trainN

	def kappa(self, mu):
		"""Parametric diffusion coefficient exp(-mu * (x**2 + y**2))."""
		return np.exp(-mu * (self.xx ** 2 + self.yy ** 2))

	def _as_tensor(self, arr):
		# numpy (G, G) -> torch (1, G, G) on the configured dtype/device
		return torch.from_numpy(arr).to(self.dtype).to(self.device)[None, ...]

	def __getitem__(self, index):
		force = self._as_tensor(self.force)
		kappa = self._as_tensor(self.kappa(self.mus[index]))
		# The network input is the coefficient itself, detached from any graph.
		data = kappa.detach().clone()
		return data, force, kappa
	
class ValDs:
	"""Validation dataset backed by precomputed coefficients and solutions.

	Reads the validation diffusion coefficients and the corresponding
	reference solutions from ``./DLData/{GridSize}/Demo2/``; the source
	term is constant one, matching the training set.
	"""

	def __init__(self, GridSize, dtype, device, valN, area=((-1, -1), (1, 1))):
		self.GridSize = GridSize
		self.dtype = dtype
		self.device = device
		self.area = area
		self.valN = valN

		# Precomputed validation pairs (coefficient, reference solution).
		self.kappas = np.load(f'./DLData/{GridSize}/Demo2/ValKappa.npy')
		self.sols = np.load(f'./DLData/{GridSize}/Demo2/ValSol.npy')
		self.force = np.ones((GridSize, GridSize))

		(left, bottom), (right, top) = area
		self.h = (right - left) / (GridSize - 1)
		self.xx, self.yy = np.meshgrid(
			np.linspace(left, right, GridSize),
			np.linspace(bottom, top, GridSize),
		)

	def __len__(self):
		return self.valN

	def __getitem__(self, index):
		def to_tensor(arr):
			return torch.from_numpy(arr).to(self.dtype).to(self.device)[None, ...]

		force = to_tensor(self.force)
		kappa = to_tensor(self.kappas[index])
		ans = to_tensor(self.sols[index])
		data = kappa.detach().clone()
		return data, force, kappa, ans
		
class JacGenerator(torch.nn.Module):
	"""Jacobi-sweep label generator for the diffusion problem.

	Starting from a network prediction of the interior values, runs
	``max_iter`` Jacobi sweeps of the 5-point finite-difference stencil
	(applied via conv2d) with Dirichlet value ``gd`` on the boundary, and
	returns the smoothed iterate as a detached training label.
	"""

	def __init__(
		self, batch_size, GridSize, dtype, device, max_iter, area, gd=0
	):
		super().__init__()
		self.batch_size = batch_size
		self.GridSize = GridSize
		(left, bottom), (right, top) = area
		# Mesh spacing from the horizontal extent of the domain.
		self.h = (right - left) / (GridSize - 1)
		self.dtype = dtype
		self.device = device
		self.max_iter = max_iter
		self.gd = gd
		# Re-attach the Dirichlet boundary: one ring of cells set to gd.
		self.hard_encode = lambda x: F.pad(x, (1, 1, 1, 1), "constant", value=self.gd)

		# 3x3 stencils of the discrete operator, applied as conv kernels.
		cross = [[0, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0]]
		self.k1 = self._get_kernel(cross)
		self.k2 = self._get_kernel(cross)
		self.k3 = self._get_kernel([[0, 0.5, 0], [0.5, 2, 0.5], [0, 0.5, 0]])

	def _get_kernel(self, k):
		"""Turn a 3x3 stencil into a (1, 1, 3, 3) conv kernel on the right dtype/device."""
		kernel = torch.tensor(k, requires_grad=False)
		return kernel.view(1, 1, 3, 3).repeat(1, 1, 1, 1).to(self.dtype).to(self.device)

	def jac_step(self, pre, f, w):
		"""One Jacobi sweep; `pre` is the interior iterate, `f` the forcing, `w` = kappa."""
		u = self.hard_encode(pre)

		rhs = f[..., 1:-1, 1:-1] * self.h ** 2
		# Off-diagonal neighbour contributions, split as in the discretization.
		neigh_w = F.conv2d(u, self.k1) * w[..., 1:-1, 1:-1]
		neigh_u = F.conv2d(w * u, self.k2)
		diag = F.conv2d(w, self.k3)
		return (rhs + neigh_w + neigh_u) / diag

	def forward(self, pre, f, kappa):
		"""Run max_iter Jacobi sweeps from `pre`; all inputs are detached first."""
		with torch.no_grad():
			u = pre.detach().clone()
			f = f.detach().clone()
			kappa = kappa.detach().clone()
			for _ in range(self.max_iter):
				u = self.jac_step(u, f, kappa)
		return u

class DescentGenerator(JacGenerator):
	"""Steepest-descent label generator.

	Reuses the stencils from JacGenerator but updates the iterate along the
	residual direction with the exact line-search step size
	alpha = <r, r> / <r, A r>.
	"""

	def Ax(self, x, w):
		"""Apply the discrete operator A to the padded iterate `x` (`w` = kappa)."""
		with torch.no_grad():
			neigh_w = F.conv2d(x, self.k1) * w[..., 1:-1, 1:-1]
			neigh_u = F.conv2d(w * x, self.k2)
			diag = F.conv2d(w, self.k3)
			return diag * x[..., 1:-1, 1:-1] - neigh_w - neigh_u

	def step(self, pre, f, w):
		"""One steepest-descent update of the interior iterate `pre`."""
		padded = self.hard_encode(pre)
		b = f[..., 1:-1, 1:-1] * self.h ** 2
		r = b - self.Ax(padded, w)

		# NOTE(review): the residual is padded with the Dirichlet value gd;
		# for gd != 0 a zero extension would seem more natural — confirm.
		Ar = self.Ax(self.hard_encode(r), w)
		alpha = torch.sum(r ** 2, dim=(1, 2, 3), keepdim=True) / torch.sum(r * Ar, dim=(1, 2, 3), keepdim=True)
		return pre + alpha * r

	def forward(self, pre, f, kappa):
		"""Run max_iter descent steps from `pre`; all inputs are detached first."""
		with torch.no_grad():
			u = pre.detach().clone()
			f = f.detach().clone()
			kappa = kappa.detach().clone()
			for _ in range(self.max_iter):
				u = self.step(u, f, kappa)
		return u
class Trainer(BaseTrainer):
	def __init__(
		self,
		gd=0,
		method = 'Jac-3',
		*args,
		**kwargs,
		):
		self.gd = gd
		self.method = method
		super().__init__(*args, **kwargs)
		self.h = 1.0 / (self.GridSize - 1)
		self.generator = self.init_generator(method)
	
	def config_optimizer(self, lr):
		self.optimizer = torch.optim.Adam(self.net.parameters(), lr=lr)
		# self.optimizer = torch.optim.LBFGS(self.net.parameters(), lr=lr, max_iter=1, history_size=10, line_search_fn='strong_wolfe')
		self.optimizer.zero_grad()
		self.lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(self.optimizer, gamma=0.95)

	@property
	def name(self):
		return f"{self.tag}-{self.GridSize}-{self.method}-{self.trainN}"

	def hyper_param_need2save(self):	
		param = {
			"gd": self.gd,
			"GridSize": self.GridSize,
			"area": self.area,
			"trainN": self.trainN,
			"valN": self.valN,
			"Batch_size": self.batch_size,
			"lr": self.lr,
			"epochs": self.total_epochs,
			"tag": self.tag,
			"net_kwargs": self.net_kwargs,
		}
		return param

	def init_traindl(self):
		train_ds = TrainDs(self.GridSize, self.dtype, self.device, self.trainN, self.area)
		self.train_dl = DataLoader(train_ds, self.batch_size, shuffle=True)

	def init_valdl(self):
		val_ds = ValDs(self.GridSize, self.dtype, self.device, self.valN, self.area)
		self.val_dl = DataLoader(val_ds, 1, shuffle=False)

	def reboot(self):
		self.init_traindl()
		self.init_valdl()
		self.config_optimizer(self.lr)
		# self.init_generator(self.method)

	def epoch_reboot(self):
		pass

	def init_generator(self, method):
		generator_method, max_iter = method.split('-')
		max_iter = int(max_iter)
		match generator_method:
			case 'Jac':
				generator = JacGenerator(
					self.batch_size,
					self.GridSize,
					self.dtype,
					self.device,
					max_iter,
					self.area,
					self.gd,
				)
			case 'Desc':
				generator = DescentGenerator(
					self.batch_size,
					self.GridSize,
					self.dtype,
					self.device,
					max_iter,
					self.area,
					self.gd,
				)
		return generator
	
	def train_step(self, x, f, kappa):
		pre = self.net(x)
		with torch.no_grad():
			label = self.generator(torch.clone(torch.detach(pre)), f, kappa)

		train_loss = self.loss_fn(pre, label)
		
		self.optimizer.zero_grad()
		train_loss.backward()
		self.optimizer.step()
		self.optimizer.zero_grad()

		return train_loss.item()

	def train_loop(self):
		self.net.train()
		for x, f, kappa in tqdm(
			self.train_dl, position=1, leave=True, desc="Training Loop:"
		):
			loss_val = self.train_step(x, f, kappa)
			self.train_global_idx += 1
			self.save_best_train_error(loss_val)
			self.writer.add_scalar("Train-Loss", loss_val, self.train_global_idx)

	def val_step(self, x, f, kappa, ans):
		pre = self.net(x)
		real_loss = self.loss_fn(hard_encode(pre, self.gd), ans)

		self.val_global_idx += 1
		self.writer.add_scalar("Val-RealLoss", real_loss.item(), self.val_global_idx)
		return pre, real_loss.item()

	def val_loop(self):
		self.net.eval()

		with torch.no_grad():
			for x, f, kappa, ans in self.val_dl:
				pre, real_loss = self.val_step(x, f, kappa, ans)
				self.val_global_idx += 1

				self.save_best_val_real(real_loss)

			self.val_plot(
				pre=hard_encode(pre, self.gd),
				kappa=kappa,
				ans=ans,
			)
			# self.writer.add_figure("ValFigure", fig, self.val_global_idx)
			
	def val_plot(self, pre, kappa, ans):
		pre = pre.cpu().numpy().squeeze()
		ans = ans.cpu().numpy().squeeze()
		kappa = kappa.cpu().numpy().squeeze()

		fig = multi_cof_draw_img(f"ValFigure", pre, ans, kappa, self.GridSize, a=1,)
		self.writer.add_figure(f"ValFigure", fig, self.val_global_idx)

		
	def fit_loop(self):
		self.reboot()
		for _ in tqdm(range(self.total_epochs[0]), desc='Training Epoch:', leave=True):
			self.epoch_reboot()
			self.train_loop()
			with torch.no_grad():
				self.val_loop()

			self.lr_scheduler.step()
			self.global_epoch_idx += 1
			torch.save(self.net.state_dict(), f'{self.model_save_path}/last.pt')


if __name__ == "__main__":
	from utils import set_seed

	set_seed(0)

	grid_size = 256
	max_iter = 25  # maximal iterative times 'K' for the label generator
	tag = "Demo2-less"

	# UNet backbone configuration.
	net_kwargs = {
		"model_name": "UNet",
		"Block": "ResBottleNeck",
		"planes": 6,
		"in_channels": 1,
		"classes": 1,
		"GridSize": grid_size,
		"layer_nums": [2, 2, 2, 2],
		"factor": 2,
		"norm_method": "layer",
		"pool_method": "max",
		"padding": "same",
		"padding_mode": "zeros",
		"end_padding": "valid",
		"end_padding_mode": "zeros",
		"act": "tanh",
	}

	trainer = Trainer(
		gd=0,
		method=f'Jac-{max_iter}',
		dtype='float',
		device="cuda",
		area=((-1, -1), (1, 1)),
		GridSize=grid_size,
		trainN=10000,
		valN=100,
		batch_size=5,
		net_kwargs=net_kwargs,
		log_dir="./Demo2-logs",
		lr=1e-2,
		total_epochs=[150],
		tag=tag,
		loss_fn=F.mse_loss,
		model_save_path="./Demo2-model_save/",
		hyper_params_save_path="./Demo2-hyper_parameters",
	)
	trainer.fit_loop()
