package fem2.strategies;

import inf.jlinalg.ArrayVector;
import inf.jlinalg.BLAM;
import inf.jlinalg.IMatrix;
import inf.jlinalg.IVector;
import fem2.Debugger;
import fem2.Model;
import fem2.enu.EchoLevelType;
import fem2.enu.MassMatrixType;
import fem2.enu.NewmarkType;
import fem2.jlinalg.DiagonalMatrix;
import fem2.jlinalg.Solver;

/**
 * Generalized Newmark-alpha time integration based on Chung & Hulbert (standard
 * form)
 * 
 * @author hbui
 * 
 */
public abstract class NewmarkTimeIntegrationStrategy extends ImplicitTimeIntegrationStrategy {

	/** Force-evaluation shift: forces are evaluated at t_(n+1-alphaf). */
	protected double alphaf;
	/** Inertia-evaluation shift: accelerations are evaluated at t_(n+1-alpham). */
	protected double alpham;
	/** Newmark parameter weighting the acceleration in the displacement update. */
	protected double beta;
	/** Newmark parameter weighting the acceleration in the velocity update. */
	protected double gamma;

	/**
	 * Creates the integrator with explicitly chosen generalized-alpha
	 * parameters.
	 * 
	 * @param m the finite element model
	 * @param massType type of mass matrix (lumped or consistent)
	 * @param alpha1 mass-proportional Rayleigh damping coefficient
	 * @param alpha2 stiffness-proportional Rayleigh damping coefficient
	 * @param alphaf force-evaluation shift parameter
	 * @param alpham inertia-evaluation shift parameter
	 * @param beta Newmark beta parameter
	 * @param gamma Newmark gamma parameter
	 */
	public NewmarkTimeIntegrationStrategy(Model m, MassMatrixType massType, double alpha1,
			double alpha2, double alphaf, double alpham, double beta, double gamma) {
		super(m, massType, alpha1, alpha2);
		this.alphaf = alphaf;
		this.alpham = alpham;
		this.beta = beta;
		this.gamma = gamma;
	}

	/**
	 * Creates the integrator from a named scheme and the desired spectral
	 * radius in the high-frequency limit; alphaf, alpham, beta and gamma are
	 * derived from {@code rhoInf} according to the chosen scheme.
	 * 
	 * @param m the finite element model
	 * @param massType type of mass matrix (lumped or consistent)
	 * @param alpha1 mass-proportional Rayleigh damping coefficient
	 * @param alpha2 stiffness-proportional Rayleigh damping coefficient
	 * @param type the Newmark scheme variant to parameterize
	 * @param rhoInf spectral radius at infinite frequency, controlling
	 *            numerical dissipation (1 = no dissipation)
	 * @throws IllegalArgumentException if {@code type} is not a supported
	 *             scheme
	 */
	public NewmarkTimeIntegrationStrategy(Model m, MassMatrixType massType, double alpha1,
			double alpha2, NewmarkType type, double rhoInf) {
		super(m, massType, alpha1, alpha2);
		// massType is already stored by the super constructor (the sibling
		// constructor relies on that), so it is not re-assigned here
		switch (type) {
		case NewmarkAlpha:
			this.alpham = 0;
			this.alphaf = 0;
			this.beta = 1 / Math.pow(rhoInf + 1, 2);
			this.gamma = (3 - rhoInf) / (2 * rhoInf + 2);
			break;
		case HilberAlpha:
			this.alpham = 0;
			this.alphaf = (1 - rhoInf) / (1 + rhoInf);
			this.beta = Math.pow(1 + this.alphaf, 2) / 4;
			this.gamma = 1.0 / 2 + this.alphaf;
			break;
		case BossakAlpha:
			this.alpham = (rhoInf - 1) / (rhoInf + 1);
			this.alphaf = 0;
			this.beta = Math.pow(1 - this.alpham, 2) / 4;
			this.gamma = 1.0 / 2 - this.alpham;
			break;
		case GeneralizedNewmarkAlpha:
			this.alpham = (2 * rhoInf - 1) / (rhoInf + 1);
			this.alphaf = rhoInf / (1 + rhoInf);
			this.beta = Math.pow(1 - alpham + alphaf, 2) / 4;
			this.gamma = 1.0 / 2 - this.alpham + this.alphaf;
			break;
		default:
			// an unsupported enum constant is a caller programming error, not a
			// JVM-level failure, so signal it with IllegalArgumentException
			throw new IllegalArgumentException("invalid Newmark method: " + type);
		}
	}

	@Override
	public void started(Solver leftHandSideSolver, ArrayVector rightHandSide, ArrayVector u) {
		super.started(leftHandSideSolver, rightHandSide, u);

		/*
		 * save the initial condition as the "old" state of the first time step
		 */
		uHat_old = (ArrayVector) uHat.clone();
		udHat_old = (ArrayVector) udHat.clone();
		uddHat_old = (ArrayVector) uddHat.clone();
	}

	@Override
	public void initializeSystemMatricesAndVectors(Solver leftHandSideSolver,
			ArrayVector rightHandSide, ArrayVector u) {
		int n = model.getSize();

		initializeSolver(leftHandSideSolver);

		rightHandSide.setSize(n);

		u.setSize(n);
	}

	/**
	 * Assembles the effective tangent stiffness and the effective residual of
	 * the generalized-alpha scheme for the current Newton iteration. On entry
	 * {@code kteff} is used as scratch space for the tangent stiffness; on exit
	 * it holds the effective (dynamic) stiffness.
	 */
	@Override
	public void assembleEffectiveSystem(IMatrix kteff, IVector reff) {
		int n = model.getSize();
		double dt = t - t_old;

		/*
		 * compute u_(n+1-alphaf) = (1-alphaf)*u_(n+1) + alphaf*u_n
		 */
		ArrayVector u_n_plus_1_minus_alphaf = (ArrayVector) uHat.clone();
		BLAM.scale(1 - alphaf, u_n_plus_1_minus_alphaf);
		BLAM.add(alphaf, uHat_old, u_n_plus_1_minus_alphaf);

		/*
		 * set the uHatGlobal for model
		 */
		model.setX(t, u_n_plus_1_minus_alphaf.getValues());

		/*
		 * compute M and Kt(u_n_plus_1_minus_alphaf)
		 */
		BLAM.zero(kteff);

		IMatrix M;
		if (massType == MassMatrixType.DIRECT_MASS_LUMPING) {
			// lumped mass needs only the diagonal
			M = new DiagonalMatrix(n);
		} else {
			// consistent mass shares the sparsity pattern of kteff
			M = (IMatrix) kteff.clone();
		}

		model.assembleKt(kteff);
		model.assembleM(M, massType);

		/****************************************
		 * compute reff. At this point kteff plays the role of kt
		 ****************************************/
		/*
		 * 1: compute r_(n+1-alphaf)
		 */
		model.assembleR(1 - alphaf, ((ArrayVector) reff).getValues(), t);
		model.assembleR(alphaf, ((ArrayVector) reff).getValues(), t_old);

		/*
		 * 2: compute reff = reff - ri(u_n_plus_1_minus_alphaf) TODO: check if
		 * we can optimize by introducing assembleRi(alpha,ri)
		 */
		ArrayVector ri = new ArrayVector(n);
		model.assembleRi(ri.getValues());
		BLAM.add(-1.0, ri, reff);
		/*
		 * 3: compute Reff = Reff - D*ud_(n+1-alphaf)(ud_(n+1)(u_(n+1))). At
		 * this point u_(n+1) = uHat. Rayleigh damping D = alpha1*M + alpha2*Kt
		 * is applied via the two multiplies below instead of forming D.
		 */
		IVector ud_n_plus_1_minus_alphaf = (IVector) uHat.clone();
		BLAM.add(-1.0, uHat_old, ud_n_plus_1_minus_alphaf);
		double coeff = ((1 - alphaf) * gamma / beta) / dt;
		BLAM.scale(coeff, ud_n_plus_1_minus_alphaf);
		BLAM.add(1.0 - coeff * dt, udHat_old, ud_n_plus_1_minus_alphaf);
		BLAM.add(-(1 - alphaf) * (gamma - 2 * beta) * dt / (2 * beta), uddHat_old,
				ud_n_plus_1_minus_alphaf);
		BLAM.multiply(-alpha1, BLAM.NO_TRANSPOSE, M, ud_n_plus_1_minus_alphaf, 1.0, reff);
		BLAM.multiply(-alpha2, BLAM.NO_TRANSPOSE, kteff, ud_n_plus_1_minus_alphaf, 1.0, reff);
		/*
		 * 4: compute Reff = Reff - M*udd_(n+1-alpham)
		 */
		IVector udd_n_plus_1_minus_alpham = (IVector) uHat.clone();
		BLAM.add(-1.0, uHat_old, udd_n_plus_1_minus_alpham);
		coeff = ((1 - alpham) / beta) / Math.pow(dt, 2);
		BLAM.scale(coeff, udd_n_plus_1_minus_alpham);
		BLAM.add(-coeff * dt, udHat_old, udd_n_plus_1_minus_alpham);
		BLAM.add(1.0 - (1 - alpham) / (2 * beta), uddHat_old, udd_n_plus_1_minus_alpham);
		BLAM.multiply(-1.0, BLAM.NO_TRANSPOSE, M, udd_n_plus_1_minus_alpham, 1.0, reff);

		/*
		 * compute Keff = (1-alphaf+alpha2*c_d)*Kt + (c_m+alpha1*c_d)*M where
		 * c_d and c_m are the Newmark velocity/acceleration coefficients
		 */
		double dampingCoeff = (gamma * (1 - alphaf) / beta) / dt;
		double massCoeff = ((1 - alpham) / beta) / Math.pow(dt, 2);
		BLAM.scale(1 - alphaf + alpha2 * dampingCoeff, kteff);
		BLAM.add(massCoeff + alpha1 * dampingCoeff, M, kteff);
	}

	/**
	 * Applies the Newton increment {@code dU} to the current displacement and
	 * re-derives velocity and acceleration via the standard Newmark relations.
	 */
	@Override
	public void updateSolution(IVector dU) {
		double dt = t - t_old;

		if (getEchoLevel().contain(EchoLevelType.DEBUG)) {
			Debugger.watch("dU = ", dU);
		}

		/*
		 * update current uHat with dU
		 */
		BLAM.add(1.0, dU, uHat);

		/*
		 * compute current ud_(n+1). For this scheme it is not necessary to
		 * update ud_(n+1) in each iteration; strictly it only needs updating
		 * once per time step. We update it every iteration anyway to stay
		 * consistent with the other schemes. TODO: keep it in mind and change
		 * if this ever becomes a bottleneck
		 */
		BLAM.copy(uHat, udHat);
		BLAM.add(-1.0, uHat_old, udHat);
		BLAM.scale((gamma / beta) / dt, udHat);
		BLAM.add(-(gamma - beta) / beta, udHat_old, udHat);
		BLAM.add(-(gamma - 2 * beta) * dt / (2 * beta), uddHat_old, udHat);

		/*
		 * compute current udd_(n+1): same remark as above applies
		 */
		BLAM.copy(uHat, uddHat);
		BLAM.add(-1.0, uHat_old, uddHat);
		BLAM.scale((1 / beta) / Math.pow(dt, 2), uddHat);
		BLAM.add(-(1 / beta) / dt, udHat_old, uddHat);
		BLAM.add(-(1 - 2 * beta) / (2 * beta), uddHat_old, uddHat);
	}

}
