﻿using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Microsoft.Xna.Framework.Graphics;
using Microsoft.Xna.Framework.Content;
using Microsoft.Xna.Framework;
using CS4730_XNA_Graphics_Tutorial.Lights;

namespace CS4730_XNA_Graphics_Tutorial
{
    /// <summary>
    /// Owns the render targets, effects, and renderable list for the tutorial,
    /// and implements several example render paths (Phong, depth, fog,
    /// picture-in-picture, motion blur).
    /// </summary>
    public class Scene
    {
        protected GraphicsDevice graphics;
        protected QuadRender quadRender;

        // Keep a list of all renderable objects - newly-spawned objects must
        // be added to this list, and old discarded objects must be removed (or
        // you may see very noticeable performance degradation).
        public List<Renderable> renderables { get; protected set; }

        // Render targets:
        // Stores the result of the Phong lighting pass.
        private RenderTarget2D lightingRT;
        // Stores the result of rendering depth (z-values) to texture.
        private RenderTarget2D depthRT;
        // Motion blur "accumulation" buffer - this will store the blended
        // result of previous renders.
        private RenderTarget2D motionAccumRT;

        // Effects:
        // Wrapper that drives the multipass Phong shader; useful for debugging
        // our shaders because it lets us test with a known, working effect.
        private PhongEffect phong;
        private Effect phongEffect;
        // An effect to render depth (z-values) to texture.
        private Effect depthEffect;
        // An effect to simply draw a texture-mapped square
        // (this is useful for post-processing effects, debugging
        // intermediate steps, and splitscreen).
        private Effect passthroughEffect;
        // This is similar to passthroughEffect, except it also takes the
        // depth render-target as a parameter as well as several fog parameters.
        private Effect fogEffect;

        // Cached blend state for the motion-blur accumulation pass. BlendState
        // is a GPU resource, so we build it once instead of once per frame.
        private BlendState motionBlurBlend;

        /// <summary>
        /// Loads the effects, creates the back-buffer-sized render targets,
        /// and sets up a default light rig (one ambient light plus two
        /// opposing directional lights).
        /// </summary>
        /// <param name="graphics">Device used for all rendering.</param>
        /// <param name="content">Content manager used to load the compiled effects.</param>
        public Scene(GraphicsDevice graphics, ContentManager content)
        {
            this.graphics = graphics;
            this.quadRender = new QuadRender(graphics);

            this.renderables = new List<Renderable>();
            initRenderTargets();

            phongEffect = content.Load<Effect>("Assets/Effects/PhongMultipass");
            depthEffect = content.Load<Effect>("Assets/Effects/Depth");
            fogEffect = content.Load<Effect>("Assets/Effects/Fog");
            passthroughEffect = content.Load<Effect>("Assets/Effects/Passthrough");

            phong = new PhongEffect(phongEffect);

            phong.Lights.Add(new PhongAmbientLight() { Color = new Vector3(0.12f, 0.12f, 0.12f) });
            phong.Lights.Add(new PhongDirectionalLight() { Color = new Vector3(0.5f, 0.6f, 0.5f), Direction = new Vector3(-1f, -1f, -1.25f) });
            phong.Lights.Add(new PhongDirectionalLight() { Color = new Vector3(0.5f, 0.5f, 0.6f), Direction = new Vector3(1f, 1f, 1.25f) });
        }

        /// <summary>
        /// Creates the three render targets, each sized to match the back buffer.
        /// </summary>
        private void initRenderTargets()
        {
            int width = graphics.PresentationParameters.BackBufferWidth;
            int height = graphics.PresentationParameters.BackBufferHeight;

            // Color target for the lit scene. We don't mind if the contents
            // are discarded when we're done rendering.
            lightingRT = new RenderTarget2D(graphics, width, height, false,
                SurfaceFormat.Color, DepthFormat.Depth24, 0, RenderTargetUsage.DiscardContents);

            // Single-channel target (one 32-bit float per texel) to hold our
            // depth data. We don't mind if the contents are discarded when
            // we're done rendering.
            depthRT = new RenderTarget2D(graphics, width, height, false,
                SurfaceFormat.Single, DepthFormat.Depth24, 0, RenderTargetUsage.DiscardContents);

            // Color target for motion-blur accumulation. We DON'T want the
            // contents discarded between renders, because each frame blends
            // into the previous result.
            motionAccumRT = new RenderTarget2D(graphics, width, height, false,
                SurfaceFormat.Color, DepthFormat.Depth24, 0, RenderTargetUsage.PreserveContents);
        }

        /// <summary>
        /// Draws every renderable with the multipass Phong effect from the
        /// given camera's point of view, into the currently bound target.
        /// </summary>
        public void renderPhong(GameTime gt, Camera camera)
        {
            foreach (Renderable renderable in renderables)
            {
                // Uniform white material for every object in this tutorial.
                phong.MaterialDiffuse = Vector3.One;
                phong.MaterialSpecular = Vector3.One;
                phong.MaterialSpecularFalloff = 5.0f;
                phong.Texture = renderable.getTexture();

                // NOTE(review): -view.Translation equals the camera's world
                // position only when the view matrix has no rotation; the
                // general form is Matrix.Invert(camera.view).Translation.
                // Left as-is to preserve the tutorial's existing output -
                // confirm intent before changing.
                phong.ViewerLocation = -camera.view.Translation;
                phong.World = renderable.getTransform();
                phong.View = camera.view;
                phong.Projection = camera.projection;

                phong.Render(renderable, gt);
            }
        }

        /// <summary>
        /// Draws every renderable with the depth effect, writing depth values
        /// into the currently bound render target.
        /// </summary>
        public void renderDepth(GameTime gt, Camera camera)
        {
            // Clear to zero so untouched texels read as "no depth written".
            graphics.Clear(new Color(0, 0, 0, 0));
            foreach (Renderable r in renderables)
            {
                Matrix world = r.getTransform();
                Matrix view = camera.view;
                Matrix projection = camera.projection;
                // (Removed a stray "Matrix.Invert(view);" here: Matrix.Invert
                // is static and returns a new matrix, so discarding its result
                // made the call a no-op.)
                depthEffect.Parameters["WVP"].SetValue(world * view * projection);
                r.render(gt, depthEffect);
            }
        }

        /// <summary>
        /// Renders the scene twice - once from an offset camera into a texture,
        /// once normally to the screen - then composites the texture into the
        /// lower-right corner as a translucent "window".
        /// </summary>
        public void renderPictureInPictureExample(GameTime gt, Camera camera)
        {
            // this will be the other camera we render from
            Camera otherCamera = new Camera(camera.AspectRatio, camera.FieldOfView);
            // we translate the camera left, down, and backward to illustrate
            // how we're drawing from two separate view points
            otherCamera.view = camera.view * Matrix.CreateTranslation(300, 100, -300);

            // render to the lighting render target, this will then be rendered
            // as the "window" in the corner of the screen.
            graphics.SetRenderTarget(lightingRT);
            // we don't want to do any alpha blending when we write to the buffer
            graphics.BlendState = BlendState.Opaque;
            // draw a background color
            graphics.Clear(Color.Black);
            // draw the scene with phong lighting using the other camera view point
            renderPhong(gt, otherCamera);

            // now we will render to the screen
            graphics.SetRenderTarget(null);
            // draw a background color
            graphics.Clear(Color.CornflowerBlue);
            // draw the scene using the original camera
            renderPhong(gt, camera);

            // use alpha blending
            graphics.BlendState = BlendState.AlphaBlend;
            // draw the previously-rendered scene onto the lower-right corner using the passthrough effect
            // we set the color multiplier such that the alpha values go to 0.75 to allow transparency
            passthroughEffect.Parameters["colorMultiplier"].SetValue(new Vector4(1, 1, 1, 0.75f));
            // set the input texture to be the render target we previously drew on
            passthroughEffect.Parameters["inputTex"].SetValue(lightingRT);
            quadRender.RenderQuad(new Vector2(0.5f, -1.0f), new Vector2(1.0f, -0.5f), passthroughEffect);
        }

        /// <summary>
        /// Renders the lit scene, blends it into a persistent accumulation
        /// buffer, and draws the accumulated result - producing motion blur
        /// as a weighted average of recent frames.
        /// </summary>
        public void renderMotionBlurExample(GameTime gt, Camera camera)
        {
            // this determines the amount of "persistence", or the strength of the motion blur effect
            // decrease this toward 0 to increase the strength of the motion blur effect
            // increase this toward 1 to decrease the blur
            float blurFactor = 0.25f;

            // render onto the lighting render target, this will then be blended
            // into the accumulation buffer.
            graphics.SetRenderTarget(lightingRT);
            // we don't want to do any alpha blending when we write to the buffer
            graphics.BlendState = BlendState.Opaque;
            // draw a background color
            graphics.Clear(Color.CornflowerBlue);
            // draw the scene with phong lighting
            renderPhong(gt, camera);

            // now we will render to the accumulation buffer
            graphics.SetRenderTarget(motionAccumRT);
            // set the blend state so that final_color = blurFactor * new_color + (1 - blurFactor) * original_color
            // (built once and cached - see motionBlurBlend)
            if (motionBlurBlend == null)
            {
                motionBlurBlend = new BlendState();
                motionBlurBlend.ColorSourceBlend = Blend.BlendFactor;
                motionBlurBlend.ColorDestinationBlend = Blend.InverseBlendFactor;
                motionBlurBlend.AlphaSourceBlend = Blend.Zero;
                motionBlurBlend.AlphaDestinationBlend = Blend.One;
                motionBlurBlend.BlendFactor = new Color(blurFactor, blurFactor, blurFactor, blurFactor);
            }
            graphics.BlendState = motionBlurBlend;
            passthroughEffect.Parameters["colorMultiplier"].SetValue(new Vector4(1, 1, 1, 1));
            // set the input texture to be the render target we previously drew on
            passthroughEffect.Parameters["inputTex"].SetValue(lightingRT);
            quadRender.RenderFullScreenQuad(passthroughEffect);

            // draw the accumulated result to the screen
            graphics.SetRenderTarget(null);
            graphics.BlendState = BlendState.Opaque;
            passthroughEffect.Parameters["colorMultiplier"].SetValue(new Vector4(1, 1, 1, 1));
            passthroughEffect.Parameters["inputTex"].SetValue(motionAccumRT);
            quadRender.RenderFullScreenQuad(passthroughEffect);
        }

        /// <summary>
        /// Renders depth and lit color into separate targets, then combines
        /// them on screen with the fog effect (fog density grows with depth).
        /// </summary>
        public void renderFogExample(GameTime gt, Camera camera)
        {
            // we don't want to blend anything, so we reset this
            graphics.BlendState = BlendState.Opaque;

            // dark blue fog color
            Color fogColor = new Color(0, 0, 0.25f, 1.0f);

            // render depth to depthRT
            graphics.SetRenderTarget(depthRT);
            renderDepth(gt, camera);

            // render our lit scene to lightingRT
            graphics.SetRenderTarget(lightingRT);
            graphics.Clear(fogColor);
            renderPhong(gt, camera);

            // combine depthRT and lightingRT with the fog parameters and render to the screen
            graphics.SetRenderTarget(null);
            // constant, linear, and quadratic fog attenuation coefficients
            fogEffect.Parameters["fogColor"].SetValue(fogColor.ToVector4());
            fogEffect.Parameters["fogConst"].SetValue(1.00f);
            fogEffect.Parameters["fogLinear"].SetValue(0.01f);
            fogEffect.Parameters["fogQuad"].SetValue(0.0001f);
            fogEffect.Parameters["colorTex"].SetValue(lightingRT);
            fogEffect.Parameters["depthTex"].SetValue(depthRT);
            quadRender.RenderFullScreenQuad(fogEffect);
        }

        /// <summary>
        /// Top-level render entry point. Uncomment exactly one of the example
        /// calls below to choose a render path; by default nothing is drawn
        /// (matching the file's previous behavior).
        /// </summary>
        public void render(GameTime gt, Camera camera)
        {
            //renderPhong(gt, camera);
            //renderFogExample(gt, camera);
            //renderPictureInPictureExample(gt, camera);
            //renderMotionBlurExample(gt, camera);
            //renderDepth(gt, camera);
            //renderPassthroughExample(gt, camera);
        }

        /// <summary>
        /// Renders the lit scene into lightingRT, then copies that target to
        /// the screen via the passthrough effect (a render-to-texture round
        /// trip). This code previously sat unreachable after a bare "return;"
        /// inside render().
        /// </summary>
        public void renderPassthroughExample(GameTime gt, Camera camera)
        {
            graphics.BlendState = BlendState.Opaque;
            graphics.SetRenderTarget(lightingRT);
            graphics.Clear(Color.CornflowerBlue);
            renderPhong(gt, camera);

            graphics.SetRenderTarget(null);
            passthroughEffect.Parameters["colorMultiplier"].SetValue(new Vector4(1, 1, 1, 1));
            passthroughEffect.Parameters["inputTex"].SetValue(lightingRT);
            quadRender.RenderFullScreenQuad(passthroughEffect);
        }
    }
    /// <summary>
    /// Anything the Scene can draw. Implementations supply their own geometry
    /// draw call, world transform, and diffuse texture; the Scene supplies the
    /// effect and per-frame parameters.
    /// </summary>
    public interface Renderable
    {

        /// <summary>
        /// Renders a model with the specified effect.
        /// </summary>
        /// <param name="gt">The current game time (may be useful for animation)</param>
        /// <param name="effect">The effect to use</param>
        void render(GameTime gt, Effect effect);

        /// <summary>
        /// </summary>
        /// <returns>The model-space to world-space transformation.</returns>
        Matrix getTransform();

        /// <summary>
        /// </summary>
        /// <returns>The diffuse texture the Scene binds when lighting this
        /// object (see Scene.renderPhong).</returns>
        Texture2D getTexture();
    }

    /// <summary>
    /// A simple perspective camera: holds a caller-assigned view matrix and a
    /// projection matrix that is rebuilt whenever the aspect ratio or field
    /// of view changes.
    /// </summary>
    public class Camera
    {
        // The world-to-camera transform; callers set this directly each frame.
        public Matrix view { get; set; }
        // Perspective projection derived from FieldOfView / AspectRatio.
        public Matrix projection { get; protected set; }

        private float aspectRatio = 1.0f;
        private float fieldOfView = 45.0f;

        /// <summary>Width/height ratio; assigning rebuilds the projection.</summary>
        public float AspectRatio
        {
            get { return aspectRatio; }
            set
            {
                aspectRatio = value;
                recalculateProjection();
            }
        }

        /// <summary>Vertical field of view in degrees; assigning rebuilds the projection.</summary>
        public float FieldOfView
        {
            get { return fieldOfView; }
            set
            {
                fieldOfView = value;
                recalculateProjection();
            }
        }

        // Rebuilds the projection from the current settings. Near and far
        // planes are fixed at 0.1 and 10000.
        private void recalculateProjection()
        {
            projection = Matrix.CreatePerspectiveFieldOfView(
                MathHelper.ToRadians(fieldOfView), aspectRatio, 0.1f, 10000.0f);
        }

        /// <summary>
        /// Builds a camera with the given aspect ratio and field of view
        /// (degrees); the view matrix starts as the identity.
        /// </summary>
        public Camera(float aspectRatio, float fieldOfView)
        {
            this.AspectRatio = aspectRatio;   // setter recomputes projection
            this.FieldOfView = fieldOfView;   // setter recomputes it again with both values set
            view = Matrix.Identity;
        }

        /// <summary>Default camera: 0.5 aspect ratio, 45-degree field of view.</summary>
        public Camera()
            : this(0.5f, 45.0f)
        {
        }
    }
    /// <summary>
    /// Draws screen-space textured quads (in normalized device coordinates,
    /// -1..1) with an arbitrary effect. Used for post-processing passes and
    /// for blitting render targets to the screen.
    /// </summary>
    public class QuadRender
    {
        private GraphicsDevice myDevice;
        private VertexPositionTexture[] verts; // vertex data
        private short[] ib = null; // index buffer

        public QuadRender(GraphicsDevice device)
        {
            myDevice = device;

            // Texture coordinates are fixed per corner; the positions are
            // filled in by RenderQuad on every call.
            verts = new VertexPositionTexture[4];
            verts[0] = new VertexPositionTexture(Vector3.Zero, new Vector2(1, 1));
            verts[1] = new VertexPositionTexture(Vector3.Zero, new Vector2(0, 1));
            verts[2] = new VertexPositionTexture(Vector3.Zero, new Vector2(0, 0));
            verts[3] = new VertexPositionTexture(Vector3.Zero, new Vector2(1, 0));

            // Two triangles covering the quad: 0-1-2 and 2-3-0.
            ib = new short[] { 0, 1, 2, 2, 3, 0 };
        }

        /// <summary>Draws a quad covering the entire screen.</summary>
        public void RenderFullScreenQuad(Effect effect)
        {
            RenderQuad(new Vector2(-1f, -1f), Vector2.One, effect);
        }

        /// <summary>
        /// Draws a quad with opposite corners at v1 and v2 (NDC space),
        /// using the first pass of the effect's current technique.
        /// </summary>
        public void RenderQuad(Vector2 v1, Vector2 v2, Effect effect)
        {
            effect.CurrentTechnique.Passes[0].Apply();

            // Position the four corners; Z stays 0 for all of them.
            verts[0].Position.X = v2.X; verts[0].Position.Y = v1.Y;
            verts[1].Position.X = v1.X; verts[1].Position.Y = v1.Y;
            verts[2].Position.X = v1.X; verts[2].Position.Y = v2.Y;
            verts[3].Position.X = v2.X; verts[3].Position.Y = v2.Y;

            // 4 vertices, 2 triangles, drawn straight from CPU-side arrays.
            myDevice.DrawUserIndexedPrimitives(
                PrimitiveType.TriangleList, verts, 0, 4, ib, 0, 2);
        }
    }
}