﻿//https://blog.csdn.net/tianhai110/article/details/5684128?utm_medium=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-7.channel_param&depth_1-utm_source=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-7.channel_param
Shader "ImageEffect/HBAO"
{
    Properties
    {
        [HideInInspector]_MainTex ("Texture", 2D) = "white" {}
    }

	CGINCLUDE
    #include "UnityCG.cginc"
	// Vertex input for the fullscreen post-process quad.
	struct appdata
    {
        float4 vertex : POSITION;  // object-space vertex position
        float2 uv : TEXCOORD0;     // fullscreen texture coordinate
    };

    // Vertex-to-fragment interpolators shared by all three passes.
    struct v2f
    {
        float2 uv : TEXCOORD0;       // screen uv
        float4 vertex : SV_POSITION; // clip-space position
		float3 viewVec : TEXCOORD1;  // NOTE(review): never written by vert_Ao and never read — unused
		float3 viewRay : TEXCOORD2;  // NOTE(review): never written by vert_Ao and never read — unused
    };

	sampler2D _MainTex;
	// Camera depth + packed view-space normals texture
	// (presumably provided by enabling DepthNormals on the camera — confirm in the driver script).
	sampler2D _CameraDepthNormalsTexture;
    
	// AO parameters (expected to be set from the controlling script).
	float _AOStrength;        // final occlusion intensity multiplier
	int _RayAngleStep;        // number of screen-space marching directions per pixel
	int _RayMarchingStep;     // samples taken along each direction
	float _RayMarchingRadius; // sampling radius (screen-space pixels, see frag_Ao)
	float _AngleBias;         // minimum horizon sine for a sample to count as occlusion

    // Fullscreen vertex program shared by all passes: transforms the quad
    // vertex to clip space and forwards the uv unchanged.
    // (A per-vertex view-ray reconstruction via unity_CameraInvProjection
    // could be emitted through viewVec here; this shader instead rebuilds
    // view-space positions per-pixel in reconstructViewPos.)
    v2f vert_Ao (appdata v)
    {
        v2f o;
        o.uv = v.uv;
        o.vertex = UnityObjectToClipPos(v.vertex);
        return o;
    }

	
	// Rotates the 2D direction `dir` by the rotation encoded as
	// rot = (cos a, sin a): a standard 2x2 rotation matrix multiply.
	inline float2 RotateDirections(float2 dir, float2 rot) 
	{
		float2 rotated;
		rotated.x = dir.x * rot.x - dir.y * rot.y;
		rotated.y = dir.y * rot.x + dir.x * rot.y;
		return rotated;
	}

	// Builds the unit screen-space marching direction for `angle` (radians).
	// The extra rotation by (1, 0) is the identity; it appears to be a hook
	// for adding per-pixel randomized rotation later.
	inline float2 GetRayMarchingDir(float angle)
	{
		float2 dir;
		sincos(angle, dir.y, dir.x); // dir = (cos(angle), sin(angle))
		return RotateDirections(dir, float2(1.0, 0));
	}

	// Reconstructs the view-space position of the pixel at `uv` from the
	// camera depth+normals texture. Depth is returned positive (scaled by
	// the far plane, _ProjectionParams.z), so +z points away from the camera.
	float3 reconstructViewPos(float2 uv)
	{
		// Terms needed to invert the perspective projection:
		// _11/_22 are the focal scales, _13/_23 the off-center (lens shift) terms.
		// BUGFIX: the y shift term previously read ._22 instead of ._23,
		// which corrupted reconstruction for off-center projections.
		float2 p11_22 =  float2(unity_CameraProjection._11, unity_CameraProjection._22);
		float2 p13_31 =  float2(unity_CameraProjection._13, unity_CameraProjection._23);
		float depth;
		float3 viewNormal; // decoded but unused; DecodeDepthNormal requires the out param
		float4 cdn = tex2D(_CameraDepthNormalsTexture, uv);
		DecodeDepthNormal(cdn, depth, viewNormal);
		depth *= _ProjectionParams.z; // 0..1 depth -> view-space distance
		return float3((uv * 2.0 - 1.0 - p13_31) / p11_22 * (depth), depth);
	}

	//Ao计算
	// AO pass: horizon-based ambient occlusion. For each pixel, march rays
	// in _RayAngleStep screen-space directions; along each ray accumulate
	// occlusion from samples whose horizon angle rises above the running
	// maximum, attenuated by distance from the pixel.
    fixed4 frag_Ao (v2f i) : SV_Target
    {

		// Sample the packed depth + view-space normal for this pixel.
		float3 viewNormal;
		float linear01Depth;
		float4 depthnormal = tex2D(_CameraDepthNormalsTexture,i.uv);
		DecodeDepthNormal(depthnormal,linear01Depth,viewNormal);

		// Reconstruct the pixel's view-space position.
		float3 viewPos = reconstructViewPos(i.uv);

		// The decoded normal's z points toward the camera (negative), so flip
		// it to match the positive-depth convention of reconstructViewPos.
		viewNormal = normalize(viewNormal) * float3(1, 1, -1);
		float deltaAngle = 2.0 * UNITY_PI / _RayAngleStep;
		// _ScreenParams.zw is (1 + 1/width, 1 + 1/height), so zw - 1 is one texel.
		float2 InvScreenWH = _ScreenParams.zw - 1.0;

		// _RayMarchingRadius is the sampling radius in screen-space pixels.
		float rayMarchingStepSize = _RayMarchingRadius/_RayMarchingStep; 
		
		// Accumulated occlusion over all directions.
		float ao = 0;

		for(int j = 1; j <= _RayAngleStep; j++)
		{
			float uvAngle = deltaAngle * j;
			float maxHAngle = _AngleBias;

			// Derive the tangent along the marching direction with two cross products.
			float3 marchingDir =  float3(GetRayMarchingDir(uvAngle), 0.0f);
			float3 temp = cross(marchingDir , viewNormal);
			float3 tangent = cross(temp, viewNormal);
			// Sine of the tangent angle; length(tangent.z) == abs(tangent.z).
			float sinTangentAngle = length(tangent.z)/length(tangent);
			sinTangentAngle *= tangent.z > 0.0f ? 1.0f : -1.0f;

			float wAO = 0;

			// NOTE(review): this loop takes _RayMarchingStep - 1 samples
			// (k < step, while the outer loop uses <=) — confirm intended.
			for(int k = 1; k < _RayMarchingStep; k++)
			{
				// Snap the offset to whole texels, then convert to uv units.
				float2 deltaUV = round( marchingDir * rayMarchingStepSize * k) * InvScreenWH;
				float2 stepUV = i.uv + deltaUV ;
				float3 pointPos = reconstructViewPos(stepUV);
				float3 diffPos = pointPos - viewPos;
				float len = length(diffPos);
				int within = len < _RayMarchingRadius ? 1.0 : 0.0 ;
				// The z difference must be negative (occluder in front of the
				// pixel), with a small bias against self-occlusion. The original
				// author notes this check was initially missed and is essential.
				int exist = diffPos.z < -0.04 ? 1.0 : 0.0 ;
				// Sine of the horizon angle; zeroed when either gate fails.
				float sinHAngle = length(diffPos.z)/len * exist * within;

				if(sinHAngle > maxHAngle)
				{
					maxHAngle = sinHAngle;
					if(sinHAngle > _AngleBias){
						// Horizon gain over the tangent plane, attenuated
						// quadratically by distance from the pixel.
						wAO += (sinHAngle - sinTangentAngle) * ( 1 - pow(len/_RayMarchingRadius, 2));
					}
				}

			}

			// Earlier variant: accumulate only the final horizon angle.
			// if(maxHAngle  > _AngleBias)
			// {
			// 	ao += maxHAngle - sinTangentAngle;
			// }
			ao += wAO;
		}

		// Average over directions and invert into an occlusion factor
		// (1 = unoccluded, 0 = fully occluded).
		ao = ao / _RayAngleStep;
		float4 color;
		color = max(0.0, 1 - ao * _AOStrength);
		color.a = 1;
		return color;
    }
	
	//Blur
	float _BilaterFilterFactor;  // normal-similarity cutoff for the bilateral weight
	float2 _MainTex_TexelSize;   // auto texel size; only .xy (1/width, 1/height) is used here
	float2 _BlurRadius;          // blur step/axis in texels (presumably set per pass from script)

	// Normal-aware bilateral filter: blurs the AO without bleeding across edges.
	//https://blog.csdn.net/puppet_master/article/details/83066572
	// Decodes the view-space normal packed in the camera depth+normals texture.
	float3 GetNormal(float2 uv)
	{
		float4 encoded = tex2D(_CameraDepthNormalsTexture, uv);
		return DecodeViewNormalStereo(encoded);
	}

	// Bilateral weight: 1 when the two normals agree, falling smoothly to 0
	// as their dot product drops toward _BilaterFilterFactor.
	half CompareNormal(float3 nor1,float3 nor2)
	{
		half similarity = dot(nor1, nor2);
		return smoothstep(_BilaterFilterFactor, 1.0, similarity);
	}
	
	// 7-tap Gaussian blur along the axis given by _BlurRadius, with each
	// tap's weight scaled by normal similarity so the blur stays inside
	// geometric edges (bilateral filter).
	fixed4 frag_Blur (v2f i) : SV_Target
	{
		// Offset of one tap in uv units.
		// _MainTex_TexelSize -> https://forum.unity.com/threads/_maintex_texelsize-whats-the-meaning.110278/
		float2 delta = _MainTex_TexelSize.xy * _BlurRadius.xy;

		// Gaussian weights: center tap, then taps 1..3 texel-steps away.
		const half kernel[4] = { 0.37004405286, 0.31718061674, 0.19823788546, 0.11453744493 };

		float3 centerNormal = GetNormal(i.uv);

		half weightSum = kernel[0];
		half3 result = kernel[0] * tex2D(_MainTex, i.uv).rgb;

		[unroll]
		for (int t = 1; t <= 3; t++)
		{
			// Symmetric pair of taps at distance t along the blur axis.
			float2 uvNeg = i.uv - t * delta;
			float2 uvPos = i.uv + t * delta;

			half wNeg = CompareNormal(centerNormal, GetNormal(uvNeg)) * kernel[t];
			half wPos = CompareNormal(centerNormal, GetNormal(uvPos)) * kernel[t];

			result += wNeg * tex2D(_MainTex, uvNeg).rgb;
			result += wPos * tex2D(_MainTex, uvPos).rgb;
			weightSum += wNeg;
			weightSum += wPos;
		}

		// Renormalize so rejected taps do not darken the output.
		return fixed4(result / weightSum, 1.0);
	}

	// Composite pass: modulate the scene color by the blurred AO factor.
	
	sampler2D _AOTex;

	fixed4 frag_Composite(v2f i) : SV_Target
	{
		// AO is a grayscale factor; only the red channel is needed.
		fixed4 sceneColor = tex2D(_MainTex, i.uv);
		fixed occlusion = tex2D(_AOTex, i.uv).r;
		sceneColor.rgb *= occlusion;
		return sceneColor;
	}

	ENDCG

    SubShader
    {	
		// Fullscreen post-process state: draw both faces, no depth write,
		// and ignore the depth test.
		Cull Off ZWrite Off ZTest Always

		//Pass 0 : Generate AO 
		Pass
        {
            CGPROGRAM
            #pragma vertex vert_Ao
            #pragma fragment frag_Ao
            ENDCG
        }
		//Pass 1 : Bilateral Filter Blur (blur axis chosen via _BlurRadius)
		Pass
		{
			CGPROGRAM
			#pragma vertex vert_Ao
			#pragma fragment frag_Blur
			ENDCG
		}

		//Pass 2 : Composite AO over the scene color
		Pass
		{
			CGPROGRAM
			#pragma vertex vert_Ao
			#pragma fragment frag_Composite
			ENDCG
		}
    }
}
