大家好,我是阿赵。   继续介绍后处理的做法,这一期介绍的是模糊效果的做法。

一、模糊效果的原理

我们还是用这个角色作为背景来实现模糊效果

这是模糊后的效果

根据不同的参数,可以调整不同的模糊程度。

  在介绍做法之前,首先要明确一个基本的认知,模糊效果是非常消耗性能的一种处理。正常我们显示一张图片,每个像素根据UV坐标采样一次,得到颜色。而模糊处理,是每个像素点,除了采样自己,还要采样像素点周围的多个像素点,然后把采样得到的颜色值做不同的求平均值算法计算,而得到的模糊。   概括的说,这个采样周围多个点的做法,就是定义卷积核。下面介绍的三种不同的模糊算法,区别就在于卷积核的计算方式不同。   除了采样周围的多个像素点,我们还有可能需要多次进行采样,因为单次卷积核的采样计算,可能达不到我们想要的效果。   所以说,模糊效果是一种消耗非常大的处理方式。我们经常说Bloom辉光效果性能消耗非常大,其实也是因为,Bloom效果也是建立在模糊处理的效果上再做叠加的,性能消耗其实还是出在模糊上。

二、几种不同的模糊效果实现

1、均值模糊(BoxBlur)

  均值模糊是采样当前像素点附近的9个像素颜色(包括自己),然后把所有颜色相加,最后除以9。因为是把9个像素的颜色直接取平均值的,所以称为均值模糊。

  放大模糊后的画面,可以看到,单次的均值模糊,其实效果并不是很好,会出现一些方形的像素效果,模糊效果不是很平均。所以均值模糊也称为盒状模糊。   做模糊效果,是需要在C#脚本里面做多次Graphics.Blit的,不过这里先跳过这一步,先看看Shader怎样写,最后再去看C#的实现。

// Box (mean) blur post-effect: each pixel becomes the plain average of
// four diagonal neighbour samples. Driven from C# via Graphics.Blit.
Shader "Hidden/AzhaoBoxBlur"

{

CGINCLUDE

#include "UnityCG.cginc"

sampler2D _MainTex;

// Set by Unity: x = 1/width, y = 1/height of _MainTex.
float4 _MainTex_TexelSize;

// Distance (in texels) of the four samples from the centre pixel.
float _BlurOffset;

// Fragment: average the 4 diagonal neighbours.
// The centre sample is read only to preserve the original alpha.
fixed4 fragBoxBlur(v2f_img i) : SV_Target

{

half4 col = tex2D(_MainTex, i.uv);

// Box blur: accumulate the four diagonal neighbour samples.

float4 colBlur = 0;

// One texel in each diagonal direction, scaled by _BlurOffset:
// xy = (+1,+1), xw = (+1,-1), zy = (-1,+1), zw = (-1,-1).
float4 blurUV = _MainTex_TexelSize.xyxy * float4(1, 1, -1, -1)*_BlurOffset;

colBlur += tex2D(_MainTex, i.uv + blurUV.xy);//(+1,+1)

colBlur += tex2D(_MainTex, i.uv + blurUV.xw);//(+1,-1)

colBlur += tex2D(_MainTex, i.uv + blurUV.zy);//(-1,+1)

colBlur += tex2D(_MainTex, i.uv + blurUV.zw);//(-1,-1)

// Plain average of the four samples; alpha keeps the centre value.
col.rgb = colBlur.rgb / 4;

return col;

}

ENDCG

Properties

{

_MainTex ("Texture", 2D) = "white" {}

_BlurOffset("BlurOffset",Float) = 1

}

SubShader

{

// No culling or depth

Cull Off ZWrite Off ZTest Always

// Two identical passes so the C# driver can always blit pass 0 then pass 1,
// matching the two-pass shaders (gaussian/kawase) it also drives.
Pass

{

CGPROGRAM

#pragma vertex vert_img

#pragma fragment fragBoxBlur

ENDCG

}

Pass

{

CGPROGRAM

#pragma vertex vert_img

#pragma fragment fragBoxBlur

ENDCG

}

}

}

2、高斯模糊(GaussianBlur)

  高斯模糊应该是很常用的一种模糊手段,它的卷积核并不是附近9格,而是5×5范围,所以它的采样次数比均值模糊是要多很多的。并且,高斯模糊采样25个像素点,并不是直接求平均值的,而是有一个从中间往外减少的权重值,这个处理可以减轻均值模糊的盒状格子效果。

不过如果只是单次采样,其实这个效果还是不算很好。 下面是Shader

// Separable gaussian blur post-effect: a 5-tap horizontal pass (pass 0)
// followed by a 5-tap vertical pass (pass 1), equivalent to a 5x5 kernel.
Shader "Hidden/AzhaoGaussianBlur"

{

CGINCLUDE

#include "UnityCG.cginc"

sampler2D _MainTex;

// Set by Unity: x = 1/width, y = 1/height of _MainTex.
float4 _MainTex_TexelSize;

// Spacing (in texels) between the gaussian taps.
float _BlurOffset;

// Horizontal gaussian pass: 5 taps at offsets -2..+2 texels,
// weights 0.05/0.25/0.40/0.25/0.05 (a 1D gaussian kernel summing to 1).
half4 frag_HorizontalBlur(v2f_img i) : SV_Target

{

half2 uv1 = i.uv + _MainTex_TexelSize.xy* half2(1, 0)*_BlurOffset * -2.0;

half2 uv2 = i.uv + _MainTex_TexelSize.xy* half2(1, 0)*_BlurOffset * -1.0;

half2 uv3 = i.uv;

half2 uv4 = i.uv + _MainTex_TexelSize.xy* half2(1, 0)*_BlurOffset * 1.0;

half2 uv5 = i.uv + _MainTex_TexelSize.xy* half2(1, 0)*_BlurOffset * 2.0;

half4 s = 0;

s += tex2D(_MainTex, uv1) * 0.05;

s += tex2D(_MainTex, uv2) * 0.25;

s += tex2D(_MainTex, uv3) * 0.40;

s += tex2D(_MainTex, uv4) * 0.25;

s += tex2D(_MainTex, uv5) * 0.05;

return s;

}

// Vertical gaussian pass: same 5-tap kernel along the y axis.
half4 frag_VerticalBlur(v2f_img i) : SV_Target

{

half2 uv1 = i.uv + _MainTex_TexelSize.xy* half2(0, 1)*_BlurOffset * -2.0;

half2 uv2 = i.uv + _MainTex_TexelSize.xy* half2(0, 1)*_BlurOffset * -1.0;

half2 uv3 = i.uv;

half2 uv4 = i.uv + _MainTex_TexelSize.xy* half2(0, 1)*_BlurOffset * 1.0;

half2 uv5 = i.uv + _MainTex_TexelSize.xy* half2(0, 1)*_BlurOffset * 2.0;

half4 s = 0;

s += tex2D(_MainTex, uv1) * 0.05;

s += tex2D(_MainTex, uv2) * 0.25;

s += tex2D(_MainTex, uv3) * 0.40;

s += tex2D(_MainTex, uv4) * 0.25;

s += tex2D(_MainTex, uv5) * 0.05;

return s;

}

ENDCG

Properties

{

_MainTex ("Texture", 2D) = "white" {}

_BlurOffset("BlurOffset", Float) = 1

}

SubShader

{

// No culling or depth

Cull Off ZWrite Off ZTest Always

// Pass 0: horizontal blur
Pass

{

CGPROGRAM

#pragma vertex vert_img

#pragma fragment frag_HorizontalBlur

ENDCG

}

// Pass 1: vertical blur
Pass

{

CGPROGRAM

#pragma vertex vert_img

#pragma fragment frag_VerticalBlur

ENDCG

}

}

}

3、Kawase模糊

  这个算法是我从网上学习的。根据介绍,Kawase模糊的思路是在距离当前像素越来越远的位置对四个角进行采样,且在两个大小相等的纹理之间进行乒乓式的blit。创新点在于,采用了随迭代次数移动的blur kernel,而不是像高斯模糊或均值模糊一样,从头到尾使用固定的卷积核。

  可以看到,这种模糊方式比上面两种效果是要好很多,那些马赛克条纹一样的东西基本看不到了。

// Kawase (dual) blur post-effect.
// Pass 0 down-samples with a 5-tap kernel (centre + 4 diagonals),
// pass 1 up-samples with an 8-tap kernel. The C# driver blits between
// progressively halved/doubled temporary RenderTextures.
Shader "Hidden/AzhaoKawaseBlur"
{
	CGINCLUDE
	#include "UnityCG.cginc"

	uniform sampler2D _MainTex;
	// Set by Unity: x = 1/width, y = 1/height of _MainTex.
	uniform float4 _MainTex_TexelSize;
	// Blur radius scale; tap distance grows as (1 + _BlurOffset).
	uniform half _BlurOffset;

	// Varyings for the down-sample pass: centre UV plus 4 diagonal taps.
	struct v2f_DownSample
	{
		float4 pos: SV_POSITION;
		float2 uv: TEXCOORD1;
		float4 uv01: TEXCOORD2;
		float4 uv23: TEXCOORD3;
	};

	// Varyings for the up-sample pass: 8 taps around the centre.
	struct v2f_UpSample
	{
		float4 pos: SV_POSITION;
		float4 uv01: TEXCOORD1;
		float4 uv23: TEXCOORD2;
		float4 uv45: TEXCOORD3;
		float4 uv67: TEXCOORD4;
	};

	v2f_DownSample Vert_DownSample(appdata_img v)
	{
		v2f_DownSample o;
		o.pos = UnityObjectToClipPos(v.vertex);
		// Fix: work on local copies instead of assigning to the uniforms.
		// Writing to a uniform only mutates a per-invocation shadow copy
		// and is confusing/non-portable.
		float2 texel = _MainTex_TexelSize.xy * 0.5; // half-resolution texel
		float2 offset = texel * (1.0 + _BlurOffset);
		float2 uv = v.texcoord;
		o.uv = uv;
		o.uv01.xy = uv - offset;                      // bottom left
		o.uv01.zw = uv + offset;                      // top right
		o.uv23.xy = uv - float2(offset.x, -offset.y); // top left
		o.uv23.zw = uv + float2(offset.x, -offset.y); // bottom right
		return o;
	}

	// Down-sample kernel: centre weighted 4, corners weighted 1; total 8.
	half4 Frag_DownSample(v2f_DownSample i) : SV_Target
	{
		half4 sum = tex2D(_MainTex, i.uv) * 4;
		sum += tex2D(_MainTex, i.uv01.xy);
		sum += tex2D(_MainTex, i.uv01.zw);
		sum += tex2D(_MainTex, i.uv23.xy);
		sum += tex2D(_MainTex, i.uv23.zw);
		return sum * 0.125; // 1/8
	}

	v2f_UpSample Vert_UpSample(appdata_img v)
	{
		v2f_UpSample o;
		o.pos = UnityObjectToClipPos(v.vertex);
		float2 uv = v.texcoord;
		// Fix: the original assigned a float2 into the scalar half uniform
		// _BlurOffset (silent truncation) and also mutated
		// _MainTex_TexelSize. Use a local texel size and a scalar scale.
		float2 texel = _MainTex_TexelSize.xy * 0.5;
		float scale = 1.0 + _BlurOffset;
		o.uv01.xy = uv + float2(-texel.x * 2, 0) * scale;        // left
		o.uv01.zw = uv + float2(-texel.x, texel.y) * scale;      // top left
		o.uv23.xy = uv + float2(0, texel.y * 2) * scale;         // top
		o.uv23.zw = uv + texel * scale;                          // top right
		o.uv45.xy = uv + float2(texel.x * 2, 0) * scale;         // right
		o.uv45.zw = uv + float2(texel.x, -texel.y) * scale;      // bottom right
		o.uv67.xy = uv + float2(0, -texel.y * 2) * scale;        // bottom
		o.uv67.zw = uv - texel * scale;                          // bottom left
		return o;
	}

	// Up-sample kernel: 4 axis taps weighted 1, 4 diagonal taps weighted 2;
	// total weight 12, hence the ~1/12 normalisation.
	half4 Frag_UpSample(v2f_UpSample i) : SV_Target
	{
		half4 sum = 0;
		sum += tex2D(_MainTex, i.uv01.xy);
		sum += tex2D(_MainTex, i.uv01.zw) * 2;
		sum += tex2D(_MainTex, i.uv23.xy);
		sum += tex2D(_MainTex, i.uv23.zw) * 2;
		sum += tex2D(_MainTex, i.uv45.xy);
		sum += tex2D(_MainTex, i.uv45.zw) * 2;
		sum += tex2D(_MainTex, i.uv67.xy);
		sum += tex2D(_MainTex, i.uv67.zw) * 2;
		return sum * 0.0833; // ~1/12
	}
	ENDCG

	Properties
	{
		_MainTex ("Texture", 2D) = "white" {}
		_BlurOffset("BlurOffset", Float) = 1
	}

	SubShader
	{
		// No culling or depth
		Cull Off ZWrite Off ZTest Always

		// Pass 0: down-sample
		Pass
		{
			CGPROGRAM
			#pragma vertex Vert_DownSample
			#pragma fragment Frag_DownSample
			ENDCG
		}

		// Pass 1: up-sample
		Pass
		{
			CGPROGRAM
			#pragma vertex Vert_UpSample
			#pragma fragment Frag_UpSample
			ENDCG
		}
	}
}

4、双重模糊

  这里对比一下均值模糊和高斯模糊。   高斯模糊的效果比均值模糊要好一些,但马赛克条纹的情况还是存在,而且性能消耗巨大。 为了解决这个问题,我们可以进行双重模糊的操作。 具体的做法是,在C#做Graphics.Blit的时候,开2个循环。 第一个循环里面,每次对RenderTexture的宽高除以2,做降采样。 第二个循环里面,每次对RenderTexture的宽高乘以2,做升采样。 经过了RenderTexture的宽高改变之后,原来的马赛克条纹效果,基本上就可以消除。   双重采样在同样的参数设置下,效果是比高斯模糊更好,更平滑的。不过这个做法要注意的一点,是因为创建了多个RenderTexture,必须记得做对应的释放操作。

三、C#代码

  下面的代码,是C#端兼容了上面说的三种不同的模糊算法,然后做了双重采样之后的结果。

using System.Collections;

using System.Collections.Generic;

using UnityEngine;

/// <summary>
/// Image-effect driver for the three blur shaders (box / gaussian / kawase).
/// Performs "dual blur": <see cref="blurSize"/> down-sample iterations that
/// halve the RenderTexture, followed by the same number of up-sample
/// iterations that double it back, blitting shader pass 0 then pass 1 each time.
/// </summary>
public class BlurCtrl : MonoBehaviour
{
    // Cached blur material; lazily (re)created when blurType changes.
    private Material blurMat;

    // Master switch for the effect.
    public bool isBlur = false;

    // Number of down/up-sample iterations (0 disables the blur).
    [Range(0, 4)]
    public float blurSize = 0;

    // Sample offset passed to the shaders' _BlurOffset property.
    [Range(-3, 3)]
    public float blurOffset = 1;

    // 1 = box blur, 2 = gaussian blur, 3 = kawase blur.
    [Range(1, 3)]
    public int blurType = 3;

    // Maps a blur type to its shader name; null for unknown types.
    private static string GetShaderName(int bType)
    {
        switch (bType)
        {
            case 1: return "Hidden/AzhaoBoxBlur";
            case 2: return "Hidden/AzhaoGaussianBlur";
            case 3: return "Hidden/AzhaoKawaseBlur";
            default: return null;
        }
    }

    // Creates the material for the given blur type, or null if the type is
    // unknown or the shader cannot be found.
    private Material GetBlurMat(int bType)
    {
        string shaderName = GetShaderName(bType);
        if (shaderName == null)
        {
            return null;
        }
        Shader shader = Shader.Find(shaderName);
        if (shader == null)
        {
            return null;
        }
        Material mat = new Material(shader);
        // Keep the runtime-created material out of the scene/assets.
        mat.hideFlags = HideFlags.HideAndDontSave;
        return mat;
    }

    // Releases a temporary RenderTexture; safe to call with null.
    private void ReleaseRT(RenderTexture rt)
    {
        if (rt != null)
        {
            RenderTexture.ReleaseTemporary(rt);
        }
    }

    // True when the cached material is missing or built from a different
    // shader than the requested blur type. Unknown types keep the cached
    // material (matches the original behavior).
    private bool CheckNeedCreateBlurMat(Material mat, int bType)
    {
        if (mat == null || mat.shader == null)
        {
            return true;
        }
        string wanted = GetShaderName(bType);
        return wanted != null && mat.shader.name != wanted;
    }

    // GetTemporary with the sizes floored and clamped to at least 1 pixel,
    // so repeated halving can never request a zero-sized RenderTexture.
    private RenderTexture GetTempRT(float width, float height)
    {
        int w = Mathf.Max(1, Mathf.FloorToInt(width));
        int h = Mathf.Max(1, Mathf.FloorToInt(height));
        return RenderTexture.GetTemporary(w, h);
    }

    /// <summary>
    /// Runs the dual-blur chain from source into destination.
    /// </summary>
    /// <param name="blurTime">Iteration count for each of the two loops.</param>
    /// <param name="bType">Blur algorithm (1=box, 2=gaussian, 3=kawase).</param>
    /// <param name="offset">Shader _BlurOffset value.</param>
    private void BlurFun(RenderTexture source, RenderTexture destination, float blurTime, int bType, float offset)
    {
        if (CheckNeedCreateBlurMat(blurMat, bType) == true)
        {
            // Fix: destroy the previous material before replacing it,
            // otherwise every type switch leaked a Material instance.
            if (blurMat != null)
            {
                Destroy(blurMat);
            }
            blurMat = GetBlurMat(bType);
        }
        if (blurMat == null || blurMat.shader == null || blurMat.shader.isSupported == false)
        {
            return;
        }
        blurMat.SetFloat("_BlurOffset", offset);

        float width = source.width;
        float height = source.height;
        RenderTexture rt1 = RenderTexture.GetTemporary(source.width, source.height);
        Graphics.Blit(source, rt1);

        // Down-sample: each iteration blits pass 0 then pass 1, halving the
        // RenderTexture before each blit (quarter size per iteration).
        for (int i = 0; i < blurTime; i++)
        {
            width = width / 2;
            height = height / 2;
            RenderTexture mid = GetTempRT(width, height);
            Graphics.Blit(rt1, mid, blurMat, 0);
            ReleaseRT(rt1);

            width = width / 2;
            height = height / 2;
            rt1 = GetTempRT(width, height);
            Graphics.Blit(mid, rt1, blurMat, 1);
            ReleaseRT(mid);
        }

        // Up-sample: mirror of the loop above, doubling the size back.
        for (int i = 0; i < blurTime; i++)
        {
            width = width * 2;
            height = height * 2;
            RenderTexture mid = GetTempRT(width, height);
            Graphics.Blit(rt1, mid, blurMat, 0);
            ReleaseRT(rt1);

            width = width * 2;
            height = height * 2;
            rt1 = GetTempRT(width, height);
            Graphics.Blit(mid, rt1, blurMat, 1);
            ReleaseRT(mid);
        }

        Graphics.Blit(rt1, destination);
        ReleaseRT(rt1);
    }

    private void OnDestroy()
    {
        // Release the runtime-created material when the component goes away.
        if (blurMat != null)
        {
            Destroy(blurMat);
            blurMat = null;
        }
    }

    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        // Blur in place (into source) first, then copy to the destination;
        // when disabled this is a straight pass-through blit.
        if (isBlur == true && blurSize > 0)
        {
            BlurFun(source, source, blurSize, blurType, blurOffset);
        }
        Graphics.Blit(source, destination);
    }
}

推荐链接

评论可见,请评论后查看内容,谢谢!!!
 您阅读本篇文章共花了: