Getting the depth texture in Unity is fairly simple: attach a script to the camera. The code below is adapted from Unity Shader入门精要 (the Unity Shaders Book).
A base class used to create the material for the effect:
using UnityEngine;
using System.Collections;

[ExecuteInEditMode]
[RequireComponent(typeof(Camera))]
public class PostEffectsBase : MonoBehaviour {

    // Called on start
    protected void CheckResources() {
        bool isSupported = CheckSupport();
        if (isSupported == false) {
            NotSupported();
        }
    }

    // Called in CheckResources to check support on this platform.
    // [System.Obsolete] is presumably here just to silence compiler warnings:
    // SystemInfo.supportsImageEffects / supportsRenderTextures are deprecated
    // (and always true) in recent Unity versions.
    [System.Obsolete]
    protected bool CheckSupport() {
        if (SystemInfo.supportsImageEffects == false || SystemInfo.supportsRenderTextures == false) {
            Debug.LogWarning("This platform does not support image effects or render textures.");
            return false;
        }
        return true;
    }

    // Called when the platform doesn't support this effect
    protected void NotSupported() {
        enabled = false;
    }

    protected void Start() {
        CheckResources();
    }

    // Called when the material used by this effect needs to be created
    protected Material CheckShaderAndCreateMaterial(Shader shader, Material material) {
        if (shader == null) {
            return null;
        }
        if (shader.isSupported && material && material.shader == shader)
            return material;
        if (!shader.isSupported) {
            return null;
        }
        else {
            material = new Material(shader);
            material.hideFlags = HideFlags.DontSave;
            if (material)
                return material;
            else
                return null;
        }
    }
}
Getting the depth value (although the class name mentions depth and normals, we are not retrieving the normal texture yet):
using System.Collections;
using System.Collections.Generic;
using UnityEngine;

/// <summary>
/// Get the depth (and later normal) texture
/// </summary>
public class GetDepthAndNormalTexture : PostEffectsBase
{
    // The shader this effect uses
    public Shader depthShader;
    // The material created automatically from the shader
    private Material depthMaterial = null;
    public Material material
    {
        get
        {
            depthMaterial = CheckShaderAndCreateMaterial(depthShader, depthMaterial);
            return depthMaterial;
        }
    }

    private Camera myCamera;
    public Camera camera
    {
        get
        {
            if (myCamera == null)
            {
                myCamera = GetComponent<Camera>();
            }
            return myCamera;
        }
    }

    void OnEnable()
    {
        // Ask the camera to generate a depth texture (_CameraDepthTexture)
        camera.depthTextureMode |= DepthTextureMode.Depth;
    }

    void OnRenderImage(RenderTexture src, RenderTexture dest)
    {
        if (material != null)
        {
            //Matrix4x4 currentViewProjectionMatrix = camera.projectionMatrix * camera.worldToCameraMatrix;
            //Matrix4x4 currentViewProjectionInverseMatrix = currentViewProjectionMatrix.inverse;
            //material.SetMatrix("_CurrentViewProjectionInverseMatrix", currentViewProjectionInverseMatrix);

            // Set the inverse view-projection matrix before the blit so the shader
            // uses this frame's camera. GL.GetGPUProjectionMatrix converts the matrix
            // to the conventions of the graphics API actually in use (Direct3D vs OpenGL).
            Matrix4x4 projection = GL.GetGPUProjectionMatrix(camera.projectionMatrix, false) * camera.worldToCameraMatrix;
            material.SetMatrix("_CurrentViewProjectionInverseMatrix", projection.inverse);

            Graphics.Blit(src, dest, material);
        }
        else
        {
            Graphics.Blit(src, dest);
        }
    }
}
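As a usage note, the effect only has to sit on the camera with the shader below assigned to depthShader. A minimal setup sketch (purely illustrative; in practice the shader is usually assigned in the Inspector, and Shader.Find only works if the shader is included in the build):

// Illustrative runtime setup: attach the effect to the main camera and assign the shader.
var effect = Camera.main.gameObject.AddComponent<GetDepthAndNormalTexture>();
effect.depthShader = Shader.Find("Unity Shaders Book/Chapter 13/DepthAndNormal");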
2.1 First, one thing needs to be made clear: with the line below, we can declare a _CameraDepthTexture variable in the shader to access the depth texture. The value stored there is the NDC depth, remapped to the range [0, 1].
camera.depthTextureMode |= DepthTextureMode.Depth;
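On OpenGL-like platforms the NDC z coordinate lies in [-1, 1], so the stored value is simply the remapping d = 0.5 * z_ndc + 0.5 (the inverse of this is what the commented-out line in the shader below undoes). On Direct3D-style platforms the z range is [0, 1] to begin with, and recent Unity versions additionally use a reversed depth buffer, so the exact mapping is platform dependent; the helper functions used later (Linear01Depth, LinearEyeDepth) hide these differences.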
2.2 How can the depth texture be visualized in a shader?
Here is the code first:
Shader "Unity Shaders Book/Chapter 13/DepthAndNormal"
{
Properties
{
_MainTex ("Base (RGB)", 2D) = "white" {}
}
SubShader
{
CGINCLUDE
#include "UnityCG.cginc"
sampler2D _MainTex;
half4 _MainTex_TexelSize;
sampler2D _CameraDepthTexture;
float4x4 _CurrentViewProjectionInverseMatrix;
struct v2f{
float4 pos:SV_POSITION;
half2 uv:TEXCOORD0;
half2 uv_depth:TEXCOORD1;
};
v2f vert(appdata_img v){
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.uv = v.texcoord;
o.uv_depth = v.texcoord;
#if UNITY_UV_STARTS_AT_TOP // 如果uv的y轴是从上面开始的, 1表示Direct3d,0表示Opengl
if(_MainTex_TexelSize.y < 0)
o.uv_depth.y = 1 - o.uv_depth.y;
#endif
return o;
}
fixed4 frag(v2f i) : SV_Target{
// 采样深度纹理获得非线性空间中的深度值
float d = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv_depth);
float4 H = float4(i.uv.x * 2 - 1, i.uv.y * 2 - 1, d, 1);
// float4 H = float4(i.uv.x * 2 - 1, i.uv.y * 2 - 1, d * 2 - 1, 1);
float4 D = mul(_CurrentViewProjectionInverseMatrix, H); // 映射回线性空间中去
float4 worldPos = D / D.w;
float world_d = saturate(worldPos.z / 96);
float linear_d = Linear01Depth(d);
float4 proj = float4(i.uv * 2 - 1, d, 1);
worldPos = mul(_CurrentViewProjectionInverseMatrix, proj);
worldPos = worldPos / worldPos.w;
// return fixed4(linear_d, linear_d, linear_d, 1.0);
return fixed4(worldPos.z, worldPos.z, worldPos.z, 1.0);
}
ENDCG
Pass {
ZTest Always Cull Off ZWrite Off
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
ENDCG
}
}
FallBack Off
}
Focus on the frag function!
If you want the depth value in view (eye) space:
use the LinearEyeDepth function; only the return code needs to change:
float view_linear_d = LinearEyeDepth(d);
return fixed4(view_linear_d, view_linear_d, view_linear_d, 1.0);
Result (it looks a bit odd and I suspect something is still wrong; suggestions are welcome):
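One likely reason the image looks odd: LinearEyeDepth returns the depth in view-space world units, so any pixel more than one unit from the camera already outputs a value above 1 and shows up as white. A minimal sketch of one way to make it visible, dividing by the far clip plane (_ProjectionParams.z):

float view_linear_d = LinearEyeDepth(d);
// _ProjectionParams.z is the camera's far clip plane; dividing by it rescales
// the eye-space depth into a displayable [0, 1] range.
view_linear_d /= _ProjectionParams.z;
return fixed4(view_linear_d, view_linear_d, view_linear_d, 1.0);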
If you want a linear depth value in the range [0, 1], use the Linear01Depth function; again only the return code needs to change:
float linear_d = Linear01Depth(d);
return fixed4(linear_d, linear_d, linear_d, 1.0);
Result:
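For reference, Linear01Depth simply undoes the non-linearity of the perspective projection using _ZBufferParams, which Unity fills in from the near and far clip planes. A sketch of the equivalent manual computation (this mirrors the helper in UnityCG.cginc, assuming a conventional, non-reversed depth buffer):

// Equivalent to Linear01Depth(d) on a conventional depth buffer
float linear_d = 1.0 / (_ZBufferParams.x * d + _ZBufferParams.y);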
If you want to reconstruct the world position from the depth value, the code is as follows:
float d = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv_depth);
float4 H = float4(i.uv.x * 2 - 1, i.uv.y * 2 - 1, d, 1);
// float4 H = float4(i.uv.x * 2 - 1, i.uv.y * 2 - 1, d * 2 - 1, 1);
float4 D = mul(_CurrentViewProjectionInverseMatrix, H); // transform back to world space
float4 worldPos = D / D.w;
return fixed4(worldPos.z, worldPos.z, worldPos.z, 1.0);
Result:
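Why the divide by w works: for a world-space point P, the view-projection matrix gives the clip-space position C = M_vp * P, and the NDC position is C / C.w. The fragment shader rebuilds exactly that NDC position H from the uv coordinates and the sampled depth, so M_vp^-1 * H recovers P only up to the unknown scale factor 1 / C.w; since P.w = 1, dividing D by its own w component removes that factor and yields the true world position.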
GL.GetGPUProjectionMatrix(camera.projectionMatrix, false)
This function handles the differences between the Direct3D and OpenGL conventions.
I did not use it at first, so the matrix passed to the shader did not match the platform's actual conventions and the result was wrong.
Unity's scene and world space are left-handed, but camera.projectionMatrix is always expressed in OpenGL conventions (view space looks down the negative z axis). The graphics API actually in use (for example Direct3D) may flip the y axis and use a different, or reversed, z range, so if the projection matrix is not converted with GL.GetGPUProjectionMatrix, the reconstructed z values come out wrong.
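To make the difference concrete, a small script-side sketch (the same calls already used in OnRenderImage above):

// OpenGL-convention matrix only; on Direct3D this does not match what the GPU actually uses:
Matrix4x4 rawVP = camera.projectionMatrix * camera.worldToCameraMatrix;

// Converted to the conventions of the graphics API in use (the second argument
// indicates whether we are rendering into a render texture, which affects a possible y flip):
Matrix4x4 gpuVP = GL.GetGPUProjectionMatrix(camera.projectionMatrix, false) * camera.worldToCameraMatrix;
material.SetMatrix("_CurrentViewProjectionInverseMatrix", gpuVP.inverse);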