testss
This commit is contained in:
@@ -0,0 +1,117 @@
|
||||
#ifndef UNIVERSAL_PIPELINE_CORE_INCLUDED
|
||||
#define UNIVERSAL_PIPELINE_CORE_INCLUDED
|
||||
|
||||
// VT is not supported in URP (for now) this ensures any shaders using the VT
|
||||
// node work by falling to regular texture sampling.
|
||||
#define FORCE_VIRTUAL_TEXTURING_OFF 1
|
||||
|
||||
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Common.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Packing.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Version.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Input.hlsl"
|
||||
|
||||
#if !defined(SHADER_HINT_NICE_QUALITY)
|
||||
#if defined(SHADER_API_MOBILE) || defined(SHADER_API_SWITCH)
|
||||
#define SHADER_HINT_NICE_QUALITY 0
|
||||
#else
|
||||
#define SHADER_HINT_NICE_QUALITY 1
|
||||
#endif
|
||||
#endif
|
||||
|
||||
// Shader Quality Tiers in Universal.
|
||||
// SRP doesn't use Graphics Settings Quality Tiers.
|
||||
// We should expose shader quality tiers in the pipeline asset.
|
||||
// Meanwhile, it's forced to be:
|
||||
// High Quality: Non-mobile platforms or shader explicit defined SHADER_HINT_NICE_QUALITY
|
||||
// Medium: Mobile aside from GLES2
|
||||
// Low: GLES2
|
||||
#if SHADER_HINT_NICE_QUALITY
|
||||
#define SHADER_QUALITY_HIGH
|
||||
#elif defined(SHADER_API_GLES)
|
||||
#define SHADER_QUALITY_LOW
|
||||
#else
|
||||
#define SHADER_QUALITY_MEDIUM
|
||||
#endif
|
||||
|
||||
#ifndef BUMP_SCALE_NOT_SUPPORTED
|
||||
#define BUMP_SCALE_NOT_SUPPORTED !SHADER_HINT_NICE_QUALITY
|
||||
#endif
|
||||
|
||||
|
||||
#if UNITY_REVERSED_Z
|
||||
// TODO: workaround. There's a bug where SHADER_API_GL_CORE gets erroneously defined on switch.
|
||||
#if (defined(SHADER_API_GLCORE) && !defined(SHADER_API_SWITCH)) || defined(SHADER_API_GLES) || defined(SHADER_API_GLES3)
|
||||
//GL with reversed z => z clip range is [near, -far] -> should remap in theory but dont do it in practice to save some perf (range is close enough)
|
||||
#define UNITY_Z_0_FAR_FROM_CLIPSPACE(coord) max(-(coord), 0)
|
||||
#else
|
||||
//D3d with reversed Z => z clip range is [near, 0] -> remapping to [0, far]
|
||||
//max is required to protect ourselves from near plane not being correct/meaningfull in case of oblique matrices.
|
||||
#define UNITY_Z_0_FAR_FROM_CLIPSPACE(coord) max(((1.0-(coord)/_ProjectionParams.y)*_ProjectionParams.z),0)
|
||||
#endif
|
||||
#elif UNITY_UV_STARTS_AT_TOP
|
||||
//D3d without reversed z => z clip range is [0, far] -> nothing to do
|
||||
#define UNITY_Z_0_FAR_FROM_CLIPSPACE(coord) (coord)
|
||||
#else
|
||||
//Opengl => z clip range is [-near, far] -> should remap in theory but dont do it in practice to save some perf (range is close enough)
|
||||
#define UNITY_Z_0_FAR_FROM_CLIPSPACE(coord) (coord)
|
||||
#endif
|
||||
|
||||
// Stereo-related bits
|
||||
#if defined(UNITY_STEREO_INSTANCING_ENABLED) || defined(UNITY_STEREO_MULTIVIEW_ENABLED)
|
||||
|
||||
#define SLICE_ARRAY_INDEX unity_StereoEyeIndex
|
||||
|
||||
#define TEXTURE2D_X(textureName) TEXTURE2D_ARRAY(textureName)
|
||||
#define TEXTURE2D_X_PARAM(textureName, samplerName) TEXTURE2D_ARRAY_PARAM(textureName, samplerName)
|
||||
#define TEXTURE2D_X_ARGS(textureName, samplerName) TEXTURE2D_ARRAY_ARGS(textureName, samplerName)
|
||||
#define TEXTURE2D_X_HALF(textureName) TEXTURE2D_ARRAY_HALF(textureName)
|
||||
#define TEXTURE2D_X_FLOAT(textureName) TEXTURE2D_ARRAY_FLOAT(textureName)
|
||||
|
||||
#define LOAD_TEXTURE2D_X(textureName, unCoord2) LOAD_TEXTURE2D_ARRAY(textureName, unCoord2, SLICE_ARRAY_INDEX)
|
||||
#define LOAD_TEXTURE2D_X_LOD(textureName, unCoord2, lod) LOAD_TEXTURE2D_ARRAY_LOD(textureName, unCoord2, SLICE_ARRAY_INDEX, lod)
|
||||
#define SAMPLE_TEXTURE2D_X(textureName, samplerName, coord2) SAMPLE_TEXTURE2D_ARRAY(textureName, samplerName, coord2, SLICE_ARRAY_INDEX)
|
||||
#define SAMPLE_TEXTURE2D_X_LOD(textureName, samplerName, coord2, lod) SAMPLE_TEXTURE2D_ARRAY_LOD(textureName, samplerName, coord2, SLICE_ARRAY_INDEX, lod)
|
||||
#define GATHER_TEXTURE2D_X(textureName, samplerName, coord2) GATHER_TEXTURE2D_ARRAY(textureName, samplerName, coord2, SLICE_ARRAY_INDEX)
|
||||
#define GATHER_RED_TEXTURE2D_X(textureName, samplerName, coord2) GATHER_RED_TEXTURE2D(textureName, samplerName, float3(coord2, SLICE_ARRAY_INDEX))
|
||||
#define GATHER_GREEN_TEXTURE2D_X(textureName, samplerName, coord2) GATHER_GREEN_TEXTURE2D(textureName, samplerName, float3(coord2, SLICE_ARRAY_INDEX))
|
||||
#define GATHER_BLUE_TEXTURE2D_X(textureName, samplerName, coord2) GATHER_BLUE_TEXTURE2D(textureName, samplerName, float3(coord2, SLICE_ARRAY_INDEX))
|
||||
|
||||
#else
|
||||
#define SLICE_ARRAY_INDEX 0
|
||||
|
||||
#define TEXTURE2D_X(textureName) TEXTURE2D(textureName)
|
||||
#define TEXTURE2D_X_PARAM(textureName, samplerName) TEXTURE2D_PARAM(textureName, samplerName)
|
||||
#define TEXTURE2D_X_ARGS(textureName, samplerName) TEXTURE2D_ARGS(textureName, samplerName)
|
||||
#define TEXTURE2D_X_HALF(textureName) TEXTURE2D_HALF(textureName)
|
||||
#define TEXTURE2D_X_FLOAT(textureName) TEXTURE2D_FLOAT(textureName)
|
||||
|
||||
#define LOAD_TEXTURE2D_X(textureName, unCoord2) LOAD_TEXTURE2D(textureName, unCoord2)
|
||||
#define LOAD_TEXTURE2D_X_LOD(textureName, unCoord2, lod) LOAD_TEXTURE2D_LOD(textureName, unCoord2, lod)
|
||||
#define SAMPLE_TEXTURE2D_X(textureName, samplerName, coord2) SAMPLE_TEXTURE2D(textureName, samplerName, coord2)
|
||||
#define SAMPLE_TEXTURE2D_X_LOD(textureName, samplerName, coord2, lod) SAMPLE_TEXTURE2D_LOD(textureName, samplerName, coord2, lod)
|
||||
#define GATHER_TEXTURE2D_X(textureName, samplerName, coord2) GATHER_TEXTURE2D(textureName, samplerName, coord2)
|
||||
#define GATHER_RED_TEXTURE2D_X(textureName, samplerName, coord2) GATHER_RED_TEXTURE2D(textureName, samplerName, coord2)
|
||||
#define GATHER_GREEN_TEXTURE2D_X(textureName, samplerName, coord2) GATHER_GREEN_TEXTURE2D(textureName, samplerName, coord2)
|
||||
#define GATHER_BLUE_TEXTURE2D_X(textureName, samplerName, coord2) GATHER_BLUE_TEXTURE2D(textureName, samplerName, coord2)
|
||||
#endif
|
||||
|
||||
// Structs
|
||||
// Vertex position expressed in each of the spaces a vertex shader typically needs.
struct VertexPositionInputs
{
    float3 positionWS; // Position in world space
    float3 positionVS; // Position in view space
    float4 positionCS; // Position in homogeneous clip space
    float4 positionNDC; // Homogeneous normalized device coordinates
};
|
||||
|
||||
// World-space tangent frame for a vertex (tangent/bitangent in reduced precision).
struct VertexNormalInputs
{
    real3 tangentWS;   // Tangent in world space
    real3 bitangentWS; // Bitangent in world space
    float3 normalWS;   // Normal in world space (full precision)
};
|
||||
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/ShaderVariablesFunctions.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Deprecated.hlsl"
|
||||
|
||||
#endif
|
@@ -0,0 +1,17 @@
|
||||
#ifndef UNITY_DECLARE_DEPTH_TEXTURE_INCLUDED
|
||||
#define UNITY_DECLARE_DEPTH_TEXTURE_INCLUDED
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Core.hlsl"
|
||||
|
||||
TEXTURE2D_X_FLOAT(_CameraDepthTexture);
|
||||
SAMPLER(sampler_CameraDepthTexture);
|
||||
|
||||
// Samples the camera depth texture at a screen-space UV and returns the raw
// device depth stored in the red channel. The UV is remapped for stereo first.
float SampleSceneDepth(float2 uv)
{
    float2 stereoUV = UnityStereoTransformScreenSpaceTex(uv);
    return SAMPLE_TEXTURE2D_X(_CameraDepthTexture, sampler_CameraDepthTexture, stereoUV).r;
}
|
||||
|
||||
// Fetches the camera depth texture at integer pixel coordinates (no filtering,
// no stereo UV remap) and returns the raw device depth from the red channel.
float LoadSceneDepth(uint2 uv)
{
    float deviceDepth = LOAD_TEXTURE2D_X(_CameraDepthTexture, uv).r;
    return deviceDepth;
}
|
||||
#endif
|
@@ -0,0 +1,17 @@
|
||||
#ifndef UNITY_DECLARE_NORMALS_TEXTURE_INCLUDED
|
||||
#define UNITY_DECLARE_NORMALS_TEXTURE_INCLUDED
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Core.hlsl"
|
||||
|
||||
TEXTURE2D_X_FLOAT(_CameraNormalsTexture);
|
||||
SAMPLER(sampler_CameraNormalsTexture);
|
||||
|
||||
// Samples the camera normals texture at a screen-space UV and decodes the
// oct-rect-encoded normal. The z component is negated after decode (sign
// convention used when the texture was written — see matching encode pass).
float3 SampleSceneNormals(float2 uv)
{
    float2 stereoUV = UnityStereoTransformScreenSpaceTex(uv);
    float2 packedNormal = SAMPLE_TEXTURE2D_X(_CameraNormalsTexture, sampler_CameraNormalsTexture, stereoUV).xy;
    return UnpackNormalOctRectEncode(packedNormal) * float3(1.0, 1.0, -1.0);
}
|
||||
|
||||
// Fetches the camera normals texture at integer pixel coordinates and decodes
// the oct-rect-encoded normal, negating z to match the encode-side convention.
float3 LoadSceneNormals(uint2 uv)
{
    float2 packedNormal = LOAD_TEXTURE2D_X(_CameraNormalsTexture, uv).xy;
    return UnpackNormalOctRectEncode(packedNormal) * float3(1.0, 1.0, -1.0);
}
|
||||
#endif
|
@@ -0,0 +1,17 @@
|
||||
#ifndef UNITY_DECLARE_OPAQUE_TEXTURE_INCLUDED
|
||||
#define UNITY_DECLARE_OPAQUE_TEXTURE_INCLUDED
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Core.hlsl"
|
||||
|
||||
TEXTURE2D_X(_CameraOpaqueTexture);
|
||||
SAMPLER(sampler_CameraOpaqueTexture);
|
||||
|
||||
// Samples the camera opaque (scene color) texture at a screen-space UV.
// The UV is remapped for stereo rendering before sampling.
float3 SampleSceneColor(float2 uv)
{
    float2 stereoUV = UnityStereoTransformScreenSpaceTex(uv);
    return SAMPLE_TEXTURE2D_X(_CameraOpaqueTexture, sampler_CameraOpaqueTexture, stereoUV).rgb;
}
|
||||
|
||||
// Fetches the camera opaque (scene color) texture at integer pixel coordinates.
float3 LoadSceneColor(uint2 uv)
{
    float3 sceneColor = LOAD_TEXTURE2D_X(_CameraOpaqueTexture, uv).rgb;
    return sceneColor;
}
|
||||
#endif
|
@@ -0,0 +1,19 @@
|
||||
// This file should be used as a container for things on its
|
||||
// way to being deprecated and removed in future releases
|
||||
|
||||
using System;
|
||||
|
||||
namespace UnityEngine.Rendering.Universal
{
    public static partial class ShaderInput
    {
        // This struct is no longer used, even when RenderingUtils.useStructuredBuffer
        // is true: on the shader side, worldToShadowMatrix and shadowParams must be
        // stored in arrays of different sizes, so a single interleaved struct no
        // longer matches the GPU layout.
        // For how shadow matrices and per-light shadow parameters are now uploaded,
        // see AdditionalLightsShadowCasterPass.SetupAdditionalLightsShadowReceiverConstants.
        [Obsolete("ShaderInput.ShadowData was deprecated. Shadow slice matrices and per-light shadow parameters are now passed to the GPU using entries in buffers m_AdditionalLightsWorldToShadow_SSBO and m_AdditionalShadowParams_SSBO", false)]
        public struct ShadowData
        {
            public Matrix4x4 worldToShadowMatrix;
            public Vector4 shadowParams;
        }
    }
}
|
@@ -0,0 +1,41 @@
|
||||
#ifndef UNIVERSAL_DEPRECATED_INCLUDED
|
||||
#define UNIVERSAL_DEPRECATED_INCLUDED
|
||||
|
||||
// Stereo-related bits
|
||||
#define SCREENSPACE_TEXTURE TEXTURE2D_X
|
||||
#define SCREENSPACE_TEXTURE_FLOAT TEXTURE2D_X_FLOAT
|
||||
#define SCREENSPACE_TEXTURE_HALF TEXTURE2D_X_HALF
|
||||
|
||||
// Typo fixes: re-route to the new names for backwards compatibility (in case there are external dependencies).
|
||||
#define kDieletricSpec kDielectricSpec
|
||||
#define DirectBDRF DirectBRDF
|
||||
|
||||
// Deprecated: not using consistent naming convention
|
||||
#if defined(USING_STEREO_MATRICES)
|
||||
#define unity_StereoMatrixIP unity_StereoMatrixInvP
|
||||
#define unity_StereoMatrixIVP unity_StereoMatrixInvVP
|
||||
#endif
|
||||
|
||||
// Previously used when rendering with DrawObjectsPass.
|
||||
// Global object render pass data containing various settings.
|
||||
// x,y,z are currently unused
|
||||
// w is used for knowing whether the object is opaque(1) or alpha blended(0)
|
||||
half4 _DrawObjectPassData;
|
||||
|
||||
#if USE_STRUCTURED_BUFFER_FOR_LIGHT_DATA
|
||||
// _AdditionalShadowsIndices was deprecated - To get the first shadow slice index for a light, use GetAdditionalLightShadowParams(lightIndex).w [see Shadows.hlsl]
|
||||
#define _AdditionalShadowsIndices _AdditionalShadowParams_SSBO
|
||||
// _AdditionalShadowsBuffer was deprecated - To access a shadow slice's matrix, use _AdditionalLightsWorldToShadow_SSBO[shadowSliceIndex] - To access other shadow parameters, use GetAdditionalLightShadowParams(int lightIndex) [see Shadows.hlsl]
|
||||
#define _AdditionalShadowsBuffer _AdditionalLightsWorldToShadow_SSBO
|
||||
#endif
|
||||
|
||||
// Deprecated: even when USE_STRUCTURED_BUFFER_FOR_LIGHT_DATA is defined we no longer use this structure, because worldToShadowMatrix and shadowParams must be stored in arrays of different sizes
|
||||
// To get the first shadow slice index for a light, use GetAdditionalLightShadowParams(lightIndex).w [see Shadows.hlsl]
|
||||
// To access other shadow parameters, use GetAdditionalLightShadowParams(int lightIndex)[see Shadows.hlsl]
|
||||
// Deprecated shader-side shadow data layout; kept only for source compatibility.
struct ShadowData
{
    float4x4 worldToShadowMatrix; // Per shadow slice
    float4 shadowParams;          // Per casting light
};
|
||||
|
||||
#endif // UNIVERSAL_DEPRECATED_INCLUDED
|
@@ -0,0 +1,94 @@
|
||||
#ifndef UNIVERSAL_INPUT_INCLUDED
|
||||
#define UNIVERSAL_INPUT_INCLUDED
|
||||
|
||||
#define MAX_VISIBLE_LIGHTS_UBO 32
|
||||
#define MAX_VISIBLE_LIGHTS_SSBO 256
|
||||
|
||||
// Keep in sync with RenderingUtils.useStructuredBuffer
|
||||
#define USE_STRUCTURED_BUFFER_FOR_LIGHT_DATA 0
|
||||
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/ShaderTypes.cs.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Deprecated.hlsl"
|
||||
|
||||
#if defined(SHADER_API_MOBILE) && (defined(SHADER_API_GLES) || defined(SHADER_API_GLES30))
|
||||
#define MAX_VISIBLE_LIGHTS 16
|
||||
#elif defined(SHADER_API_MOBILE) || (defined(SHADER_API_GLCORE) && !defined(SHADER_API_SWITCH)) || defined(SHADER_API_GLES) || defined(SHADER_API_GLES3) // Workaround for bug on Nintendo Switch where SHADER_API_GLCORE is mistakenly defined
|
||||
#define MAX_VISIBLE_LIGHTS 32
|
||||
#else
|
||||
#define MAX_VISIBLE_LIGHTS 256
|
||||
#endif
|
||||
|
||||
// Per-fragment inputs gathered by surface shaders and consumed by the lighting code.
struct InputData
{
    float3 positionWS;              // Fragment position in world space
    half3 normalWS;                 // Shading normal in world space
    half3 viewDirectionWS;          // Direction from fragment towards the camera
    float4 shadowCoord;             // Main light shadow-map coordinate
    half fogCoord;                  // Fog factor
    half3 vertexLighting;           // Per-vertex additional light contribution
    half3 bakedGI;                  // Baked global illumination (lightmap / SH)
    float2 normalizedScreenSpaceUV; // Fragment position in normalized screen space
    half4 shadowMask;               // Baked shadow mask channels
};
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Constant Buffers //
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
half4 _GlossyEnvironmentColor;
|
||||
half4 _SubtractiveShadowColor;
|
||||
|
||||
#define _InvCameraViewProj unity_MatrixInvVP
|
||||
float4 _ScaledScreenParams;
|
||||
|
||||
float4 _MainLightPosition;
|
||||
half4 _MainLightColor;
|
||||
half4 _MainLightOcclusionProbes;
|
||||
|
||||
// xyz are currently unused
|
||||
// w: directLightStrength
|
||||
half4 _AmbientOcclusionParam;
|
||||
|
||||
half4 _AdditionalLightsCount;
|
||||
|
||||
#if USE_STRUCTURED_BUFFER_FOR_LIGHT_DATA
|
||||
StructuredBuffer<LightData> _AdditionalLightsBuffer;
|
||||
StructuredBuffer<int> _AdditionalLightsIndices;
|
||||
#else
|
||||
// GLES3 causes a performance regression in some devices when using CBUFFER.
|
||||
#ifndef SHADER_API_GLES3
|
||||
CBUFFER_START(AdditionalLights)
|
||||
#endif
|
||||
float4 _AdditionalLightsPosition[MAX_VISIBLE_LIGHTS];
|
||||
half4 _AdditionalLightsColor[MAX_VISIBLE_LIGHTS];
|
||||
half4 _AdditionalLightsAttenuation[MAX_VISIBLE_LIGHTS];
|
||||
half4 _AdditionalLightsSpotDir[MAX_VISIBLE_LIGHTS];
|
||||
half4 _AdditionalLightsOcclusionProbes[MAX_VISIBLE_LIGHTS];
|
||||
#ifndef SHADER_API_GLES3
|
||||
CBUFFER_END
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#define UNITY_MATRIX_M unity_ObjectToWorld
|
||||
#define UNITY_MATRIX_I_M unity_WorldToObject
|
||||
#define UNITY_MATRIX_V unity_MatrixV
|
||||
#define UNITY_MATRIX_I_V unity_MatrixInvV
|
||||
#define UNITY_MATRIX_P OptimizeProjectionMatrix(glstate_matrix_projection)
|
||||
#define UNITY_MATRIX_I_P unity_MatrixInvP
|
||||
#define UNITY_MATRIX_VP unity_MatrixVP
|
||||
#define UNITY_MATRIX_I_VP unity_MatrixInvVP
|
||||
#define UNITY_MATRIX_MV mul(UNITY_MATRIX_V, UNITY_MATRIX_M)
|
||||
#define UNITY_MATRIX_T_MV transpose(UNITY_MATRIX_MV)
|
||||
#define UNITY_MATRIX_IT_MV transpose(mul(UNITY_MATRIX_I_M, UNITY_MATRIX_I_V))
|
||||
#define UNITY_MATRIX_MVP mul(UNITY_MATRIX_VP, UNITY_MATRIX_M)
|
||||
|
||||
// Note: #include order is important here.
|
||||
// UnityInput.hlsl must be included before UnityInstancing.hlsl, so constant buffer
|
||||
// declarations don't fail because of instancing macros.
|
||||
// UniversalDOTSInstancing.hlsl must be included after UnityInstancing.hlsl
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/UnityInput.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/UnityInstancing.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/UniversalDOTSInstancing.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/SpaceTransforms.hlsl"
|
||||
|
||||
#endif
|
@@ -0,0 +1,949 @@
|
||||
#ifndef UNIVERSAL_LIGHTING_INCLUDED
|
||||
#define UNIVERSAL_LIGHTING_INCLUDED
|
||||
|
||||
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Common.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/CommonMaterial.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/EntityLighting.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/ImageBasedLighting.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/BSDF.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Core.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Deprecated.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/SurfaceData.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Shadows.hlsl"
|
||||
|
||||
// If lightmap is not defined than we evaluate GI (ambient + probes) from SH
|
||||
// We might do it fully or partially in vertex to save shader ALU
|
||||
#if !defined(LIGHTMAP_ON)
|
||||
// TODO: Controls things like these by exposing SHADER_QUALITY levels (low, medium, high)
|
||||
#if defined(SHADER_API_GLES) || !defined(_NORMALMAP)
|
||||
// Evaluates SH fully in vertex
|
||||
#define EVALUATE_SH_VERTEX
|
||||
#elif !SHADER_HINT_NICE_QUALITY
|
||||
// Evaluates L2 SH in vertex and L0L1 in pixel
|
||||
#define EVALUATE_SH_MIXED
|
||||
#endif
|
||||
// Otherwise evaluate SH fully per-pixel
|
||||
#endif
|
||||
|
||||
#ifdef LIGHTMAP_ON
|
||||
#define DECLARE_LIGHTMAP_OR_SH(lmName, shName, index) float2 lmName : TEXCOORD##index
|
||||
#define OUTPUT_LIGHTMAP_UV(lightmapUV, lightmapScaleOffset, OUT) OUT.xy = lightmapUV.xy * lightmapScaleOffset.xy + lightmapScaleOffset.zw;
|
||||
#define OUTPUT_SH(normalWS, OUT)
|
||||
#else
|
||||
#define DECLARE_LIGHTMAP_OR_SH(lmName, shName, index) half3 shName : TEXCOORD##index
|
||||
#define OUTPUT_LIGHTMAP_UV(lightmapUV, lightmapScaleOffset, OUT)
|
||||
#define OUTPUT_SH(normalWS, OUT) OUT.xyz = SampleSHVertex(normalWS)
|
||||
#endif
|
||||
|
||||
// Renamed -> LIGHTMAP_SHADOW_MIXING
|
||||
#if !defined(_MIXED_LIGHTING_SUBTRACTIVE) && defined(LIGHTMAP_SHADOW_MIXING) && !defined(SHADOWS_SHADOWMASK)
|
||||
#define _MIXED_LIGHTING_SUBTRACTIVE
|
||||
#endif
|
||||
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Light Helpers //
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Abstraction over Light shading data.
|
||||
// Abstraction over light shading data, shared by main and additional lights.
struct Light
{
    half3 direction;           // Direction from surface towards the light
    half3 color;               // Light color
    half distanceAttenuation;  // Distance (and spot-angle) attenuation
    half shadowAttenuation;    // Realtime/baked shadow attenuation
};
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Attenuation Functions /
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Matches Unity's vanilla attenuation
|
||||
// Attenuation smoothly decreases to light range.
|
||||
// Distance attenuation matching Unity's vanilla falloff: inverse-square,
// smoothly faded to zero at the light range. A single code path is shared by
// additional directional and punctual lights — directional lights are set up
// so the result evaluates to 1.
float DistanceAttenuation(float distanceSqr, half2 distanceAttenuation)
{
    float lightAtten = rcp(distanceSqr);

#if SHADER_HINT_NICE_QUALITY
    // Same smoothing factor the Unity lightmapper uses.
    half factor = distanceSqr * distanceAttenuation.x;
    half smoothFactor = saturate(1.0h - factor * factor);
    smoothFactor = smoothFactor * smoothFactor;
#else
    // Fade attenuation linearly to zero starting at 80% of the light range:
    //   fadeDistanceSqr = (0.8 * 0.8 * lightRangeSqr)
    //   smoothFactor    = (lightRangeSqr - distanceSqr) / (lightRangeSqr - fadeDistanceSqr)
    // which is precomputed into a single MAD:
    //   distanceSqr * distanceAttenuation.x + distanceAttenuation.y
    half smoothFactor = saturate(distanceSqr * distanceAttenuation.x + distanceAttenuation.y);
#endif

    return lightAtten * smoothFactor;
}
|
||||
|
||||
// Spot-cone attenuation with a (squared) linear falloff:
//   (SdotL - cosOuterAngle) / (cosInnerAngle - cosOuterAngle)
// rewritten as a single MAD with precomputed terms:
//   invAngleRange = 1.0 / (cosInnerAngle - cosOuterAngle)
//   SdotL * spotAttenuation.x + spotAttenuation.y
half AngleAttenuation(half3 spotDirection, half3 lightDirection, half2 spotAttenuation)
{
    half cosAngle = dot(spotDirection, lightDirection);
    half attenuation = saturate(cosAngle * spotAttenuation.x + spotAttenuation.y);
    return attenuation * attenuation;
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Light Abstraction //
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Builds the main (directional) light with full shadow attenuation.
Light GetMainLight()
{
    Light light;
    light.direction = _MainLightPosition.xyz;
    light.color = _MainLightColor.rgb;
    // unity_LightData.z is 1 when the light is not culled by the culling mask, 0 otherwise.
    light.distanceAttenuation = unity_LightData.z;
    light.shadowAttenuation = 1.0;
    return light;
}
|
||||
|
||||
// Main light with realtime shadow attenuation evaluated from a shadow coordinate.
Light GetMainLight(float4 shadowCoord)
{
    Light mainLight = GetMainLight();
    mainLight.shadowAttenuation = MainLightRealtimeShadow(shadowCoord);
    return mainLight;
}
|
||||
|
||||
// Main light with combined realtime + baked (shadow mask) attenuation.
Light GetMainLight(float4 shadowCoord, float3 positionWS, half4 shadowMask)
{
    Light mainLight = GetMainLight();
    mainLight.shadowAttenuation = MainLightShadow(shadowCoord, positionWS, shadowMask, _MainLightOcclusionProbes);
    return mainLight;
}
|
||||
|
||||
// Fills a light struct given a perObjectLightIndex
|
||||
// Fills a Light struct for the additional light stored at perObjectLightIndex.
// Shadow attenuation is left at 1.0 here; it can later be overridden by
// GetAdditionalLight(uint i, float3 positionWS, half4 shadowMask).
Light GetAdditionalPerObjectLight(int perObjectLightIndex, float3 positionWS)
{
    // Abstraction over the light input constants: structured-buffer vs UBO path.
#if USE_STRUCTURED_BUFFER_FOR_LIGHT_DATA
    float4 lightPositionWS = _AdditionalLightsBuffer[perObjectLightIndex].position;
    half3 color = _AdditionalLightsBuffer[perObjectLightIndex].color.rgb;
    half4 distanceAndSpotAttenuation = _AdditionalLightsBuffer[perObjectLightIndex].attenuation;
    half4 spotDirection = _AdditionalLightsBuffer[perObjectLightIndex].spotDirection;
#else
    float4 lightPositionWS = _AdditionalLightsPosition[perObjectLightIndex];
    half3 color = _AdditionalLightsColor[perObjectLightIndex].rgb;
    half4 distanceAndSpotAttenuation = _AdditionalLightsAttenuation[perObjectLightIndex];
    half4 spotDirection = _AdditionalLightsSpotDir[perObjectLightIndex];
#endif

    // Directional lights store their direction in lightPositionWS.xyz with .w == 0,
    // so the same expression handles both directional and punctual lights.
    float3 lightVector = lightPositionWS.xyz - positionWS * lightPositionWS.w;
    float distanceSqr = max(dot(lightVector, lightVector), HALF_MIN);

    half3 lightDirection = half3(lightVector * rsqrt(distanceSqr));
    half attenuation = DistanceAttenuation(distanceSqr, distanceAndSpotAttenuation.xy)
        * AngleAttenuation(spotDirection.xyz, lightDirection, distanceAndSpotAttenuation.zw);

    Light light;
    light.direction = lightDirection;
    light.color = color;
    light.distanceAttenuation = attenuation;
    light.shadowAttenuation = 1.0;
    return light;
}
|
||||
|
||||
// Offset into the global light-index list for this object. Only meaningful on
// the structured-buffer path; the UBO path indexes unity_LightIndices directly.
uint GetPerObjectLightIndexOffset()
{
#if USE_STRUCTURED_BUFFER_FOR_LIGHT_DATA
    return unity_LightData.x;
#else
    return 0;
#endif
}
|
||||
|
||||
// Returns a per-object index given a loop index.
|
||||
// This abstract the underlying data implementation for storing lights/light indices
|
||||
// Converts a loop index into a per-object light index, abstracting over the
// underlying storage of lights / light indices.
int GetPerObjectLightIndex(uint index)
{
    /////////////////////////////////////////////////////////////////////////////////////////////
    // Structured Buffer Path                                                                   /
    //                                                                                          /
    // Lights and light indices live in StructuredBuffers; we can index them directly.          /
    // Currently all non-mobile platforms take this path :(                                     /
    // Mobile GPUs have SSBO limitations (performance / no vertex shader support).              /
    /////////////////////////////////////////////////////////////////////////////////////////////
#if USE_STRUCTURED_BUFFER_FOR_LIGHT_DATA
    uint offset = unity_LightData.x;
    return _AdditionalLightsIndices[offset + index];

    /////////////////////////////////////////////////////////////////////////////////////////////
    // UBO path                                                                                 /
    //                                                                                          /
    // 8 light indices are packed into float4 unity_LightIndices[2].                            /
    // Due to memory alignment Unity doesn't support int[] or float[]; reinterpret-casting      /
    // unity_LightIndices to float[] would become float4[] and add register pressure. :(        /
    /////////////////////////////////////////////////////////////////////////////////////////////
#elif !defined(SHADER_API_GLES)
    // index is uint, so the compiler lowers div & mod to bitfield ops (shift and mask).
    //
    // TODO: Can we index a float4 directly? Currently the compiler replaces
    // unity_LightIndicesX[i] with a dp4 against an identity matrix:
    //   u_xlat16_40 = dot(unity_LightIndices[int(u_xlatu13)], ImmCB_0_0_0[u_xlati1]);
    // which raises both arithmetic and register pressure.
    return unity_LightIndices[index / 4][index % 4];
#else
    // GLES2 fallback: no bitfield ops here :(.
    // We limit to 4 indices per object and only read unity_LightIndices[0].
    // Conditional moves are branch-free even on Mali-400 — a small arithmetic
    // cost but no extra register pressure from the ImmCB_0_0_0 matrix.
    half2 indexPair = (index < 2.0h) ? unity_LightIndices[0].xy : unity_LightIndices[0].zw;
    half remainder = (index < 2.0h) ? index : index - 2.0h;
    return (remainder < 1.0h) ? indexPair.x : indexPair.y;
#endif
}
|
||||
|
||||
// Fills a light struct given a loop i index. This will convert the i
|
||||
// index to a perObjectLightIndex
|
||||
// Fills a Light struct given a loop index i, converting it to a per-object light index.
Light GetAdditionalLight(uint i, float3 positionWS)
{
    int perObjectLightIndex = GetPerObjectLightIndex(i);
    return GetAdditionalPerObjectLight(perObjectLightIndex, positionWS);
}
|
||||
|
||||
// Same as GetAdditionalLight(i, positionWS), but additionally evaluates shadow
// attenuation using the baked shadow mask.
Light GetAdditionalLight(uint i, float3 positionWS, half4 shadowMask)
{
    int perObjectLightIndex = GetPerObjectLightIndex(i);
    Light light = GetAdditionalPerObjectLight(perObjectLightIndex, positionWS);

    // Occlusion-probe channel selection mirrors the light-data storage path.
#if USE_STRUCTURED_BUFFER_FOR_LIGHT_DATA
    half4 occlusionProbeChannels = _AdditionalLightsBuffer[perObjectLightIndex].occlusionProbeChannels;
#else
    half4 occlusionProbeChannels = _AdditionalLightsOcclusionProbes[perObjectLightIndex];
#endif
    light.shadowAttenuation = AdditionalLightShadow(perObjectLightIndex, positionWS, light.direction, shadowMask, occlusionProbeChannels);

    return light;
}
|
||||
|
||||
// Number of additional lights affecting this object.
int GetAdditionalLightsCount()
{
    // TODO: expose an SRP API that lets the pipeline cap the number of lights
    // during culling, so the loop bound could be a uniform branch. That would
    // also help with baking lights that exceed the limit into SH.
    return min(_AdditionalLightsCount.x, unity_LightData.y);
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// BRDF Functions //
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#define kDielectricSpec half4(0.04, 0.04, 0.04, 1.0 - 0.04) // standard dielectric reflectivity coef at incident angle (= 4%)
|
||||
|
||||
// Precomputed, light-invariant BRDF terms for a surface. Computing these once
// outside the light loop avoids redundant work per light; see DirectBRDF for a
// detailed explanation of how the cached terms are used.
struct BRDFData
{
    half3 diffuse;
    half3 specular;
    half reflectivity;
    half perceptualRoughness;
    half roughness;
    half roughness2;
    half grazingTerm;

    // Cached light-invariant terms:
    half normalizationTerm;  // roughness * 4.0 + 2.0
    half roughness2MinusOne; // roughness^2 - 1.0
};
|
||||
|
||||
// Scalar reflectivity from a specular color.
half ReflectivitySpecular(half3 specular)
{
#if defined(SHADER_API_GLES)
    // Cheap path: red channel only — most metals are monochrome or have a
    // reddish/yellowish tint, so red is a reasonable proxy.
    return specular.r;
#else
    return max(max(specular.r, specular.g), specular.b);
#endif
}
|
||||
|
||||
// Computes (1 - reflectivity) for the metallic workflow.
//   1 - reflectivity = 1 - lerp(dielectricSpec, 1, metallic)
//                    = lerp(1 - dielectricSpec, 0, metallic)
// With (1 - dielectricSpec) stored in kDielectricSpec.a, this folds to:
//   alpha - metallic * alpha
half OneMinusReflectivityMetallic(half metallic)
{
    half oneMinusDielectricSpec = kDielectricSpec.a;
    return oneMinusDielectricSpec - metallic * oneMinusDielectricSpec;
}
|
||||
|
||||
// Fills a BRDFData struct from already-resolved diffuse/specular terms and
// precomputes the light-invariant terms used by DirectBRDF.
// NOTE: with _ALPHAPREMULTIPLY_ON, 'alpha' is modified and propagated up to the caller.
inline void InitializeBRDFDataDirect(half3 diffuse, half3 specular, half reflectivity, half oneMinusReflectivity, half smoothness, inout half alpha, out BRDFData outBRDFData)
{
    half perceptualRoughness = PerceptualSmoothnessToPerceptualRoughness(smoothness);
    half roughness = max(PerceptualRoughnessToRoughness(perceptualRoughness), HALF_MIN_SQRT);
    half roughness2 = max(roughness * roughness, HALF_MIN);

    outBRDFData.diffuse = diffuse;
    outBRDFData.specular = specular;
    outBRDFData.reflectivity = reflectivity;

    outBRDFData.perceptualRoughness = perceptualRoughness;
    outBRDFData.roughness = roughness;
    outBRDFData.roughness2 = roughness2;
    outBRDFData.grazingTerm = saturate(smoothness + reflectivity);
    outBRDFData.normalizationTerm = roughness * 4.0h + 2.0h;
    outBRDFData.roughness2MinusOne = roughness2 - 1.0h;

#ifdef _ALPHAPREMULTIPLY_ON
    outBRDFData.diffuse *= alpha;
    alpha = alpha * oneMinusReflectivity + reflectivity;
#endif
}
|
||||
|
||||
// Derives diffuse/specular/reflectivity from either the specular or the
// metallic material setup, then delegates to InitializeBRDFDataDirect.
// NOTE: with _ALPHAPREMULTIPLY_ON, 'alpha' is modified and propagated up (see
// InitializeBRDFDataDirect).
inline void InitializeBRDFData(half3 albedo, half metallic, half3 specular, half smoothness, inout half alpha, out BRDFData outBRDFData)
{
#ifdef _SPECULAR_SETUP
    half reflectivity = ReflectivitySpecular(specular);
    half oneMinusReflectivity = 1.0 - reflectivity;
    half3 brdfDiffuse = albedo * (half3(1.0h, 1.0h, 1.0h) - specular);
    half3 brdfSpecular = specular;
#else
    half oneMinusReflectivity = OneMinusReflectivityMetallic(metallic);
    half reflectivity = 1.0 - oneMinusReflectivity;
    half3 brdfDiffuse = albedo * oneMinusReflectivity;
    // Use the canonical kDielectricSpec constant rather than the deprecated
    // typo alias kDieletricSpec (which only resolves via Deprecated.hlsl).
    half3 brdfSpecular = lerp(kDielectricSpec.rgb, albedo, metallic);
#endif

    InitializeBRDFDataDirect(brdfDiffuse, brdfSpecular, reflectivity, oneMinusReflectivity, smoothness, alpha, outBRDFData);
}
|
||||
|
||||
// Remaps an air-interface F0 to the equivalent F0 under an IOR-1.5 clear coat.
half3 ConvertF0ForClearCoat15(half3 f0)
{
#if defined(SHADER_API_MOBILE)
    // Cheaper approximation for mobile targets.
    return ConvertF0ForAirInterfaceToF0ForClearCoat15Fast(f0);
#else
    return ConvertF0ForAirInterfaceToF0ForClearCoat15(f0);
#endif
}
|
||||
|
||||
// Builds the clear coat layer's BRDF and adjusts the base layer to sit under it.
// baseBRDFData is inout: its roughness terms and specular F0 are rewritten.
inline void InitializeBRDFDataClearCoat(half clearCoatMask, half clearCoatSmoothness, inout BRDFData baseBRDFData, out BRDFData outBRDFData)
{
    // The coat is a plain dielectric: fixed F0, diffuse = 1 - F0.
    outBRDFData.diffuse      = kDielectricSpec.aaa; // 1 - kDielectricSpec
    outBRDFData.specular     = kDielectricSpec.rgb;
    outBRDFData.reflectivity = kDielectricSpec.r;

    // Roughness of the clear coat layer.
    outBRDFData.perceptualRoughness = PerceptualSmoothnessToPerceptualRoughness(clearCoatSmoothness);
    outBRDFData.roughness           = max(PerceptualRoughnessToRoughness(outBRDFData.perceptualRoughness), HALF_MIN_SQRT);
    outBRDFData.roughness2          = max(outBRDFData.roughness * outBRDFData.roughness, HALF_MIN);
    outBRDFData.normalizationTerm   = outBRDFData.roughness * 4.0h + 2.0h;
    outBRDFData.roughness2MinusOne  = outBRDFData.roughness2 - 1.0h;
    outBRDFData.grazingTerm         = saturate(clearCoatSmoothness + kDielectricSpec.x);

// Relatively small effect, cut for lower quality tiers.
#if !defined(SHADER_API_MOBILE)
    // Widen the base layer roughness using the coat's inverse IOR.
    half ieta = lerp(1.0h, CLEAR_COAT_IETA, clearCoatMask);
    half coatRoughnessScale = Sq(ieta);
    half sigma = RoughnessToVariance(PerceptualRoughnessToRoughness(baseBRDFData.perceptualRoughness));

    baseBRDFData.perceptualRoughness = RoughnessToPerceptualRoughness(VarianceToRoughness(sigma * coatRoughnessScale));

    // Recompute the base material for the new roughness; the previous (now unused)
    // computation is expected to be eliminated by the compiler.
    baseBRDFData.roughness          = max(PerceptualRoughnessToRoughness(baseBRDFData.perceptualRoughness), HALF_MIN_SQRT);
    baseBRDFData.roughness2         = max(baseBRDFData.roughness * baseBRDFData.roughness, HALF_MIN);
    baseBRDFData.normalizationTerm  = baseBRDFData.roughness * 4.0h + 2.0h;
    baseBRDFData.roughness2MinusOne = baseBRDFData.roughness2 - 1.0h;
#endif

    // Darken/saturate base reflectance: coat-to-surface instead of air-to-surface.
    baseBRDFData.specular = lerp(baseBRDFData.specular, ConvertF0ForClearCoat15(baseBRDFData.specular), clearCoatMask);
    // TODO: base diffuse could be recalculated too — in the specular workflow it depends on it.
}
|
||||
|
||||
// Specular half of EnvironmentBRDF: fresnel blend toward the grazing term,
// scaled down by a roughness-based energy reduction.
half3 EnvironmentBRDFSpecular(BRDFData brdfData, half fresnelTerm)
{
    float energyReduction = 1.0 / (brdfData.roughness2 + 1.0);
    return energyReduction * lerp(brdfData.specular, brdfData.grazingTerm, fresnelTerm);
}
|
||||
|
||||
// Combines indirect diffuse and indirect specular into the environment BRDF.
half3 EnvironmentBRDF(BRDFData brdfData, half3 indirectDiffuse, half3 indirectSpecular, half fresnelTerm)
{
    return indirectDiffuse * brdfData.diffuse
         + indirectSpecular * EnvironmentBRDFSpecular(brdfData, fresnelTerm);
}
|
||||
|
||||
// Environment BRDF without a diffuse term, for the clear coat layer.
// Fix: removed the local surfaceReduction, which was computed but never used
// (dead code copied from EnvironmentBRDFSpecular, which computes it itself).
half3 EnvironmentBRDFClearCoat(BRDFData brdfData, half clearCoatMask, half3 indirectSpecular, half fresnelTerm)
{
    return indirectSpecular * EnvironmentBRDFSpecular(brdfData, fresnelTerm) * clearCoatMask;
}
|
||||
|
||||
// Scalar specular term of the minimalist Cook-Torrance BRDF.
// NOTE: multiply by the reflectance f0 (specular color) to complete the term.
//
// GGX distribution multiplied by a combined Visibility/Fresnel approximation:
//   BRDFspec = (D * V * F) / 4.0
//   D     = roughness^2 / ( NoH^2 * (roughness^2 - 1) + 1 )^2
//   V * F = 1.0 / ( LoH^2 * (roughness + 0.5) )
// See "Optimizing PBR for Mobile", Siggraph 2015 moving mobile graphics course:
// https://community.arm.com/events/1155
half DirectBRDFSpecular(BRDFData brdfData, half3 normalWS, half3 lightDirectionWS, half3 viewDirectionWS)
{
    float3 halfDirWS = SafeNormalize(float3(lightDirectionWS) + float3(viewDirectionWS));

    float nDotH = saturate(dot(normalWS, halfDirWS));
    half lDotH = saturate(dot(lightDirectionWS, halfDirWS));

    // Light-invariant pieces live in brdfData:
    //   normalizationTerm = (roughness + 0.5) * 4.0, stored as roughness * 4.0 + 2.0 (a MAD).
    float ggxDenom = nDotH * nDotH * brdfData.roughness2MinusOne + 1.00001f;

    half lDotH2 = lDotH * lDotH;
    half specularTerm = brdfData.roughness2 / ((ggxDenom * ggxDenom) * max(0.1h, lDotH2) * brdfData.normalizationTerm);

    // Where half is a real 16-bit float the denominator risks overflow; the clamp
    // below "fixes" that. The dx compiler (bytecode converted to metal/gles) sees
    // specularTerm is non-negative and drops the max(0,..) half of the clamp.
#if defined (SHADER_API_MOBILE) || defined (SHADER_API_SWITCH)
    specularTerm = specularTerm - HALF_MIN;
    specularTerm = clamp(specularTerm, 0.0, 100.0); // Prevent FP16 overflow on mobiles
#endif

    return specularTerm;
}
|
||||
|
||||
// Minimalist Cook-Torrance BRDF; implementation differs slightly from the
// original derivation (http://www.thetenthplanet.de/archives/255):
//  * NDF: [Modified] GGX
//  * Visibility: modified Kelemen and Szirmay-Kalos
//  * Fresnel: approximated with 1/LdotH
half3 DirectBDRF(BRDFData brdfData, half3 normalWS, half3 lightDirectionWS, half3 viewDirectionWS, bool specularHighlightsOff)
{
    // The flag can still be resolved at compile time. When it is not, measured
    // overhead of the branch is ~+2.5% when taken, -10% when not (Switch).
    [branch] if (specularHighlightsOff)
        return brdfData.diffuse;

    half specularTerm = DirectBRDFSpecular(brdfData, normalWS, lightDirectionWS, viewDirectionWS);
    return brdfData.diffuse + specularTerm * brdfData.specular;
}
|
||||
|
||||
// Minimalist Cook-Torrance BRDF, compile-time variant of DirectBDRF:
// the specular toggle is the _SPECULARHIGHLIGHTS_OFF keyword instead of a bool.
half3 DirectBRDF(BRDFData brdfData, half3 normalWS, half3 lightDirectionWS, half3 viewDirectionWS)
{
#if defined(_SPECULARHIGHLIGHTS_OFF)
    return brdfData.diffuse;
#else
    return brdfData.diffuse + DirectBRDFSpecular(brdfData, normalWS, lightDirectionWS, viewDirectionWS) * brdfData.specular;
#endif
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Global Illumination //
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Ambient occlusion
|
||||
TEXTURE2D_X(_ScreenSpaceOcclusionTexture);
|
||||
SAMPLER(sampler_ScreenSpaceOcclusionTexture);
|
||||
|
||||
struct AmbientOcclusionFactor
|
||||
{
|
||||
half indirectAmbientOcclusion;
|
||||
half directAmbientOcclusion;
|
||||
};
|
||||
|
||||
// Reads the SSAO texture at the given normalized screen-space UV.
half SampleAmbientOcclusion(float2 normalizedScreenSpaceUV)
{
    float2 stereoUV = UnityStereoTransformScreenSpaceTex(normalizedScreenSpaceUV);
    return SAMPLE_TEXTURE2D_X(_ScreenSpaceOcclusionTexture, sampler_ScreenSpaceOcclusionTexture, stereoUV).x;
}
|
||||
|
||||
// Derives indirect and direct AO factors from the SSAO texture.
// Direct AO is faded in by _AmbientOcclusionParam.w.
AmbientOcclusionFactor GetScreenSpaceAmbientOcclusion(float2 normalizedScreenSpaceUV)
{
    half occlusion = SampleAmbientOcclusion(normalizedScreenSpaceUV);

    AmbientOcclusionFactor aoFactor;
    aoFactor.indirectAmbientOcclusion = occlusion;
    aoFactor.directAmbientOcclusion = lerp(1.0, occlusion, _AmbientOcclusionParam.w);
    return aoFactor;
}
|
||||
|
||||
// Evaluates SH L0, L1 and L2 terms for the given world-space normal.
half3 SampleSH(half3 normalWS)
{
    // LPPV is not supported in the Lightweight/Universal Pipeline.
    real4 shCoefficients[7];
    shCoefficients[0] = unity_SHAr;
    shCoefficients[1] = unity_SHAg;
    shCoefficients[2] = unity_SHAb;
    shCoefficients[3] = unity_SHBr;
    shCoefficients[4] = unity_SHBg;
    shCoefficients[5] = unity_SHBb;
    shCoefficients[6] = unity_SHC;

    return max(half3(0, 0, 0), SampleSH9(shCoefficients, normalWS));
}
|
||||
|
||||
// SH vertex-stage evaluation. Depending on the target, SH is evaluated fully
// per vertex, mixed (L2 here, L0/L1 per pixel), or fully per pixel (nothing
// here). See SampleSHPixel for the matching pixel-stage half.
half3 SampleSHVertex(half3 normalWS)
{
#if defined(EVALUATE_SH_VERTEX)
    return SampleSH(normalWS);
#elif defined(EVALUATE_SH_MIXED)
    // No max() here — this is only the L2 contribution.
    return SHEvalLinearL2(normalWS, unity_SHBr, unity_SHBg, unity_SHBb, unity_SHC);
#else
    // Fully per-pixel: nothing to compute at the vertex stage.
    return half3(0.0, 0.0, 0.0);
#endif
}
|
||||
|
||||
// SH pixel-stage evaluation; complements SampleSHVertex (mixed or fully per pixel).
half3 SampleSHPixel(half3 L2Term, half3 normalWS)
{
#if defined(EVALUATE_SH_VERTEX)
    return L2Term;
#elif defined(EVALUATE_SH_MIXED)
    half3 res = L2Term + SHEvalLinearL0L1(normalWS, unity_SHAr, unity_SHAg, unity_SHAb);
#ifdef UNITY_COLORSPACE_GAMMA
    res = LinearToSRGB(res);
#endif
    return max(half3(0, 0, 0), res);
#else
    // Default: evaluate SH fully per pixel.
    return SampleSH(normalWS);
#endif
}
|
||||
|
||||
#if defined(UNITY_DOTS_INSTANCING_ENABLED)
|
||||
#define LIGHTMAP_NAME unity_Lightmaps
|
||||
#define LIGHTMAP_INDIRECTION_NAME unity_LightmapsInd
|
||||
#define LIGHTMAP_SAMPLER_NAME samplerunity_Lightmaps
|
||||
#define LIGHTMAP_SAMPLE_EXTRA_ARGS lightmapUV, unity_LightmapIndex.x
|
||||
#else
|
||||
#define LIGHTMAP_NAME unity_Lightmap
|
||||
#define LIGHTMAP_INDIRECTION_NAME unity_LightmapInd
|
||||
#define LIGHTMAP_SAMPLER_NAME samplerunity_Lightmap
|
||||
#define LIGHTMAP_SAMPLE_EXTRA_ARGS lightmapUV
|
||||
#endif
|
||||
|
||||
// Samples the baked lightmap — directional variant when available, otherwise
// non-directional. Realtime GI is not supported.
half3 SampleLightmap(float2 lightmapUV, half3 normalWS)
{
#ifdef UNITY_LIGHTMAP_FULL_HDR
    bool encodedLightmap = false;
#else
    bool encodedLightmap = true;
#endif

    half4 decodeInstructions = half4(LIGHTMAP_HDR_MULTIPLIER, LIGHTMAP_HDR_EXPONENT, 0.0h, 0.0h);

    // The shared-library lightmap samplers apply a scale/bias to the UVs, but
    // Universal already applied it in the vertex stage. Passing (1, 1, 0, 0)
    // lets the compiler fold the transform away.
    half4 identityST = half4(1, 1, 0, 0);

#if defined(LIGHTMAP_ON) && defined(DIRLIGHTMAP_COMBINED)
    return SampleDirectionalLightmap(TEXTURE2D_LIGHTMAP_ARGS(LIGHTMAP_NAME, LIGHTMAP_SAMPLER_NAME),
        TEXTURE2D_LIGHTMAP_ARGS(LIGHTMAP_INDIRECTION_NAME, LIGHTMAP_SAMPLER_NAME),
        LIGHTMAP_SAMPLE_EXTRA_ARGS, identityST, normalWS, encodedLightmap, decodeInstructions);
#elif defined(LIGHTMAP_ON)
    return SampleSingleLightmap(TEXTURE2D_LIGHTMAP_ARGS(LIGHTMAP_NAME, LIGHTMAP_SAMPLER_NAME), LIGHTMAP_SAMPLE_EXTRA_ARGS, identityST, encodedLightmap, decodeInstructions);
#else
    return half3(0.0, 0.0, 0.0);
#endif
}
|
||||
|
||||
// We either sample GI from baked lightmap or from probes.
|
||||
// If lightmap: sampleData.xy = lightmapUV
|
||||
// If probe: sampleData.xyz = L2 SH terms
|
||||
#if defined(LIGHTMAP_ON)
|
||||
#define SAMPLE_GI(lmName, shName, normalWSName) SampleLightmap(lmName, normalWSName)
|
||||
#else
|
||||
#define SAMPLE_GI(lmName, shName, normalWSName) SampleSHPixel(shName, normalWSName)
|
||||
#endif
|
||||
|
||||
// Indirect specular from the reflection probe (or the flat ambient color when
// environment reflections are disabled), attenuated by occlusion.
half3 GlossyEnvironmentReflection(half3 reflectVector, half perceptualRoughness, half occlusion)
{
#if !defined(_ENVIRONMENTREFLECTIONS_OFF)
    half mip = PerceptualRoughnessToMipmapLevel(perceptualRoughness);
    half4 probeSample = SAMPLE_TEXTURECUBE_LOD(unity_SpecCube0, samplerunity_SpecCube0, reflectVector, mip);

    //TODO:DOTS - we need to port probes to live in c# so we can manage this manually.
#if defined(UNITY_USE_NATIVE_HDR) || defined(UNITY_DOTS_INSTANCING_ENABLED)
    half3 irradiance = probeSample.rgb;
#else
    half3 irradiance = DecodeHDREnvironment(probeSample, unity_SpecCube0_HDR);
#endif

    return irradiance * occlusion;
#endif // GLOSSY_REFLECTIONS

    return _GlossyEnvironmentColor.rgb * occlusion;
}
|
||||
|
||||
// Makes realtime shadows cooperate with a surface that already carries baked
// lighting/shadowing from the main light:
//  1) subtract the estimated main-light contribution where realtime shadow
//     falls (preserves other baked lights and bounces; avoids shadowing
//     geometry that faces away from the light);
//  2) clamp against the user-defined shadow color;
//  3) never brighten: keep the original lightmap value when it is darker.
half3 SubtractDirectMainLightFromLightmap(Light mainLight, half3 normalWS, half3 bakedGI)
{
    // 1) Estimate illumination as if the light had been shadowed during the bake.
    //    Only the main directional light is subtracted; the lambert term below
    //    accounts for that contribution.
    half shadowStrength = GetMainLightShadowStrength();
    half contributionTerm = saturate(dot(mainLight.direction, normalWS));
    half3 lambert = mainLight.color * contributionTerm;
    half3 maskedContribution = lambert * (1.0 - mainLight.shadowAttenuation);
    half3 subtractedLightmap = bakedGI - maskedContribution;

    // 2) Respect the scene's ambient floor so realtime shadows don't get too dark.
    half3 realtimeShadow = max(subtractedLightmap, _SubtractiveShadowColor.xyz);
    realtimeShadow = lerp(bakedGI, realtimeShadow, shadowStrength);

    // 3) Pick the darkest color.
    return min(bakedGI, realtimeShadow);
}
|
||||
|
||||
// Indirect (environment) lighting: baked GI diffuse plus probe specular, with
// an optional clear coat lobe layered on top.
half3 GlobalIllumination(BRDFData brdfData, BRDFData brdfDataClearCoat, float clearCoatMask,
    half3 bakedGI, half occlusion,
    half3 normalWS, half3 viewDirectionWS)
{
    half3 reflectDirWS = reflect(-viewDirectionWS, normalWS);
    half NoV = saturate(dot(normalWS, viewDirectionWS));
    half fresnelTerm = Pow4(1.0 - NoV);

    half3 indirectDiffuse = bakedGI * occlusion;
    half3 indirectSpecular = GlossyEnvironmentReflection(reflectDirWS, brdfData.perceptualRoughness, occlusion);

    half3 envColor = EnvironmentBRDF(brdfData, indirectDiffuse, indirectSpecular, fresnelTerm);

#if defined(_CLEARCOAT) || defined(_CLEARCOATMAP)
    half3 coatIndirectSpecular = GlossyEnvironmentReflection(reflectDirWS, brdfDataClearCoat.perceptualRoughness, occlusion);
    // TODO: "grazing term" causes problems on full roughness
    half3 coatColor = EnvironmentBRDFClearCoat(brdfDataClearCoat, clearCoatMask, coatIndirectSpecular, fresnelTerm);

    // Blend base and coat the way the Khronos glTF clearcoat extension
    // recommends, using NoV. fresnelTerm above is Pow4 rather than Pow5, which
    // is close enough for a blend weight.
    half coatFresnel = kDielectricSpec.x + kDielectricSpec.a * fresnelTerm;
    return envColor * (1.0 - coatFresnel * clearCoatMask) + coatColor;
#else
    return envColor;
#endif
}
|
||||
|
||||
// Backwards compatibility: GI without a clear coat layer.
half3 GlobalIllumination(BRDFData brdfData, half3 bakedGI, half occlusion, half3 normalWS, half3 viewDirectionWS)
{
    const BRDFData emptyCoat = (BRDFData)0;
    return GlobalIllumination(brdfData, emptyCoat, 0.0, bakedGI, occlusion, normalWS, viewDirectionWS);
}
|
||||
|
||||
// With subtractive mixed lighting, folds the realtime main light's shadow into
// the baked GI; otherwise a no-op.
void MixRealtimeAndBakedGI(inout Light light, half3 normalWS, inout half3 bakedGI)
{
#if defined(LIGHTMAP_ON) && defined(_MIXED_LIGHTING_SUBTRACTIVE)
    bakedGI = SubtractDirectMainLightFromLightmap(light, normalWS, bakedGI);
#endif
}
|
||||
|
||||
// Backwards compatibility: the shadowMask argument is accepted but ignored.
void MixRealtimeAndBakedGI(inout Light light, half3 normalWS, inout half3 bakedGI, half4 shadowMask)
{
    MixRealtimeAndBakedGI(light, normalWS, bakedGI);
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Lighting Functions //
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Lambert diffuse: light color scaled by saturated N.L.
half3 LightingLambert(half3 lightColor, half3 lightDir, half3 normal)
{
    half nDotL = saturate(dot(normal, lightDir));
    return lightColor * nDotL;
}
|
||||
|
||||
// Blinn-Phong specular: N.H raised to the smoothness exponent, tinted by the
// specular color and scaled by the light color.
half3 LightingSpecular(half3 lightColor, half3 lightDir, half3 normal, half3 viewDir, half4 specular, half smoothness)
{
    float3 halfVec = SafeNormalize(float3(lightDir) + float3(viewDir));
    half nDotH = saturate(dot(normal, halfVec));
    half modifier = pow(nDotH, smoothness);
    half3 specularReflection = specular.rgb * modifier;
    return lightColor * specularReflection;
}
|
||||
|
||||
// Direct PBR lighting for one light, optionally layering a clear coat lobe on
// top of the base specular.
half3 LightingPhysicallyBased(BRDFData brdfData, BRDFData brdfDataClearCoat,
    half3 lightColor, half3 lightDirectionWS, half lightAttenuation,
    half3 normalWS, half3 viewDirectionWS,
    half clearCoatMask, bool specularHighlightsOff)
{
    half NdotL = saturate(dot(normalWS, lightDirectionWS));
    half3 radiance = lightColor * (lightAttenuation * NdotL);

    half3 brdf = brdfData.diffuse;
#ifndef _SPECULARHIGHLIGHTS_OFF
    [branch] if (!specularHighlightsOff)
    {
        brdf += brdfData.specular * DirectBRDFSpecular(brdfData, normalWS, lightDirectionWS, viewDirectionWS);

#if defined(_CLEARCOAT) || defined(_CLEARCOATMAP)
        // The clear coat evaluates the specular a second time and shares terms
        // with the base specular; we rely on the compiler to merge those and
        // compute them only once.
        half brdfCoat = kDielectricSpec.r * DirectBRDFSpecular(brdfDataClearCoat, normalWS, lightDirectionWS, viewDirectionWS);

        // Mix clear coat and base per the Khronos glTF recommendation:
        // https://github.com/KhronosGroup/glTF/blob/master/extensions/2.0/Khronos/KHR_materials_clearcoat/README.md
        // NoV replaces LoH as an optimization (light invariant), and Pow4
        // replaces Pow5; this matches the fresnel used in the GI/env path so
        // direct and indirect coat blends stay consistent.
        half NoV = saturate(dot(normalWS, viewDirectionWS));
        half coatFresnel = kDielectricSpec.x + kDielectricSpec.a * Pow4(1.0 - NoV);

        brdf = brdf * (1.0 - clearCoatMask * coatFresnel) + brdfCoat * clearCoatMask;
#endif // _CLEARCOAT
    }
#endif // _SPECULARHIGHLIGHTS_OFF

    return brdf * radiance;
}
|
||||
|
||||
// Convenience overload taking a Light struct.
half3 LightingPhysicallyBased(BRDFData brdfData, BRDFData brdfDataClearCoat, Light light, half3 normalWS, half3 viewDirectionWS, half clearCoatMask, bool specularHighlightsOff)
{
    half attenuation = light.distanceAttenuation * light.shadowAttenuation;
    return LightingPhysicallyBased(brdfData, brdfDataClearCoat, light.color, light.direction, attenuation, normalWS, viewDirectionWS, clearCoatMask, specularHighlightsOff);
}
|
||||
|
||||
// Backwards compatibility: no clear coat; the specular toggle is derived from
// the _SPECULARHIGHLIGHTS_OFF keyword.
half3 LightingPhysicallyBased(BRDFData brdfData, Light light, half3 normalWS, half3 viewDirectionWS)
{
#ifdef _SPECULARHIGHLIGHTS_OFF
    bool specularHighlightsOff = true;
#else
    bool specularHighlightsOff = false;
#endif

    const BRDFData noClearCoat = (BRDFData)0;
    return LightingPhysicallyBased(brdfData, noClearCoat, light, normalWS, viewDirectionWS, 0.0, specularHighlightsOff);
}
|
||||
|
||||
// Backwards compatibility: builds a Light from loose parameters; shadow
// attenuation is assumed to be fully lit.
half3 LightingPhysicallyBased(BRDFData brdfData, half3 lightColor, half3 lightDirectionWS, half lightAttenuation, half3 normalWS, half3 viewDirectionWS)
{
    Light light;
    light.color = lightColor;
    light.direction = lightDirectionWS;
    light.distanceAttenuation = lightAttenuation;
    light.shadowAttenuation = 1;

    return LightingPhysicallyBased(brdfData, light, normalWS, viewDirectionWS);
}
|
||||
|
||||
// Backwards compatibility: explicit specular toggle, no clear coat.
half3 LightingPhysicallyBased(BRDFData brdfData, Light light, half3 normalWS, half3 viewDirectionWS, bool specularHighlightsOff)
{
    const BRDFData noClearCoat = (BRDFData)0;
    return LightingPhysicallyBased(brdfData, noClearCoat, light, normalWS, viewDirectionWS, 0.0, specularHighlightsOff);
}
|
||||
|
||||
// Backwards compatibility: builds a Light from loose parameters with an
// explicit specular toggle; shadow attenuation is assumed fully lit.
half3 LightingPhysicallyBased(BRDFData brdfData, half3 lightColor, half3 lightDirectionWS, half lightAttenuation, half3 normalWS, half3 viewDirectionWS, bool specularHighlightsOff)
{
    Light light;
    light.color = lightColor;
    light.direction = lightDirectionWS;
    light.distanceAttenuation = lightAttenuation;
    light.shadowAttenuation = 1;

    // Fix: previously forwarded (brdfData, light, viewDirectionWS,
    // specularHighlightsOff, specularHighlightsOff) — dropping normalWS,
    // passing viewDirectionWS as the normal and the bool as the view direction.
    return LightingPhysicallyBased(brdfData, light, normalWS, viewDirectionWS, specularHighlightsOff);
}
|
||||
|
||||
// Accumulates per-vertex Lambert lighting from all additional lights.
// Returns black unless _ADDITIONAL_LIGHTS_VERTEX is enabled.
half3 VertexLighting(float3 positionWS, half3 normalWS)
{
    half3 vertexLightColor = half3(0.0, 0.0, 0.0);

#ifdef _ADDITIONAL_LIGHTS_VERTEX
    uint lightsCount = GetAdditionalLightsCount();
    for (uint lightIndex = 0u; lightIndex < lightsCount; ++lightIndex)
    {
        Light light = GetAdditionalLight(lightIndex, positionWS);
        half3 attenuatedColor = light.color * light.distanceAttenuation;
        vertexLightColor += LightingLambert(attenuatedColor, light.direction, normalWS);
    }
#endif

    return vertexLightColor;
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Fragment Functions //
|
||||
// Used by ShaderGraph and others builtin renderers //
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Full PBR fragment shading: GI + main light + additional lights + emission.
// Used by ShaderGraph and the built-in renderers.
half4 UniversalFragmentPBR(InputData inputData, SurfaceData surfaceData)
{
#ifdef _SPECULARHIGHLIGHTS_OFF
    bool specularHighlightsOff = true;
#else
    bool specularHighlightsOff = false;
#endif

    BRDFData brdfData;
    // NOTE: can modify surfaceData.alpha.
    InitializeBRDFData(surfaceData.albedo, surfaceData.metallic, surfaceData.specular, surfaceData.smoothness, surfaceData.alpha, brdfData);

    BRDFData brdfDataClearCoat = (BRDFData)0;
#if defined(_CLEARCOAT) || defined(_CLEARCOATMAP)
    // brdfData itself is modified here; the parts of the initial computation that
    // become dead are expected to be eliminated by the compiler.
    InitializeBRDFDataClearCoat(surfaceData.clearCoatMask, surfaceData.clearCoatSmoothness, brdfData, brdfDataClearCoat);
#endif

    // Avoid the shadowMask input unless it exists — older shaders don't provide it.
#if defined(SHADOWS_SHADOWMASK) && defined(LIGHTMAP_ON)
    half4 shadowMask = inputData.shadowMask;
#elif !defined (LIGHTMAP_ON)
    half4 shadowMask = unity_ProbesOcclusion;
#else
    half4 shadowMask = half4(1, 1, 1, 1);
#endif

    Light mainLight = GetMainLight(inputData.shadowCoord, inputData.positionWS, shadowMask);

#if defined(_SCREEN_SPACE_OCCLUSION)
    AmbientOcclusionFactor aoFactor = GetScreenSpaceAmbientOcclusion(inputData.normalizedScreenSpaceUV);
    mainLight.color *= aoFactor.directAmbientOcclusion;
    surfaceData.occlusion = min(surfaceData.occlusion, aoFactor.indirectAmbientOcclusion);
#endif

    MixRealtimeAndBakedGI(mainLight, inputData.normalWS, inputData.bakedGI);

    half3 color = GlobalIllumination(brdfData, brdfDataClearCoat, surfaceData.clearCoatMask,
                                     inputData.bakedGI, surfaceData.occlusion,
                                     inputData.normalWS, inputData.viewDirectionWS);
    color += LightingPhysicallyBased(brdfData, brdfDataClearCoat,
                                     mainLight,
                                     inputData.normalWS, inputData.viewDirectionWS,
                                     surfaceData.clearCoatMask, specularHighlightsOff);

#ifdef _ADDITIONAL_LIGHTS
    uint pixelLightCount = GetAdditionalLightsCount();
    for (uint lightIndex = 0u; lightIndex < pixelLightCount; ++lightIndex)
    {
        Light light = GetAdditionalLight(lightIndex, inputData.positionWS, shadowMask);
#if defined(_SCREEN_SPACE_OCCLUSION)
        light.color *= aoFactor.directAmbientOcclusion;
#endif
        color += LightingPhysicallyBased(brdfData, brdfDataClearCoat,
                                         light,
                                         inputData.normalWS, inputData.viewDirectionWS,
                                         surfaceData.clearCoatMask, specularHighlightsOff);
    }
#endif

#ifdef _ADDITIONAL_LIGHTS_VERTEX
    color += inputData.vertexLighting * brdfData.diffuse;
#endif

    color += surfaceData.emission;

    return half4(color, surfaceData.alpha);
}
|
||||
|
||||
// Backwards-compatible overload: packs loose surface parameters into a
// SurfaceData and forwards to the main entry point.
half4 UniversalFragmentPBR(InputData inputData, half3 albedo, half metallic, half3 specular,
    half smoothness, half occlusion, half3 emission, half alpha)
{
    SurfaceData s;
    s.albedo     = albedo;
    s.metallic   = metallic;
    s.specular   = specular;
    s.smoothness = smoothness;
    s.occlusion  = occlusion;
    s.emission   = emission;
    s.alpha      = alpha;
    // This legacy path has no clear coat inputs.
    s.clearCoatMask       = 0.0;
    s.clearCoatSmoothness = 1.0;
    return UniversalFragmentPBR(inputData, s);
}
|
||||
|
||||
// Blinn-Phong fragment shading: baked GI + Lambert diffuse + optional specular.
half4 UniversalFragmentBlinnPhong(InputData inputData, half3 diffuse, half4 specularGloss, half smoothness, half3 emission, half alpha)
{
    // Avoid the shadowMask input unless it exists — older shaders don't provide it.
#if defined(SHADOWS_SHADOWMASK) && defined(LIGHTMAP_ON)
    half4 shadowMask = inputData.shadowMask;
#elif !defined (LIGHTMAP_ON)
    half4 shadowMask = unity_ProbesOcclusion;
#else
    half4 shadowMask = half4(1, 1, 1, 1);
#endif

    Light mainLight = GetMainLight(inputData.shadowCoord, inputData.positionWS, shadowMask);

#if defined(_SCREEN_SPACE_OCCLUSION)
    AmbientOcclusionFactor aoFactor = GetScreenSpaceAmbientOcclusion(inputData.normalizedScreenSpaceUV);
    mainLight.color *= aoFactor.directAmbientOcclusion;
    inputData.bakedGI *= aoFactor.indirectAmbientOcclusion;
#endif

    MixRealtimeAndBakedGI(mainLight, inputData.normalWS, inputData.bakedGI);

    half3 attenuatedLightColor = mainLight.color * (mainLight.distanceAttenuation * mainLight.shadowAttenuation);
    half3 diffuseColor = inputData.bakedGI + LightingLambert(attenuatedLightColor, mainLight.direction, inputData.normalWS);
    half3 specularColor = LightingSpecular(attenuatedLightColor, mainLight.direction, inputData.normalWS, inputData.viewDirectionWS, specularGloss, smoothness);

#ifdef _ADDITIONAL_LIGHTS
    uint pixelLightCount = GetAdditionalLightsCount();
    for (uint lightIndex = 0u; lightIndex < pixelLightCount; ++lightIndex)
    {
        Light light = GetAdditionalLight(lightIndex, inputData.positionWS, shadowMask);
#if defined(_SCREEN_SPACE_OCCLUSION)
        light.color *= aoFactor.directAmbientOcclusion;
#endif
        half3 addLightColor = light.color * (light.distanceAttenuation * light.shadowAttenuation);
        diffuseColor += LightingLambert(addLightColor, light.direction, inputData.normalWS);
        specularColor += LightingSpecular(addLightColor, light.direction, inputData.normalWS, inputData.viewDirectionWS, specularGloss, smoothness);
    }
#endif

#ifdef _ADDITIONAL_LIGHTS_VERTEX
    diffuseColor += inputData.vertexLighting;
#endif

    half3 finalColor = diffuseColor * diffuse + emission;

    // Specular only contributes when a spec map or specular color is in use.
#if defined(_SPECGLOSSMAP) || defined(_SPECULAR_COLOR)
    finalColor += specularColor;
#endif

    return half4(finalColor, alpha);
}
|
||||
|
||||
// LWRP -> Universal backwards compatibility shim.
half4 LightweightFragmentPBR(InputData inputData, half3 albedo, half metallic, half3 specular,
    half smoothness, half occlusion, half3 emission, half alpha)
{
    return UniversalFragmentPBR(inputData, albedo, metallic, specular, smoothness, occlusion, emission, alpha);
}
|
||||
|
||||
// LWRP -> Universal backwards compatibility shim.
half4 LightweightFragmentBlinnPhong(InputData inputData, half3 diffuse, half4 specularGloss, half smoothness, half3 emission, half alpha)
{
    return UniversalFragmentBlinnPhong(inputData, diffuse, specularGloss, smoothness, emission, alpha);
}
|
||||
#endif
|
@@ -0,0 +1,70 @@
|
||||
#ifndef UNIVERSAL_META_PASS_INCLUDED
|
||||
#define UNIVERSAL_META_PASS_INCLUDED
|
||||
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Lighting.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Color.hlsl"
|
||||
|
||||
CBUFFER_START(UnityMetaPass)
|
||||
// x = use uv1 as raster position
|
||||
// y = use uv2 as raster position
|
||||
bool4 unity_MetaVertexControl;
|
||||
|
||||
// x = return albedo
|
||||
// y = return normal
|
||||
bool4 unity_MetaFragmentControl;
|
||||
CBUFFER_END
|
||||
|
||||
float unity_OneOverOutputBoost;
|
||||
float unity_MaxOutputValue;
|
||||
float unity_UseLinearSpace;
|
||||
|
||||
struct MetaInput
|
||||
{
|
||||
half3 Albedo;
|
||||
half3 Emission;
|
||||
half3 SpecularColor;
|
||||
};
|
||||
|
||||
// Rasterizes the meta pass over lightmap space: uv1 or uv2 (scaled/offset)
// replaces the object-space XY, as selected by unity_MetaVertexControl.
float4 MetaVertexPosition(float4 positionOS, float2 uv1, float2 uv2, float4 uv1ST, float4 uv2ST)
{
    if (unity_MetaVertexControl.x)
    {
        positionOS.xy = uv1 * uv1ST.xy + uv1ST.zw;
        // OpenGL right now needs to actually use the incoming vertex position,
        // so reference it in a very dummy way.
        positionOS.z = positionOS.z > 0 ? REAL_MIN : 0.0f;
    }
    if (unity_MetaVertexControl.y)
    {
        positionOS.xy = uv2 * uv2ST.xy + uv2ST.zw;
        // Same dummy use of the incoming Z as above.
        positionOS.z = positionOS.z > 0 ? REAL_MIN : 0.0f;
    }
    return TransformWorldToHClip(positionOS.xyz);
}
|
||||
|
||||
// Outputs the channel the bake requests via unity_MetaFragmentControl:
// x = albedo (with the LightmapSettings albedo boost applied), y = emission.
half4 MetaFragment(MetaInput input)
{
    half4 res = 0;
    if (unity_MetaFragmentControl.x)
    {
        res = half4(input.Albedo, 1.0);

        // Apply Albedo Boost from LightmapSettings.
        res.rgb = clamp(PositivePow(res.rgb, saturate(unity_OneOverOutputBoost)), 0, unity_MaxOutputValue);
    }
    if (unity_MetaFragmentControl.y)
    {
        half3 emission = unity_UseLinearSpace ? input.Emission : LinearToSRGB(input.Emission);
        res = half4(emission, 1.0);
    }
    return res;
}
|
||||
|
||||
#endif
|
@@ -0,0 +1,228 @@
|
||||
#ifndef UNIVERSAL_PARTICLES_INCLUDED
|
||||
#define UNIVERSAL_PARTICLES_INCLUDED
|
||||
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Core.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Color.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/SurfaceInput.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/DeclareDepthTexture.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/DeclareOpaqueTexture.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/ParticlesInstancing.hlsl"
|
||||
|
||||
// Per-fragment particle inputs, gathered from the interpolators by
// InitParticleParams and consumed by the particle shading functions.
struct ParticleParams
{
    float4 positionWS;          // copied from input.positionWS (not set in the editor meta pass)
    float4 vertexColor;         // interpolated vertex color
    float4 projectedPosition;   // used by soft particles / fading / distortion
    half4  baseColor;           // material _BaseColor
    float3 blendUv;             // flipbook second uv (xy) and blend factor (z)
    float2 uv;                  // primary texcoord
};
|
||||
|
||||
// Fills a ParticleParams struct from the interpolated vertex outputs.
// Fields that are unavailable under the current keyword set are zeroed
// (the whole struct is zero-initialized first).
void InitParticleParams(VaryingsParticle input, out ParticleParams output)
{
    output = (ParticleParams)0;

    output.uv = input.texcoord;
    output.vertexColor = input.color;

#if defined(_FLIPBOOKBLENDING_ON)
    output.blendUv = input.texcoord2AndBlend;
#else
    output.blendUv = float3(0, 0, 0);
#endif

#if !defined(PARTICLES_EDITOR_META_PASS)
    output.positionWS = input.positionWS;
    output.baseColor = _BaseColor;

    // projectedPosition is only interpolated when a screen-space effect needs it.
    #if defined(_SOFTPARTICLES_ON) || defined(_FADING_ON) || defined(_DISTORTION_ON)
    output.projectedPosition = input.projectedPosition;
    #else
    output.projectedPosition = float4(0, 0, 0, 0);
    #endif
#endif
}
|
||||
|
||||
// Pre-multiplied alpha helper
|
||||
#if defined(_ALPHAPREMULTIPLY_ON)
|
||||
#define ALBEDO_MUL albedo
|
||||
#else
|
||||
#define ALBEDO_MUL albedo.a
|
||||
#endif
|
||||
|
||||
#if defined(_ALPHAPREMULTIPLY_ON)
|
||||
#define SOFT_PARTICLE_MUL_ALBEDO(albedo, val) albedo * val
|
||||
#elif defined(_ALPHAMODULATE_ON)
|
||||
#define SOFT_PARTICLE_MUL_ALBEDO(albedo, val) half4(lerp(half3(1.0h, 1.0h, 1.0h), albedo.rgb, albedo.a * val), albedo.a * val)
|
||||
#else
|
||||
#define SOFT_PARTICLE_MUL_ALBEDO(albedo, val) albedo * half4(1.0h, 1.0h, 1.0h, val)
|
||||
#endif
|
||||
|
||||
// Color blending fragment function
|
||||
// Blends the sampled base color with the per-particle color according to the
// active _COLOR* keyword: overlay, color (hue/sat from particle, value from
// base), add/sub/diff (driven by colorAddSubDiff), or multiply by default.
float4 MixParticleColor(float4 baseColor, float4 particleColor, float4 colorAddSubDiff)
{
#if defined(_COLOROVERLAY_ON) // Overlay blend
    float4 result = baseColor;
    result.rgb = lerp(1 - 2 * (1 - baseColor.rgb) * (1 - particleColor.rgb), 2 * baseColor.rgb * particleColor.rgb, step(baseColor.rgb, 0.5));
    result.a *= particleColor.a;
    return result;
#elif defined(_COLORCOLOR_ON) // Color blend
    half3 baseHsv = RgbToHsv(baseColor.rgb);
    half3 particleHsv = RgbToHsv(particleColor.rgb);
    half3 mixedHsv = half3(particleHsv.x, particleHsv.y, baseHsv.z);
    return half4(HsvToRgb(mixedHsv), baseColor.a * particleColor.a);
#elif defined(_COLORADDSUBDIFF_ON) // Additive, subtractive and difference blends based on 'colorAddSubDiff'
    float4 result = baseColor;
    result.rgb = baseColor.rgb + particleColor.rgb * colorAddSubDiff.x;
    result.rgb = lerp(result.rgb, abs(result.rgb), colorAddSubDiff.y);
    result.a *= particleColor.a;
    return result;
#else // Default to Multiply blend
    return baseColor * particleColor;
#endif
}
|
||||
|
||||
// Soft particles - returns alpha value for fading particles based on the depth to the background pixel
|
||||
// Soft particles: alpha fade factor based on the distance between the
// particle fragment and the opaque scene depth behind it. A zero near AND
// far disables the effect (fade = 1).
float SoftParticles(float near, float far, float4 projection)
{
    float fade = 1;
    if (near > 0.0 || far > 0.0)
    {
        float2 screenUV = UnityStereoTransformScreenSpaceTex(projection.xy / projection.w);
        float sceneZ = LinearEyeDepth(SAMPLE_TEXTURE2D_X(_CameraDepthTexture, sampler_CameraDepthTexture, screenUV).r, _ZBufferParams);
        float thisZ = LinearEyeDepth(projection.z / projection.w, _ZBufferParams);
        fade = saturate(far * ((sceneZ - near) - thisZ));
    }
    return fade;
}

// Soft particles overload taking a ParticleParams: samples depth via
// SampleSceneDepth and derives the particle's own eye depth from its
// world-space position instead of the projected z.
float SoftParticles(float near, float far, ParticleParams params)
{
    float fade = 1;
    if (near > 0.0 || far > 0.0)
    {
        float rawDepth = SampleSceneDepth(params.projectedPosition.xy / params.projectedPosition.w);
        float sceneZ = LinearEyeDepth(rawDepth, _ZBufferParams);
        float thisZ = LinearEyeDepth(params.positionWS.xyz, GetWorldToViewMatrix());
        fade = saturate(far * ((sceneZ - near) - thisZ));
    }
    return fade;
}
|
||||
|
||||
// Camera fade - returns alpha value for fading particles based on camera distance
|
||||
// Camera fade: alpha factor that fades particles in starting at eye depth
// 'near', reaching full opacity over a 1/far distance.
half CameraFade(float near, float far, float4 projection)
{
    float eyeZ = LinearEyeDepth(projection.z / projection.w, _ZBufferParams);
    return saturate((eyeZ - near) * far);
}

// Applies the alpha-modulate or alpha-premultiply transform to albedo when
// the corresponding keyword is enabled; otherwise returns albedo unchanged.
half3 AlphaModulate(half3 albedo, half alpha)
{
#if defined(_ALPHAMODULATE_ON)
    return lerp(half3(1.0h, 1.0h, 1.0h), albedo, alpha);
#elif defined(_ALPHAPREMULTIPLY_ON)
    return albedo * alpha;
#endif
    return albedo;
}

// Screen-space distortion: offsets the opaque-texture lookup along the
// particle normal (scaled by strength and alpha), then blends back toward
// the particle's own color by saturate(alpha - blend).
half3 Distortion(float4 baseColor, float3 normal, half strength, half blend, float4 projection)
{
    float2 screenUV = (projection.xy / projection.w) + normal.xy * strength * baseColor.a;
    screenUV = UnityStereoTransformScreenSpaceTex(screenUV);
    float4 sceneColor = SAMPLE_TEXTURE2D_X(_CameraOpaqueTexture, sampler_CameraOpaqueTexture, screenUV);
    return lerp(sceneColor.rgb, baseColor.rgb, saturate(baseColor.a - blend));
}
|
||||
|
||||
// Sample a texture and do blending for texture sheet animation if needed
|
||||
// Samples a texture; when flipbook (texture sheet) blending is enabled, also
// samples the next sheet frame at blendUv.xy and interpolates by blendUv.z.
half4 BlendTexture(TEXTURE2D_PARAM(_Texture, sampler_Texture), float2 uv, float3 blendUv)
{
    half4 color = SAMPLE_TEXTURE2D(_Texture, sampler_Texture, uv);
#ifdef _FLIPBOOKBLENDING_ON
    half4 nextFrame = SAMPLE_TEXTURE2D(_Texture, sampler_Texture, blendUv.xy);
    color = lerp(color, nextFrame, blendUv.z);
#endif
    return color;
}

// Samples a tangent-space normal, with flipbook blending via BlendTexture.
// Returns the flat normal (0,0,1) when _NORMALMAP is disabled. 'scale' is
// ignored on platforms where BUMP_SCALE_NOT_SUPPORTED is set.
half3 SampleNormalTS(float2 uv, float3 blendUv, TEXTURE2D_PARAM(bumpMap, sampler_bumpMap), half scale = 1.0h)
{
#if defined(_NORMALMAP)
    half4 packedNormal = BlendTexture(TEXTURE2D_ARGS(bumpMap, sampler_bumpMap), uv, blendUv);
    #if BUMP_SCALE_NOT_SUPPORTED
    return UnpackNormal(packedNormal);
    #else
    return UnpackNormalScale(packedNormal, scale);
    #endif
#else
    return half3(0.0h, 0.0h, 1.0h);
#endif
}
|
||||
|
||||
// Returns the per-particle color. Under procedural instancing, the incoming
// mesh color is optionally suppressed (unity_ParticleUseMeshColors) and the
// packed per-instance color is multiplied in.
half4 GetParticleColor(half4 color)
{
#if defined(UNITY_PARTICLE_INSTANCING_ENABLED)
    #if !defined(UNITY_PARTICLE_INSTANCE_DATA_NO_COLOR)
    UNITY_PARTICLE_INSTANCE_DATA data = unity_ParticleInstanceData[unity_InstanceID];
    color = lerp(half4(1.0, 1.0, 1.0, 1.0), color, unity_ParticleUseMeshColors);
    color *= UnpackFromR8G8B8A8(data.color);
    #endif
#endif
    return color;
}
|
||||
|
||||
// Computes the particle texture coordinates, applying texture-sheet animation
// when procedural instancing provides UV shift data. When flipbook blending
// is enabled, also outputs the next frame's UVs and blend factor in
// outputTexcoord2AndBlend (xy = uv, z = blend).
// NOTE: the if/else is deliberately woven across the #if so the non-instanced
// branch doubles as the instanced "no UV shift" fallback.
void GetParticleTexcoords(out float2 outputTexcoord, out float3 outputTexcoord2AndBlend, in float4 inputTexcoords, in float inputBlend)
{
#if defined(UNITY_PARTICLE_INSTANCING_ENABLED)
    if (unity_ParticleUVShiftData.x != 0.0)
    {
        UNITY_PARTICLE_INSTANCE_DATA data = unity_ParticleInstanceData[unity_InstanceID];

        float numTilesX = unity_ParticleUVShiftData.y;
        float2 animScale = unity_ParticleUVShiftData.zw;
#ifdef UNITY_PARTICLE_INSTANCE_DATA_NO_ANIM_FRAME
        float sheetIndex = 0.0;
#else
        float sheetIndex = data.animFrame;
#endif

        // Tile offset of the current frame. Copied from built-in as is, and
        // it looks like an upside-down flip.
        float index0 = floor(sheetIndex);
        float vIdx0 = floor(index0 / numTilesX);
        float uIdx0 = floor(index0 - vIdx0 * numTilesX);
        float2 offset0 = float2(uIdx0 * animScale.x, (1.0 - animScale.y) - vIdx0 * animScale.y);

        outputTexcoord = inputTexcoords.xy * animScale.xy + offset0.xy;

#ifdef _FLIPBOOKBLENDING_ON
        // Tile offset of the next frame, blended by the fractional index.
        float index1 = floor(sheetIndex + 1.0);
        float vIdx1 = floor(index1 / numTilesX);
        float uIdx1 = floor(index1 - vIdx1 * numTilesX);
        float2 offset1 = float2(uIdx1 * animScale.x, (1.0 - animScale.y) - vIdx1 * animScale.y);

        outputTexcoord2AndBlend.xy = inputTexcoords.xy * animScale.xy + offset1.xy;
        outputTexcoord2AndBlend.z = frac(sheetIndex);
#endif
    }
    else
#endif
    {
        outputTexcoord = inputTexcoords.xy;
#ifdef _FLIPBOOKBLENDING_ON
        outputTexcoord2AndBlend.xy = inputTexcoords.zw;
        outputTexcoord2AndBlend.z = inputBlend;
#endif
    }

#ifndef _FLIPBOOKBLENDING_ON
    // Without flipbook blending the second set is unused; write deterministic
    // values so the 'out' parameter is always fully initialized.
    outputTexcoord2AndBlend.xy = inputTexcoords.xy;
    outputTexcoord2AndBlend.z = 0.5;
#endif
}

// Convenience overload for a single UV set and no blend input.
void GetParticleTexcoords(out float2 outputTexcoord, in float2 inputTexcoord)
{
    float3 unusedTexcoord2AndBlend = 0.0;
    GetParticleTexcoords(outputTexcoord, unusedTexcoord2AndBlend, inputTexcoord.xyxy, 0.0);
}
|
||||
|
||||
#endif // UNIVERSAL_PARTICLES_INCLUDED
|
@@ -0,0 +1,66 @@
|
||||
#ifndef UNIVERSAL_PARTICLESINSTANCING_INCLUDED
|
||||
#define UNIVERSAL_PARTICLESINSTANCING_INCLUDED
|
||||
|
||||
#if defined(UNITY_PROCEDURAL_INSTANCING_ENABLED) && !defined(SHADER_TARGET_SURFACE_ANALYSIS)
|
||||
#define UNITY_PARTICLE_INSTANCING_ENABLED
|
||||
#endif
|
||||
|
||||
#if defined(UNITY_PARTICLE_INSTANCING_ENABLED)
|
||||
|
||||
#ifndef UNITY_PARTICLE_INSTANCE_DATA
|
||||
#define UNITY_PARTICLE_INSTANCE_DATA DefaultParticleInstanceData
|
||||
#endif
|
||||
|
||||
// Default per-instance particle payload (used when the shader does not
// override UNITY_PARTICLE_INSTANCE_DATA).
struct DefaultParticleInstanceData
{
    float3x4 transform;  // 3x4 instance transform, expanded in ParticleInstancingMatrices
    uint color;          // packed color, unpacked via UnpackFromR8G8B8A8 in GetParticleColor
    float animFrame;     // texture-sheet frame index, read in GetParticleTexcoords
};
|
||||
|
||||
StructuredBuffer<UNITY_PARTICLE_INSTANCE_DATA> unity_ParticleInstanceData;
|
||||
float4 unity_ParticleUVShiftData;
|
||||
float unity_ParticleUseMeshColors;
|
||||
|
||||
// Builds the object-to-world and world-to-object matrices from the 3x4
// per-instance particle transform. The inverse rotation/scale is computed
// analytically: adjugate via row cross products, divided by the determinant.
void ParticleInstancingMatrices(out float4x4 objectToWorld, out float4x4 worldToObject)
{
    UNITY_PARTICLE_INSTANCE_DATA data = unity_ParticleInstanceData[unity_InstanceID];

    // Expand the 3x4 instance transform into a full 4x4 matrix.
    objectToWorld._11_21_31_41 = float4(data.transform._11_21_31, 0.0f);
    objectToWorld._12_22_32_42 = float4(data.transform._12_22_32, 0.0f);
    objectToWorld._13_23_33_43 = float4(data.transform._13_23_33, 0.0f);
    objectToWorld._14_24_34_44 = float4(data.transform._14_24_34, 1.0f);

    // Inverse transform matrix (TODO: replace with a library implementation
    // if/when available).
    float3x3 worldToObject3x3;
    worldToObject3x3[0] = objectToWorld[1].yzx * objectToWorld[2].zxy - objectToWorld[1].zxy * objectToWorld[2].yzx;
    worldToObject3x3[1] = objectToWorld[0].zxy * objectToWorld[2].yzx - objectToWorld[0].yzx * objectToWorld[2].zxy;
    worldToObject3x3[2] = objectToWorld[0].yzx * objectToWorld[1].zxy - objectToWorld[0].zxy * objectToWorld[1].yzx;

    float det = dot(objectToWorld[0].xyz, worldToObject3x3[0]);

    worldToObject3x3 = transpose(worldToObject3x3);
    worldToObject3x3 *= rcp(det);

    // Inverse translation: rotate the negated translation by the inverse 3x3.
    float3 worldToObjectPosition = mul(worldToObject3x3, -objectToWorld._14_24_34);

    worldToObject._11_21_31_41 = float4(worldToObject3x3._11_21_31, 0.0f);
    worldToObject._12_22_32_42 = float4(worldToObject3x3._12_22_32, 0.0f);
    worldToObject._13_23_33_43 = float4(worldToObject3x3._13_23_33, 0.0f);
    worldToObject._14_24_34_44 = float4(worldToObjectPosition, 1.0f);
}

// Procedural-instancing hook: installs the per-instance matrices.
void ParticleInstancingSetup()
{
    ParticleInstancingMatrices(unity_ObjectToWorld, unity_WorldToObject);
}
|
||||
|
||||
#else
|
||||
|
||||
void ParticleInstancingSetup() {}
|
||||
|
||||
#endif
|
||||
|
||||
#endif // UNIVERSAL_PARTICLESINSTANCING_INCLUDED
|
@@ -0,0 +1,401 @@
|
||||
#ifndef UNIVERSAL_SSAO_INCLUDED
|
||||
#define UNIVERSAL_SSAO_INCLUDED
|
||||
|
||||
// Includes
|
||||
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Common.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/ShaderVariablesFunctions.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/DeclareDepthTexture.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/DeclareNormalsTexture.hlsl"
|
||||
|
||||
// Textures & Samplers
|
||||
TEXTURE2D_X(_BaseMap);
|
||||
TEXTURE2D_X(_ScreenSpaceOcclusionTexture);
|
||||
|
||||
SAMPLER(sampler_BaseMap);
|
||||
SAMPLER(sampler_ScreenSpaceOcclusionTexture);
|
||||
|
||||
// Params
|
||||
float4 _BlurOffset;
|
||||
float4 _SSAOParams;
|
||||
float4 _SourceSize;
|
||||
|
||||
// SSAO Settings
|
||||
#define INTENSITY _SSAOParams.x
|
||||
#define RADIUS _SSAOParams.y
|
||||
#define DOWNSAMPLE _SSAOParams.z
|
||||
|
||||
// GLES2: In many cases, dynamic looping is not supported.
|
||||
#if defined(SHADER_API_GLES) && !defined(SHADER_API_GLES3)
|
||||
#define SAMPLE_COUNT 3
|
||||
#else
|
||||
#define SAMPLE_COUNT _SSAOParams.w
|
||||
#endif
|
||||
|
||||
// Function defines
|
||||
#define SCREEN_PARAMS GetScaledScreenParams()
|
||||
#define SAMPLE_BASEMAP(uv) SAMPLE_TEXTURE2D_X(_BaseMap, sampler_BaseMap, UnityStereoTransformScreenSpaceTex(uv));
|
||||
#define SAMPLE_BASEMAP_R(uv) SAMPLE_TEXTURE2D_X(_BaseMap, sampler_BaseMap, UnityStereoTransformScreenSpaceTex(uv)).r;
|
||||
|
||||
|
||||
// Constants
|
||||
// kContrast determines the contrast of occlusion. This allows users to control over/under
|
||||
// occlusion. At the moment, this is not exposed to the editor because it's rarely useful.
|
||||
static const float kContrast = 0.6;
|
||||
|
||||
// The constant below controls the geometry-awareness of the bilateral
|
||||
// filter. The higher value, the more sensitive it is.
|
||||
static const float kGeometryCoeff = 0.8;
|
||||
|
||||
// The constants below are used in the AO estimator. Beta is mainly used for suppressing
|
||||
// self-shadowing noise, and Epsilon is used to prevent calculation underflow. See the
|
||||
// paper (Morgan 2011 http://goo.gl/2iz3P) for further details of these constants.
|
||||
static const float kBeta = 0.002;
|
||||
#define EPSILON 1.0e-4
|
||||
|
||||
// Packs AO into r and the normal (remapped from [-1,1] to [0,1]) into gba.
float4 PackAONormal(float ao, float3 n)
{
    return float4(ao, n * 0.5 + 0.5);
}

// Recovers the [-1,1] normal from a packed AO/normal sample.
float3 GetPackedNormal(float4 p)
{
    return p.gba * 2.0 - 1.0;
}

// Recovers the AO term from a packed AO/normal sample.
float GetPackedAO(float4 p)
{
    return p.r;
}

// Converts the AO value for the active color space (sRGB curve in gamma mode,
// identity in linear mode).
float EncodeAO(float x)
{
#if UNITY_COLORSPACE_GAMMA
    return 1.0 - max(LinearToSRGB(1.0 - saturate(x)), 0.0);
#else
    return x;
#endif
}

// Geometry-awareness weight: 1 when the two normals agree, falling smoothly
// to 0 once their dot product drops below kGeometryCoeff.
float CompareNormal(float3 d1, float3 d2)
{
    return smoothstep(kGeometryCoeff, 1.0, dot(d1, d2));
}

// UV -> pixel position on the (possibly downsampled) AO render target.
float2 GetScreenSpacePosition(float2 uv)
{
    return uv * SCREEN_PARAMS.xy * DOWNSAMPLE;
}
|
||||
|
||||
// Trigonometric function utility
|
||||
// Trigonometric utility: returns (cos(theta), sin(theta)).
float2 CosSin(float theta)
{
    float sn, cs;
    sincos(theta, sn, cs);
    return float2(cs, sn);
}

// Pseudo random number generator seeded with 2D coordinates.
float UVRandom(float u, float v)
{
    float f = dot(float2(12.9898, 78.233), float2(u, v));
    return frac(43758.5453 * sin(f));
}

// Picks a pseudo-random unit direction for sample 'index', decorrelated per
// pixel via interleaved gradient noise (uniform over the sphere: random z in
// [-1,1] plus a random azimuth).
float3 PickSamplePoint(float2 uv, float randAddon, int index)
{
    float2 positionSS = GetScreenSpacePosition(uv);
    float gn = InterleavedGradientNoise(positionSS, index);
    float u = frac(UVRandom(0.0, index + randAddon) + gn) * 2.0 - 1.0;
    float theta = (UVRandom(1.0, index + randAddon) + gn) * TWO_PI;
    return float3(CosSin(theta) * sqrt(1.0 - u * u), u);
}
|
||||
|
||||
// Converts a raw depth-buffer value to linear eye depth, with explicit
// handling of orthographic projection and reversed-Z.
float RawToLinearDepth(float rawDepth)
{
#if defined(_ORTHOGRAPHIC)
    #if UNITY_REVERSED_Z
    return ((_ProjectionParams.z - _ProjectionParams.y) * (1.0 - rawDepth) + _ProjectionParams.y);
    #else
    return ((_ProjectionParams.z - _ProjectionParams.y) * (rawDepth) + _ProjectionParams.y);
    #endif
#else
    return LinearEyeDepth(rawDepth, _ZBufferParams);
#endif
}

// Samples the scene depth texture at uv and linearizes the result.
float SampleAndGetLinearDepth(float2 uv)
{
    float rawDepth = SampleSceneDepth(uv.xy).r;
    return RawToLinearDepth(rawDepth);
}

// Reconstructs the view-space position from uv and linear depth.
// p11_22 = 1/(proj._11, proj._22); p13_31 = (proj._13, proj._23) — see
// GetCoordinateConversionParameters.
float3 ReconstructViewPos(float2 uv, float depth, float2 p11_22, float2 p13_31)
{
#if defined(_ORTHOGRAPHIC)
    float3 viewPos = float3(((uv.xy * 2.0 - 1.0 - p13_31) * p11_22), depth);
#else
    float3 viewPos = float3(depth * ((uv.xy * 2.0 - 1.0 - p13_31) * p11_22), depth);
#endif
    return viewPos;
}
|
||||
|
||||
// Try reconstructing normal accurately from depth buffer.
|
||||
// Low: DDX/DDY on the current pixel
|
||||
// Medium: 3 taps on each direction | x | * | y |
|
||||
// High: 5 taps on each direction: | z | x | * | y | w |
|
||||
// https://atyuwen.github.io/posts/normal-reconstruction/
|
||||
// https://wickedengine.net/2019/09/22/improved-normal-reconstruction-from-depth/
|
||||
// Reconstructs a view-space normal from the depth buffer.
// Low:    ddx/ddy of the view position at the current pixel.
// Medium: 3 taps per axis; High: 5 taps per axis, extrapolating from the
//         2-tap neighbours to pick the side that best matches the centre
//         depth (reduces artifacts at depth discontinuities).
// https://atyuwen.github.io/posts/normal-reconstruction/
// https://wickedengine.net/2019/09/22/improved-normal-reconstruction-from-depth/
float3 ReconstructNormal(float2 uv, float depth, float3 vpos, float2 p11_22, float2 p13_31)
{
#if defined(_RECONSTRUCT_NORMAL_LOW)
    return normalize(cross(ddy(vpos), ddx(vpos)));
#else
    float2 delta = _SourceSize.zw * 2.0;

    // One-texel offsets for each direction.
    float2 lUV = float2(-delta.x, 0.0);
    float2 rUV = float2( delta.x, 0.0);
    float2 uUV = float2(0.0,  delta.y);
    float2 dUV = float2(0.0, -delta.y);

    // Sample the immediate neighbours (xy = uv, z = linear depth).
    float3 l1 = float3(uv + lUV, 0.0); l1.z = SampleAndGetLinearDepth(l1.xy); // Left1
    float3 r1 = float3(uv + rUV, 0.0); r1.z = SampleAndGetLinearDepth(r1.xy); // Right1
    float3 u1 = float3(uv + uUV, 0.0); u1.z = SampleAndGetLinearDepth(u1.xy); // Up1
    float3 d1 = float3(uv + dUV, 0.0); d1.z = SampleAndGetLinearDepth(d1.xy); // Down1

    // Determine the closest horizontal and vertical pixels:
    // horizontal: left = 0, right = 1; vertical: down = 0, up = 1.
#if defined(_RECONSTRUCT_NORMAL_MEDIUM)
    uint closest_horizontal = l1.z > r1.z ? 0 : 1;
    uint closest_vertical   = d1.z > u1.z ? 0 : 1;
#else
    float3 l2 = float3(uv + lUV * 2.0, 0.0); l2.z = SampleAndGetLinearDepth(l2.xy); // Left2
    float3 r2 = float3(uv + rUV * 2.0, 0.0); r2.z = SampleAndGetLinearDepth(r2.xy); // Right2
    float3 u2 = float3(uv + uUV * 2.0, 0.0); u2.z = SampleAndGetLinearDepth(u2.xy); // Up2
    float3 d2 = float3(uv + dUV * 2.0, 0.0); d2.z = SampleAndGetLinearDepth(d2.xy); // Down2

    // Linearly extrapolate from the 2-tap neighbour toward the centre and
    // keep the side whose extrapolation matches the centre depth best.
    const uint closest_horizontal = abs((2.0 * l1.z - l2.z) - depth) < abs((2.0 * r1.z - r2.z) - depth) ? 0 : 1;
    const uint closest_vertical   = abs((2.0 * d1.z - d2.z) - depth) < abs((2.0 * u1.z - u2.z) - depth) ? 0 : 1;
#endif

    // Build a counter-clockwise triangle from the chosen neighbours:
    // h == 0 && v == 0: P1 = left,  P2 = down
    // h == 1 && v == 0: P1 = down,  P2 = right
    // h == 1 && v == 1: P1 = right, P2 = up
    // h == 0 && v == 1: P1 = up,    P2 = left
    float3 P1;
    float3 P2;
    if (closest_vertical == 0)
    {
        P1 = closest_horizontal == 0 ? l1 : d1;
        P2 = closest_horizontal == 0 ? d1 : r1;
    }
    else
    {
        P1 = closest_horizontal == 0 ? u1 : r1;
        P2 = closest_horizontal == 0 ? l1 : u1;
    }

    // Lift both neighbours to view space and take the cross product.
    P1 = ReconstructViewPos(P1.xy, P1.z, p11_22, p13_31);
    P2 = ReconstructViewPos(P2.xy, P2.z, p11_22, p13_31);

    return normalize(cross(P2 - vpos, P1 - vpos));
#endif
}
|
||||
|
||||
// Fetches linear depth, view-space position and a normal for the fragment.
// The normal comes from the DepthNormals texture when _SOURCE_DEPTH_NORMALS
// is defined, otherwise it is reconstructed from the depth buffer.
void SampleDepthNormalView(float2 uv, float2 p11_22, float2 p13_31, out float depth, out float3 normal, out float3 vpos)
{
    depth = SampleAndGetLinearDepth(uv);
    vpos = ReconstructViewPos(uv, depth, p11_22, p13_31);

#if defined(_SOURCE_DEPTH_NORMALS)
    normal = SampleSceneNormals(uv);
#else
    normal = ReconstructNormal(uv, depth, vpos, p11_22, p13_31);
#endif
}

// Extracts the projection terms needed by ReconstructViewPos and returns the
// 3x3 camera projection used to reproject view-space sample points.
float3x3 GetCoordinateConversionParameters(out float2 p11_22, out float2 p13_31)
{
    float3x3 camProj = (float3x3)unity_CameraProjection;

    p11_22 = rcp(float2(camProj._11, camProj._22));
    p13_31 = float2(camProj._13, camProj._23);

    return camProj;
}
|
||||
|
||||
// Distance-based AO estimator based on Morgan 2011
|
||||
// "Alchemy screen-space ambient obscurance algorithm"
|
||||
// http://graphics.cs.williams.edu/papers/AlchemyHPG11/
|
||||
// Distance-based AO estimator based on Morgan 2011,
// "Alchemy screen-space ambient obscurance algorithm"
// http://graphics.cs.williams.edu/papers/AlchemyHPG11/
float4 SSAO(Varyings input) : SV_Target
{
    UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(input);
    float2 uv = input.uv;

    // Parameters used in coordinate conversion.
    float2 p11_22, p13_31;
    float3x3 camProj = GetCoordinateConversionParameters(p11_22, p13_31);

    // Depth, normal and view position of the current fragment.
    float depth_o;
    float3 norm_o;
    float3 vpos_o;
    SampleDepthNormalView(uv, p11_22, p13_31, depth_o, norm_o, vpos_o);

    // Tiny per-pixel seed offset, added to avoid an NVIDIA driver issue.
    float randAddon = uv.x * 1e-10;

    float rcpSampleCount = rcp(SAMPLE_COUNT);
    float ao = 0.0;
    for (int s = 0; s < int(SAMPLE_COUNT); s++)
    {
#if defined(SHADER_API_D3D11)
        // 'floor(1.0001 * s)' is needed to avoid a DX11 NVIDIA shader issue.
        s = floor(1.0001 * s);
#endif

        // Random direction, scaled into [0, RADIUS] with more samples near
        // the outer radius, then flipped into the hemisphere around the normal.
        float3 v_s1 = PickSamplePoint(uv, randAddon, s);
        v_s1 *= sqrt((s + 1.0) * rcpSampleCount) * RADIUS;
        v_s1 = faceforward(v_s1, -norm_o, v_s1);

        float3 vpos_s1 = vpos_o + v_s1;

        // Reproject the sample point to screen space.
        float3 spos_s1 = mul(camProj, vpos_s1);
#if defined(_ORTHOGRAPHIC)
        float2 uv_s1_01 = clamp((spos_s1.xy + 1.0) * 0.5, 0.0, 1.0);
#else
        float2 uv_s1_01 = clamp((spos_s1.xy * rcp(vpos_s1.z) + 1.0) * 0.5, 0.0, 1.0);
#endif

        // Scene depth at the sample point.
        float depth_s1 = SampleAndGetLinearDepth(uv_s1_01);

        // Vector from the fragment to the reconstructed sample position.
        float3 vpos_s2 = ReconstructViewPos(uv_s1_01, depth_s1, p11_22, p13_31);
        float3 v_s2 = vpos_s2 - vpos_o;

        // Obscurance estimator: kBeta suppresses self-shadowing noise and
        // EPSILON prevents calculation underflow.
        float a1 = max(dot(v_s2, norm_o) - kBeta * depth_o, 0.0);
        float a2 = dot(v_s2, v_s2) + EPSILON;
        ao += a1 * rcp(a2);
    }

    // Intensity normalization.
    ao *= RADIUS;

    // Apply contrast.
    ao = PositivePow(ao * INTENSITY * rcpSampleCount, kContrast);

    return PackAONormal(ao, norm_o);
}
|
||||
|
||||
// Geometry-aware separable bilateral filter
|
||||
// Geometry-aware separable bilateral filter: a 5-tap Gaussian whose outer
// tap weights are attenuated when the packed neighbour normal diverges from
// the centre normal (CompareNormal).
half4 Blur(float2 uv, float2 delta) : SV_Target
{
    float4 p0  = SAMPLE_BASEMAP(uv);
    float4 p1a = SAMPLE_BASEMAP(uv - delta * 1.3846153846);
    float4 p1b = SAMPLE_BASEMAP(uv + delta * 1.3846153846);
    float4 p2a = SAMPLE_BASEMAP(uv - delta * 3.2307692308);
    float4 p2b = SAMPLE_BASEMAP(uv + delta * 3.2307692308);

#if defined(BLUR_SAMPLE_CENTER_NORMAL)
    #if defined(_SOURCE_DEPTH_NORMALS)
    float3 n0 = SampleSceneNormals(uv);
    #else
    float2 p11_22, p13_31;
    float3x3 camProj = GetCoordinateConversionParameters(p11_22, p13_31);

    // Depth, normal and view position of the current fragment.
    float depth_o;
    float3 n0;
    float3 vpos_o;
    SampleDepthNormalView(uv, p11_22, p13_31, depth_o, n0, vpos_o);
    #endif
#else
    float3 n0 = GetPackedNormal(p0);
#endif

    // Gaussian weights, gated by normal similarity.
    float w0  = 0.2270270270;
    float w1a = CompareNormal(n0, GetPackedNormal(p1a)) * 0.3162162162;
    float w1b = CompareNormal(n0, GetPackedNormal(p1b)) * 0.3162162162;
    float w2a = CompareNormal(n0, GetPackedNormal(p2a)) * 0.0702702703;
    float w2b = CompareNormal(n0, GetPackedNormal(p2b)) * 0.0702702703;

    float s;
    s  = GetPackedAO(p0)  * w0;
    s += GetPackedAO(p1a) * w1a;
    s += GetPackedAO(p1b) * w1b;
    s += GetPackedAO(p2a) * w2a;
    s += GetPackedAO(p2b) * w2b;

    // Renormalize by the surviving total weight.
    s *= rcp(w0 + w1a + w1b + w2a + w2b);

    return PackAONormal(s, n0);
}
|
||||
|
||||
// Geometry-aware bilateral filter (single pass/small kernel)
|
||||
// Geometry-aware bilateral filter: single pass over a small diagonal kernel.
float BlurSmall(float2 uv, float2 delta)
{
    float4 p0 = SAMPLE_BASEMAP(uv);
    float4 p1 = SAMPLE_BASEMAP(uv + float2(-delta.x, -delta.y));
    float4 p2 = SAMPLE_BASEMAP(uv + float2( delta.x, -delta.y));
    float4 p3 = SAMPLE_BASEMAP(uv + float2(-delta.x,  delta.y));
    float4 p4 = SAMPLE_BASEMAP(uv + float2( delta.x,  delta.y));

    float3 n0 = GetPackedNormal(p0);

    // Weight each diagonal tap by its normal similarity with the centre.
    float w0 = 1.0;
    float w1 = CompareNormal(n0, GetPackedNormal(p1));
    float w2 = CompareNormal(n0, GetPackedNormal(p2));
    float w3 = CompareNormal(n0, GetPackedNormal(p3));
    float w4 = CompareNormal(n0, GetPackedNormal(p4));

    float s;
    s  = GetPackedAO(p0) * w0;
    s += GetPackedAO(p1) * w1;
    s += GetPackedAO(p2) * w2;
    s += GetPackedAO(p3) * w3;
    s += GetPackedAO(p4) * w4;

    // Normalize by the total weight.
    return s * rcp(w0 + w1 + w2 + w3 + w4);
}
|
||||
|
||||
// Horizontal pass of the separable bilateral blur.
half4 HorizontalBlur(Varyings input) : SV_Target
{
    UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(input);

    float2 uv = input.uv;
    float2 delta = float2(_SourceSize.z * rcp(DOWNSAMPLE) * 2.0, 0.0);
    return Blur(uv, delta);
}

// Vertical pass of the separable bilateral blur.
half4 VerticalBlur(Varyings input) : SV_Target
{
    UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(input);

    float2 uv = input.uv;
    float2 delta = float2(0.0, _SourceSize.w * rcp(DOWNSAMPLE) * 2.0);
    return Blur(uv, delta);
}

// Final small-kernel blur. Also inverts the value so the output is an
// occlusion factor (1 = unoccluded).
half4 FinalBlur(Varyings input) : SV_Target
{
    UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(input);

    float2 uv = input.uv;
    float2 delta = _SourceSize.zw * rcp(DOWNSAMPLE);
    return 1.0 - BlurSmall(uv, delta);
}
|
||||
|
||||
#endif //UNIVERSAL_SSAO_INCLUDED
|
@@ -0,0 +1,89 @@
|
||||
#ifndef UNITY_GRAPHFUNCTIONS_LW_INCLUDED
|
||||
#define UNITY_GRAPHFUNCTIONS_LW_INCLUDED
|
||||
|
||||
#define SHADERGRAPH_SAMPLE_SCENE_DEPTH(uv) shadergraph_LWSampleSceneDepth(uv)
|
||||
#define SHADERGRAPH_SAMPLE_SCENE_COLOR(uv) shadergraph_LWSampleSceneColor(uv)
|
||||
#define SHADERGRAPH_BAKED_GI(positionWS, normalWS, uvStaticLightmap, uvDynamicLightmap, applyScaling) shadergraph_LWBakedGI(positionWS, normalWS, uvStaticLightmap, uvDynamicLightmap, applyScaling)
|
||||
#define SHADERGRAPH_REFLECTION_PROBE(viewDir, normalOS, lod) shadergraph_LWReflectionProbe(viewDir, normalOS, lod)
|
||||
#define SHADERGRAPH_FOG(position, color, density) shadergraph_LWFog(position, color, density)
|
||||
#define SHADERGRAPH_AMBIENT_SKY unity_AmbientSky
|
||||
#define SHADERGRAPH_AMBIENT_EQUATOR unity_AmbientEquator
|
||||
#define SHADERGRAPH_AMBIENT_GROUND unity_AmbientGround
|
||||
|
||||
#if defined(REQUIRE_DEPTH_TEXTURE)
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/DeclareDepthTexture.hlsl"
|
||||
#endif
|
||||
|
||||
#if defined(REQUIRE_OPAQUE_TEXTURE)
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/DeclareOpaqueTexture.hlsl"
|
||||
#endif
|
||||
|
||||
// Shader Graph scene-depth hook; returns 0 when no depth texture is required.
float shadergraph_LWSampleSceneDepth(float2 uv)
{
#if defined(REQUIRE_DEPTH_TEXTURE)
    return SampleSceneDepth(uv);
#else
    return 0;
#endif
}

// Shader Graph scene-color hook; returns 0 when no opaque texture is required.
float3 shadergraph_LWSampleSceneColor(float2 uv)
{
#if defined(REQUIRE_OPAQUE_TEXTURE)
    return SampleSceneColor(uv);
#else
    return 0;
#endif
}

// Shader Graph baked-GI hook: lightmap when LIGHTMAP_ON (optionally applying
// the lightmap scale/offset), spherical harmonics otherwise.
float3 shadergraph_LWBakedGI(float3 positionWS, float3 normalWS, float2 uvStaticLightmap, float2 uvDynamicLightmap, bool applyScaling)
{
#ifdef LIGHTMAP_ON
    if (applyScaling)
        uvStaticLightmap = uvStaticLightmap * unity_LightmapST.xy + unity_LightmapST.zw;

    return SampleLightmap(uvStaticLightmap, normalWS);
#else
    return SampleSH(normalWS);
#endif
}

// Shader Graph reflection-probe hook: samples unity_SpecCube0 along the
// reflection vector at the requested mip and decodes the HDR value.
float3 shadergraph_LWReflectionProbe(float3 viewDir, float3 normalOS, float lod)
{
    float3 reflectVec = reflect(-viewDir, normalOS);
    return DecodeHDREnvironment(SAMPLE_TEXTURECUBE_LOD(unity_SpecCube0, samplerunity_SpecCube0, reflectVec, lod), unity_SpecCube0_HDR);
}

// Shader Graph fog hook: outputs the fog color and the fog factor computed
// from the clip-space z of the given object-space position.
void shadergraph_LWFog(float3 position, out float4 color, out float density)
{
    color = unity_FogColor;
    density = ComputeFogFactor(TransformObjectToHClip(position).z);
}
|
||||
|
||||
// This function assumes the bitangent flip is encoded in tangentWS.w
|
||||
// Builds the tangent-to-world basis. Assumes the bitangent flip sign is
// encoded in tangentWS.w. tangentWS must not be normalized (mikktspace
// requirement).
float3x3 BuildTangentToWorld(float4 tangentWS, float3 normalWS)
{
    // Normalize normalWS but keep the renormalization factor so it can also
    // be applied to the tangent and bitangent.
    float3 unnormalizedNormalWS = normalWS;
    float renormFactor = 1.0 / length(unnormalizedNormalWS);

    // "Bitangent on the fly" (xNormal) to reduce vertex shader outputs; this
    // is the mikktspace transformation and must use unnormalized attributes.
    float3x3 tangentToWorld = CreateTangentToWorld(unnormalizedNormalWS, tangentWS.xyz, tangentWS.w > 0.0 ? 1.0 : -1.0);

    // The surface-gradient formulation requires a unit-length initial normal.
    // Uniformly scaling all three axes stays mikktspace-compliant because
    // normalization of the perturbed normal cancels the scale.
    tangentToWorld[0] = tangentToWorld[0] * renormFactor;
    tangentToWorld[1] = tangentToWorld[1] * renormFactor;
    tangentToWorld[2] = tangentToWorld[2] * renormFactor; // normalizes the interpolated vertex normal

    return tangentToWorld;
}
|
||||
|
||||
// Always include Shader Graph version
|
||||
// Always include last to avoid double macros
|
||||
#include "Packages/com.unity.shadergraph/ShaderGraphLibrary/Functions.hlsl"
|
||||
|
||||
#endif // UNITY_GRAPHFUNCTIONS_LW_INCLUDED
|
@@ -0,0 +1,15 @@
|
||||
namespace UnityEngine.Rendering.Universal
{
    public static partial class ShaderInput
    {
        // CPU-side mirror of a single light's GPU data. [GenerateHLSL] emits
        // the matching HLSL struct with exact packing, so field order and
        // types must not change without regenerating the shader include.
        [GenerateHLSL(PackingRules.Exact, false)]
        public struct LightData
        {
            public Vector4 position;
            public Vector4 color;
            public Vector4 attenuation;
            public Vector4 spotDirection;
            public Vector4 occlusionProbeChannels;
        }
    }
}
|
@@ -0,0 +1,19 @@
|
||||
//
|
||||
// This file was automatically generated. Please don't edit by hand. Execute Editor command [ Edit / Render Pipeline / Generate Shader Includes ] instead
|
||||
//
|
||||
|
||||
#ifndef SHADERTYPES_CS_HLSL
|
||||
#define SHADERTYPES_CS_HLSL
|
||||
// Generated from UnityEngine.Rendering.Universal.ShaderInput+LightData
// PackingRules = Exact
// Must match ShaderInput.LightData field-for-field (float4 == Vector4).
struct LightData
{
    float4 position;
    float4 color;
    float4 attenuation;
    float4 spotDirection;
    float4 occlusionProbeChannels;
};
|
||||
|
||||
|
||||
#endif
|
@@ -0,0 +1,17 @@
|
||||
#ifndef UNITY_SHADER_VARIABLES_FUNCTIONS_DEPRECATED_INCLUDED
|
||||
#define UNITY_SHADER_VARIABLES_FUNCTIONS_DEPRECATED_INCLUDED
|
||||
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Input.hlsl"
|
||||
|
||||
// Deprecated: A confusingly named and duplicate function that scales clipspace to unity NDC range. (-w < x(-y) < w --> 0 < xy < w)
// Use GetVertexPositionInputs().positionNDC instead for vertex shader
// Or a similar function in Common.hlsl, ComputeNormalizedDeviceCoordinatesWithZ()
float4 ComputeScreenPos(float4 positionCS)
{
    // Remap xy from [-w, w] to [0, w]; _ProjectionParams.x carries the
    // projection flip sign for y. z and w pass through unchanged.
    float4 screenPos;
    screenPos.x = positionCS.x * 0.5f + positionCS.w * 0.5f;
    screenPos.y = positionCS.y * 0.5f * _ProjectionParams.x + positionCS.w * 0.5f;
    screenPos.zw = positionCS.zw;
    return screenPos;
}
|
||||
|
||||
#endif // UNITY_SHADER_VARIABLES_FUNCTIONS_DEPRECATED_INCLUDED
|
@@ -0,0 +1,276 @@
|
||||
#ifndef UNITY_SHADER_VARIABLES_FUNCTIONS_INCLUDED
|
||||
#define UNITY_SHADER_VARIABLES_FUNCTIONS_INCLUDED
|
||||
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/ShaderVariablesFunctions.deprecated.hlsl"
|
||||
|
||||
// Transforms an object-space position into world, view, clip and NDC space
// in one call (standard URP vertex helper).
VertexPositionInputs GetVertexPositionInputs(float3 positionOS)
{
    VertexPositionInputs result;
    result.positionWS = TransformObjectToWorld(positionOS);
    result.positionVS = TransformWorldToView(result.positionWS);
    result.positionCS = TransformWorldToHClip(result.positionWS);

    // positionNDC.xy is clip-space xy remapped from [-w, w] to [0, w];
    // _ProjectionParams.x carries the projection flip sign for y.
    float4 clipHalf = result.positionCS * 0.5f;
    result.positionNDC.xy = float2(clipHalf.x, clipHalf.y * _ProjectionParams.x) + clipHalf.w;
    result.positionNDC.zw = result.positionCS.zw;

    return result;
}
|
||||
|
||||
// Overload without a tangent: only the normal is transformed; the tangent
// frame falls back to canonical world axes.
VertexNormalInputs GetVertexNormalInputs(float3 normalOS)
{
    VertexNormalInputs result;
    result.tangentWS = real3(1.0, 0.0, 0.0);
    result.bitangentWS = real3(0.0, 1.0, 0.0);
    result.normalWS = TransformObjectToWorldNormal(normalOS);
    return result;
}
|
||||
|
||||
// Builds the world-space TBN from object-space normal and tangent.
VertexNormalInputs GetVertexNormalInputs(float3 normalOS, float4 tangentOS)
{
    VertexNormalInputs result;

    // mikkts space compliant. only normalize when extracting normal at frag.
    // tangentOS.w encodes the bitangent flip; GetOddNegativeScale() corrects
    // for negatively-scaled objects.
    real flipSign = tangentOS.w * GetOddNegativeScale();
    result.normalWS = TransformObjectToWorldNormal(normalOS);
    result.tangentWS = TransformObjectToWorldDir(tangentOS.xyz);
    result.bitangentWS = cross(result.normalWS, result.tangentWS) * flipSign;

    return result;
}
|
||||
|
||||
// Accessor for _ScaledScreenParams (render-target size after render scale;
// exact component layout defined in Input.hlsl — confirm there before use).
float4 GetScaledScreenParams()
{
    return _ScaledScreenParams;
}
|
||||
|
||||
// Returns 'true' if the current view performs a perspective projection.
bool IsPerspectiveProjection()
{
    // unity_OrthoParams.w is 0 for perspective cameras (non-zero when orthographic).
    return (unity_OrthoParams.w == 0);
}
|
||||
|
||||
// World-space position of the rendering camera.
float3 GetCameraPositionWS()
{
    // Currently we do not support Camera Relative Rendering so
    // we simply return the _WorldSpaceCameraPos until then
    return _WorldSpaceCameraPos;

    // We will replace the code above with this one once
    // we start supporting Camera Relative Rendering
    //#if (SHADEROPTIONS_CAMERA_RELATIVE_RENDERING != 0)
    //    return float3(0, 0, 0);
    //#else
    //    return _WorldSpaceCameraPos;
    //#endif
}
|
||||
|
||||
// Could be e.g. the position of a primary camera or a shadow-casting light.
float3 GetCurrentViewPosition()
{
    // Currently we do not support Camera Relative Rendering so
    // we simply return the _WorldSpaceCameraPos until then
    return GetCameraPositionWS();

    // We will replace the code above with this one once
    // we start supporting Camera Relative Rendering
    //#if defined(SHADERPASS) && (SHADERPASS != SHADERPASS_SHADOWS)
    //    return GetCameraPositionWS();
    //#else
    //    // This is a generic solution.
    //    // However, for the primary camera, using '_WorldSpaceCameraPos' is better for cache locality,
    //    // and in case we enable camera-relative rendering, we can statically set the position is 0.
    //    return UNITY_MATRIX_I_V._14_24_34;
    //#endif
}
|
||||
|
||||
// Returns the forward (central) direction of the current view in the world space.
float3 GetViewForwardDir()
{
    // Row 2 of the world-to-view matrix is the view-space Z axis expressed in
    // world space; negate it because view space looks down -Z.
    return -GetWorldToViewMatrix()[2].xyz;
}
|
||||
|
||||
// Computes the world space view direction (pointing towards the viewer).
// Not normalized; see GetWorldSpaceNormalizeViewDir for the unit-length variant.
float3 GetWorldSpaceViewDir(float3 positionWS)
{
    // Orthographic views share one constant direction; perspective views point
    // from the shaded position towards the eye.
    if (!IsPerspectiveProjection())
        return -GetViewForwardDir();

    return GetCurrentViewPosition() - positionWS;
}
|
||||
|
||||
// Normalized world-space view direction (pointing towards the viewer).
float3 GetWorldSpaceNormalizeViewDir(float3 positionWS)
{
    // Orthographic: constant view direction, returned as-is.
    if (!IsPerspectiveProjection())
        return -GetViewForwardDir();

    // Perspective: normalize the eye vector.
    float3 toEye = GetCurrentViewPosition() - positionWS;
    return normalize(toEye);
}
|
||||
|
||||
// UNITY_MATRIX_V defines a right-handed view space with the Z axis pointing towards the viewer.
// This function reverses the direction of the Z axis (so that it points forward),
// making the view space coordinate system left-handed.
void GetLeftHandedViewSpaceMatrices(out float4x4 viewMatrix, out float4x4 projMatrix)
{
    // Negate the view matrix's Z row (world -> view z output)...
    viewMatrix = UNITY_MATRIX_V;
    viewMatrix._31_32_33_34 = -viewMatrix._31_32_33_34;

    // ...and the projection matrix's Z column, so view*proj stays consistent.
    projMatrix = UNITY_MATRIX_P;
    projMatrix._13_23_33_43 = -projMatrix._13_23_33_43;
}
|
||||
|
||||
// Kills the fragment when alpha falls below (cutoff - offset).
// Compiles to a no-op unless the material enables alpha testing (_ALPHATEST_ON).
void AlphaDiscard(real alpha, real cutoff, real offset = 0.0h)
{
#ifdef _ALPHATEST_ON
    clip(alpha - cutoff + offset);
#endif
}
|
||||
|
||||
// Passes the computed alpha through for transparent surfaces (surfaceType == 1);
// opaque surfaces always write full alpha.
half OutputAlpha(half outputAlpha, half surfaceType = 0.0)
{
    if (surfaceType == 1)
        return outputAlpha;
    return 1.0;
}
|
||||
|
||||
// A word on normalization of normals:
// For better quality normals should be normalized before and after
// interpolation.
// 1) In vertex, skinning or blend shapes might vary significantly the lenght of normal.
// 2) In fragment, because even outputting unit-length normals interpolation can make it non-unit.
// 3) In fragment when using normal map, because mikktspace sets up non orthonormal basis.
// However we will try to balance performance vs quality here as also let users configure that as
// shader quality tiers.
// Low Quality Tier: Don't normalize per-vertex.
// Medium Quality Tier: Always normalize per-vertex.
// High Quality Tier: Always normalize per-vertex.
//
// Always normalize per-pixel.
// Too many bug like lighting quality issues otherwise.
real3 NormalizeNormalPerVertex(real3 normalWS)
{
    // Skipping only happens on the low tier AND when a normal map is present
    // (the per-pixel normalize after perturbation covers it then).
#if defined(SHADER_QUALITY_LOW) && defined(_NORMALMAP)
    return normalWS;
#else
    return normalize(normalWS);
#endif
}
|
||||
|
||||
// Per-pixel normals are always normalized (see the quality-tier note above
// NormalizeNormalPerVertex for the rationale).
real3 NormalizeNormalPerPixel(real3 normalWS)
{
    return normalize(normalWS);
}
|
||||
|
||||
|
||||
|
||||
// Vertex-stage fog term from a clip-space depth. For linear fog this is the
// final [0,1] factor; for exponential fog it is only density*z — the actual
// exp() is applied per-pixel in ComputeFogIntensity.
real ComputeFogFactor(float z)
{
    float clipZ_01 = UNITY_Z_0_FAR_FROM_CLIPSPACE(z);

#if defined(FOG_LINEAR)
    // factor = (end-z)/(end-start) = z * (-1/(end-start)) + (end/(end-start))
    float fogFactor = saturate(clipZ_01 * unity_FogParams.z + unity_FogParams.w);
    return real(fogFactor);
#elif defined(FOG_EXP) || defined(FOG_EXP2)
    // FOG_EXP:  factor = exp(-density*z)
    // FOG_EXP2: factor = exp(-(density*z)^2)
    // -density * z computed at vertex
    return real(unity_FogParams.x * clipZ_01);
#else
    return 0.0h;
#endif
}
|
||||
|
||||
// Converts the vertex-stage fog factor (see ComputeFogFactor) into the final
// blend intensity: 1 = no fog, 0 = fully fogged.
real ComputeFogIntensity(real fogFactor)
{
    real fogIntensity = 0.0h;
#if defined(FOG_LINEAR) || defined(FOG_EXP) || defined(FOG_EXP2)
#if defined(FOG_EXP)
    // factor = exp(-density*z)
    // fogFactor = density*z compute at vertex
    fogIntensity = saturate(exp2(-fogFactor));
#elif defined(FOG_EXP2)
    // factor = exp(-(density*z)^2)
    // fogFactor = density*z compute at vertex
    fogIntensity = saturate(exp2(-fogFactor * fogFactor));
#elif defined(FOG_LINEAR)
    // Linear fog factor is already the final intensity.
    fogIntensity = fogFactor;
#endif
#endif
    return fogIntensity;
}
|
||||
|
||||
// Blends the fragment towards fogColor by the fog intensity derived from
// fogFactor. No-op when no fog keyword is enabled.
half3 MixFogColor(real3 fragColor, real3 fogColor, real fogFactor)
{
#if defined(FOG_LINEAR) || defined(FOG_EXP) || defined(FOG_EXP2)
    real fogIntensity = ComputeFogIntensity(fogFactor);
    fragColor = lerp(fogColor, fragColor, fogIntensity);
#endif
    return fragColor;
}
|
||||
|
||||
// Convenience wrapper: MixFogColor with the global scene fog color.
half3 MixFog(real3 fragColor, real fogFactor)
{
    return MixFogColor(fragColor, unity_FogColor.rgb, fogFactor);
}
|
||||
|
||||
// Flips uv.y on platforms where the UV origin is the top of the screen.
// _ScaleBiasRt presumably accounts for intermediate render-target flips — TODO
// confirm against where it is set on the C# side.
void TransformScreenUV(inout float2 uv, float screenHeight)
{
#if UNITY_UV_STARTS_AT_TOP
    uv.y = screenHeight - (uv.y * _ScaleBiasRt.x + _ScaleBiasRt.y * screenHeight);
#endif
}
|
||||
|
||||
// Pixel-coordinate overload: uses the scaled render-target height.
void TransformScreenUV(inout float2 uv)
{
#if UNITY_UV_STARTS_AT_TOP
    TransformScreenUV(uv, GetScaledScreenParams().y);
#endif
}
|
||||
|
||||
// Variant for [0,1]-normalized UVs (screen height == 1).
void TransformNormalizedScreenUV(inout float2 uv)
{
#if UNITY_UV_STARTS_AT_TOP
    TransformScreenUV(uv, 1.0);
#endif
}
|
||||
|
||||
// Converts pixel coordinates (e.g. SV_Position.xy) to [0,1] screen UVs,
// applying the platform y-flip where required.
float2 GetNormalizedScreenSpaceUV(float2 positionCS)
{
    float2 screenUV = positionCS * rcp(GetScaledScreenParams().xy);
    TransformNormalizedScreenUV(screenUV);
    return screenUV;
}
|
||||
|
||||
// float4 convenience overload (only xy are used).
float2 GetNormalizedScreenSpaceUV(float4 positionCS)
{
    return GetNormalizedScreenSpaceUV(positionCS.xy);
}
|
||||
|
||||
#if defined(UNITY_SINGLE_PASS_STEREO)
// Remaps a full-target UV into the current eye's sub-rect of the
// double-wide render target.
float2 TransformStereoScreenSpaceTex(float2 uv, float w)
{
    // TODO: RVS support can be added here, if Universal decides to support it
    float4 scaleOffset = unity_StereoScaleOffset[unity_StereoEyeIndex];
    return uv.xy * scaleOffset.xy + scaleOffset.zw * w;
}

float2 UnityStereoTransformScreenSpaceTex(float2 uv)
{
    // Clamp to [0,1] before the per-eye remap so samples stay inside the eye rect.
    return TransformStereoScreenSpaceTex(saturate(uv), 1.0);
}
#else
// No-op outside single-pass stereo rendering.
#define UnityStereoTransformScreenSpaceTex(uv) uv
#endif // defined(UNITY_SINGLE_PASS_STEREO)
|
||||
|
||||
#endif // UNITY_SHADER_VARIABLES_FUNCTIONS_INCLUDED
|
@@ -0,0 +1,482 @@
|
||||
#ifndef UNIVERSAL_SHADOWS_INCLUDED
|
||||
#define UNIVERSAL_SHADOWS_INCLUDED
|
||||
|
||||
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Common.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Shadow/ShadowSamplingTent.hlsl"
|
||||
#include "Core.hlsl"
|
||||
|
||||
#define MAX_SHADOW_CASCADES 4
|
||||
|
||||
#if !defined(_RECEIVE_SHADOWS_OFF)
|
||||
#if defined(_MAIN_LIGHT_SHADOWS) || defined(_MAIN_LIGHT_SHADOWS_CASCADE) || defined(_MAIN_LIGHT_SHADOWS_SCREEN)
|
||||
#define MAIN_LIGHT_CALCULATE_SHADOWS
|
||||
|
||||
#if !defined(_MAIN_LIGHT_SHADOWS_CASCADE)
|
||||
#define REQUIRES_VERTEX_SHADOW_COORD_INTERPOLATOR
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(_ADDITIONAL_LIGHT_SHADOWS)
|
||||
#define ADDITIONAL_LIGHT_CALCULATE_SHADOWS
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(UNITY_DOTS_INSTANCING_ENABLED)
|
||||
#define SHADOWMASK_NAME unity_ShadowMasks
|
||||
#define SHADOWMASK_SAMPLER_NAME samplerunity_ShadowMasks
|
||||
#define SHADOWMASK_SAMPLE_EXTRA_ARGS , unity_LightmapIndex.x
|
||||
#else
|
||||
#define SHADOWMASK_NAME unity_ShadowMask
|
||||
#define SHADOWMASK_SAMPLER_NAME samplerunity_ShadowMask
|
||||
#define SHADOWMASK_SAMPLE_EXTRA_ARGS
|
||||
#endif
|
||||
|
||||
#if defined(SHADOWS_SHADOWMASK) && defined(LIGHTMAP_ON)
|
||||
#define SAMPLE_SHADOWMASK(uv) SAMPLE_TEXTURE2D_LIGHTMAP(SHADOWMASK_NAME, SHADOWMASK_SAMPLER_NAME, uv SHADOWMASK_SAMPLE_EXTRA_ARGS);
|
||||
#elif !defined (LIGHTMAP_ON)
|
||||
#define SAMPLE_SHADOWMASK(uv) unity_ProbesOcclusion;
|
||||
#else
|
||||
#define SAMPLE_SHADOWMASK(uv) half4(1, 1, 1, 1);
|
||||
#endif
|
||||
|
||||
#define REQUIRES_WORLD_SPACE_POS_INTERPOLATOR
|
||||
|
||||
#if defined(LIGHTMAP_ON) || defined(LIGHTMAP_SHADOW_MIXING) || defined(SHADOWS_SHADOWMASK)
|
||||
#define CALCULATE_BAKED_SHADOWS
|
||||
#endif
|
||||
|
||||
SCREENSPACE_TEXTURE(_ScreenSpaceShadowmapTexture);
|
||||
SAMPLER(sampler_ScreenSpaceShadowmapTexture);
|
||||
|
||||
TEXTURE2D_SHADOW(_MainLightShadowmapTexture);
|
||||
SAMPLER_CMP(sampler_MainLightShadowmapTexture);
|
||||
|
||||
TEXTURE2D_SHADOW(_AdditionalLightsShadowmapTexture);
|
||||
SAMPLER_CMP(sampler_AdditionalLightsShadowmapTexture);
|
||||
|
||||
// GLES3 causes a performance regression in some devices when using CBUFFER.
|
||||
#ifndef SHADER_API_GLES3
|
||||
CBUFFER_START(MainLightShadows)
|
||||
#endif
|
||||
// Last cascade is initialized with a no-op matrix. It always transforms
|
||||
// shadow coord to half3(0, 0, NEAR_PLANE). We use this trick to avoid
|
||||
// branching since ComputeCascadeIndex can return cascade index = MAX_SHADOW_CASCADES
|
||||
float4x4 _MainLightWorldToShadow[MAX_SHADOW_CASCADES + 1];
|
||||
float4 _CascadeShadowSplitSpheres0;
|
||||
float4 _CascadeShadowSplitSpheres1;
|
||||
float4 _CascadeShadowSplitSpheres2;
|
||||
float4 _CascadeShadowSplitSpheres3;
|
||||
float4 _CascadeShadowSplitSphereRadii;
|
||||
half4 _MainLightShadowOffset0;
|
||||
half4 _MainLightShadowOffset1;
|
||||
half4 _MainLightShadowOffset2;
|
||||
half4 _MainLightShadowOffset3;
|
||||
half4 _MainLightShadowParams; // (x: shadowStrength, y: 1.0 if soft shadows, 0.0 otherwise, z: oneOverFadeDist, w: minusStartFade) - xy are used by MainLight only, yz are used by MainLight AND AdditionalLights
|
||||
float4 _MainLightShadowmapSize; // (xy: 1/width and 1/height, zw: width and height)
|
||||
#ifndef SHADER_API_GLES3
|
||||
CBUFFER_END
|
||||
#endif
|
||||
|
||||
#if USE_STRUCTURED_BUFFER_FOR_LIGHT_DATA
|
||||
|
||||
StructuredBuffer<float4> _AdditionalShadowParams_SSBO; // Per-light data - TODO: test if splitting _AdditionalShadowParams_SSBO[lightIndex].w into a separate StructuredBuffer<int> buffer is faster
|
||||
StructuredBuffer<float4x4> _AdditionalLightsWorldToShadow_SSBO; // Per-shadow-slice-data - A shadow casting light can have 6 shadow slices (if it's a point light)
|
||||
|
||||
half4 _AdditionalShadowOffset0;
|
||||
half4 _AdditionalShadowOffset1;
|
||||
half4 _AdditionalShadowOffset2;
|
||||
half4 _AdditionalShadowOffset3;
|
||||
float4 _AdditionalShadowmapSize; // (xy: 1/width and 1/height, zw: width and height)
|
||||
|
||||
#else
|
||||
|
||||
|
||||
#if defined(SHADER_API_MOBILE) || (defined(SHADER_API_GLCORE) && !defined(SHADER_API_SWITCH)) || defined(SHADER_API_GLES) || defined(SHADER_API_GLES3) // Workaround for bug on Nintendo Switch where SHADER_API_GLCORE is mistakenly defined
|
||||
// Point lights can use 6 shadow slices, but on some mobile GPUs performance decrease drastically with uniform blocks bigger than 8kb. This number ensures size of buffer AdditionalLightShadows stays reasonable.
|
||||
// It also avoids shader compilation errors on SHADER_API_GLES30 devices where max number of uniforms per shader GL_MAX_FRAGMENT_UNIFORM_VECTORS is low (224)
|
||||
// Keep in sync with MAX_PUNCTUAL_LIGHT_SHADOW_SLICES_IN_UBO in AdditionalLightsShadowCasterPass.cs
|
||||
#define MAX_PUNCTUAL_LIGHT_SHADOW_SLICES_IN_UBO (MAX_VISIBLE_LIGHTS)
|
||||
#else
|
||||
// Point lights can use 6 shadow slices, but on some platforms max uniform block size is 64kb. This number ensures size of buffer AdditionalLightShadows does not exceed this 64kb limit.
|
||||
// Keep in sync with MAX_PUNCTUAL_LIGHT_SHADOW_SLICES_IN_UBO in AdditionalLightsShadowCasterPass.cs
|
||||
#define MAX_PUNCTUAL_LIGHT_SHADOW_SLICES_IN_UBO 545
|
||||
#endif
|
||||
|
||||
// GLES3 causes a performance regression in some devices when using CBUFFER.
|
||||
#ifndef SHADER_API_GLES3
|
||||
CBUFFER_START(AdditionalLightShadows)
|
||||
#endif
|
||||
|
||||
half4 _AdditionalShadowParams[MAX_VISIBLE_LIGHTS]; // Per-light data
|
||||
float4x4 _AdditionalLightsWorldToShadow[MAX_PUNCTUAL_LIGHT_SHADOW_SLICES_IN_UBO]; // Per-shadow-slice-data
|
||||
|
||||
half4 _AdditionalShadowOffset0;
|
||||
half4 _AdditionalShadowOffset1;
|
||||
half4 _AdditionalShadowOffset2;
|
||||
half4 _AdditionalShadowOffset3;
|
||||
float4 _AdditionalShadowmapSize; // (xy: 1/width and 1/height, zw: width and height)
|
||||
|
||||
#ifndef SHADER_API_GLES3
|
||||
CBUFFER_END
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
float4 _ShadowBias; // x: depth bias, y: normal bias
|
||||
|
||||
#define BEYOND_SHADOW_FAR(shadowCoord) shadowCoord.z <= 0.0 || shadowCoord.z >= 1.0
|
||||
|
||||
struct ShadowSamplingData
|
||||
{
|
||||
half4 shadowOffset0;
|
||||
half4 shadowOffset1;
|
||||
half4 shadowOffset2;
|
||||
half4 shadowOffset3;
|
||||
float4 shadowmapSize;
|
||||
};
|
||||
|
||||
// Gathers the main light's shadow-sampling constants into one struct.
ShadowSamplingData GetMainLightShadowSamplingData()
{
    ShadowSamplingData data;

    // shadowOffsets feed the 4-tap path of SampleShadowmapFiltered
    // (#if defined(SHADER_API_MOBILE) || defined(SHADER_API_SWITCH));
    // shadowmapSize feeds the tent-filter path on other platforms.
    data.shadowOffset0 = _MainLightShadowOffset0;
    data.shadowOffset1 = _MainLightShadowOffset1;
    data.shadowOffset2 = _MainLightShadowOffset2;
    data.shadowOffset3 = _MainLightShadowOffset3;
    data.shadowmapSize = _MainLightShadowmapSize;

    return data;
}
|
||||
|
||||
// Gathers the additional lights' shared shadow-sampling constants.
ShadowSamplingData GetAdditionalLightShadowSamplingData()
{
    ShadowSamplingData data;

    // shadowOffsets feed the 4-tap path of SampleShadowmapFiltered
    // (#if defined(SHADER_API_MOBILE) || defined(SHADER_API_SWITCH));
    // shadowmapSize feeds the tent-filter path on other platforms.
    data.shadowOffset0 = _AdditionalShadowOffset0;
    data.shadowOffset1 = _AdditionalShadowOffset1;
    data.shadowOffset2 = _AdditionalShadowOffset2;
    data.shadowOffset3 = _AdditionalShadowOffset3;
    data.shadowmapSize = _AdditionalShadowmapSize;

    return data;
}
|
||||
|
||||
// ShadowParams
// x: ShadowStrength
// y: 1.0 if shadow is soft, 0.0 otherwise
// (z: oneOverFadeDist, w: minusStartFade — see the _MainLightShadowParams
//  declaration in the MainLightShadows cbuffer above.)
half4 GetMainLightShadowParams()
{
    return _MainLightShadowParams;
}
|
||||
|
||||
|
||||
// ShadowParams
// x: ShadowStrength
// y: 1.0 if shadow is soft, 0.0 otherwise
// z: 1.0 if cast by a point light (6 shadow slices), 0.0 if cast by a spot light (1 shadow slice)
// w: first shadow slice index for this light, there can be 6 in case of point lights. (-1 for non-shadow-casting-lights)
half4 GetAdditionalLightShadowParams(int lightIndex)
{
    // Data lives in an SSBO or a UBO array depending on platform capability.
#if USE_STRUCTURED_BUFFER_FOR_LIGHT_DATA
    return _AdditionalShadowParams_SSBO[lightIndex];
#else
    return _AdditionalShadowParams[lightIndex];
#endif
}
|
||||
|
||||
// Samples the pre-resolved screen-space shadow texture.
// shadowCoord is an unprojected screen position (as produced by ComputeScreenPos).
half SampleScreenSpaceShadowmap(float4 shadowCoord)
{
    shadowCoord.xy /= shadowCoord.w;

    // The stereo transform has to happen after the manual perspective divide
    shadowCoord.xy = UnityStereoTransformScreenSpaceTex(shadowCoord.xy);

    // Stereo instancing/multiview renders into a texture array, one slice per eye.
#if defined(UNITY_STEREO_INSTANCING_ENABLED) || defined(UNITY_STEREO_MULTIVIEW_ENABLED)
    half attenuation = SAMPLE_TEXTURE2D_ARRAY(_ScreenSpaceShadowmapTexture, sampler_ScreenSpaceShadowmapTexture, shadowCoord.xy, unity_StereoEyeIndex).x;
#else
    half attenuation = SAMPLE_TEXTURE2D(_ScreenSpaceShadowmapTexture, sampler_ScreenSpaceShadowmapTexture, shadowCoord.xy).x;
#endif

    return attenuation;
}
|
||||
|
||||
// Soft (filtered) shadowmap sample.
// Mobile/Switch: 4 offset hardware-comparison taps, equally weighted.
// Other platforms: 9-tap 5x5 tent filter (weights/UVs from ShadowSamplingTent.hlsl).
real SampleShadowmapFiltered(TEXTURE2D_SHADOW_PARAM(ShadowMap, sampler_ShadowMap), float4 shadowCoord, ShadowSamplingData samplingData)
{
    real attenuation;

#if defined(SHADER_API_MOBILE) || defined(SHADER_API_SWITCH)
    // 4-tap hardware comparison
    real4 attenuation4;
    attenuation4.x = SAMPLE_TEXTURE2D_SHADOW(ShadowMap, sampler_ShadowMap, shadowCoord.xyz + samplingData.shadowOffset0.xyz);
    attenuation4.y = SAMPLE_TEXTURE2D_SHADOW(ShadowMap, sampler_ShadowMap, shadowCoord.xyz + samplingData.shadowOffset1.xyz);
    attenuation4.z = SAMPLE_TEXTURE2D_SHADOW(ShadowMap, sampler_ShadowMap, shadowCoord.xyz + samplingData.shadowOffset2.xyz);
    attenuation4.w = SAMPLE_TEXTURE2D_SHADOW(ShadowMap, sampler_ShadowMap, shadowCoord.xyz + samplingData.shadowOffset3.xyz);
    attenuation = dot(attenuation4, 0.25);
#else
    float fetchesWeights[9];
    float2 fetchesUV[9];
    SampleShadow_ComputeSamples_Tent_5x5(samplingData.shadowmapSize, shadowCoord.xy, fetchesWeights, fetchesUV);

    attenuation = fetchesWeights[0] * SAMPLE_TEXTURE2D_SHADOW(ShadowMap, sampler_ShadowMap, float3(fetchesUV[0].xy, shadowCoord.z));
    attenuation += fetchesWeights[1] * SAMPLE_TEXTURE2D_SHADOW(ShadowMap, sampler_ShadowMap, float3(fetchesUV[1].xy, shadowCoord.z));
    attenuation += fetchesWeights[2] * SAMPLE_TEXTURE2D_SHADOW(ShadowMap, sampler_ShadowMap, float3(fetchesUV[2].xy, shadowCoord.z));
    attenuation += fetchesWeights[3] * SAMPLE_TEXTURE2D_SHADOW(ShadowMap, sampler_ShadowMap, float3(fetchesUV[3].xy, shadowCoord.z));
    attenuation += fetchesWeights[4] * SAMPLE_TEXTURE2D_SHADOW(ShadowMap, sampler_ShadowMap, float3(fetchesUV[4].xy, shadowCoord.z));
    attenuation += fetchesWeights[5] * SAMPLE_TEXTURE2D_SHADOW(ShadowMap, sampler_ShadowMap, float3(fetchesUV[5].xy, shadowCoord.z));
    attenuation += fetchesWeights[6] * SAMPLE_TEXTURE2D_SHADOW(ShadowMap, sampler_ShadowMap, float3(fetchesUV[6].xy, shadowCoord.z));
    attenuation += fetchesWeights[7] * SAMPLE_TEXTURE2D_SHADOW(ShadowMap, sampler_ShadowMap, float3(fetchesUV[7].xy, shadowCoord.z));
    attenuation += fetchesWeights[8] * SAMPLE_TEXTURE2D_SHADOW(ShadowMap, sampler_ShadowMap, float3(fetchesUV[8].xy, shadowCoord.z));
#endif

    return attenuation;
}
|
||||
|
||||
// Core shadowmap sample: hard 1-tap or soft filtered, scaled by shadow strength.
// shadowParams: x = strength, y = soft flag (see GetMainLightShadowParams).
// Returns 1.0 (fully lit) when the coord falls outside the shadow depth range.
real SampleShadowmap(TEXTURE2D_SHADOW_PARAM(ShadowMap, sampler_ShadowMap), float4 shadowCoord, ShadowSamplingData samplingData, half4 shadowParams, bool isPerspectiveProjection = true)
{
    // Compiler will optimize this branch away as long as isPerspectiveProjection is known at compile time
    if (isPerspectiveProjection)
        shadowCoord.xyz /= shadowCoord.w;

    real attenuation;
    real shadowStrength = shadowParams.x;

#ifdef _SHADOWS_SOFT
    if(shadowParams.y != 0)
    {
        attenuation = SampleShadowmapFiltered(TEXTURE2D_SHADOW_ARGS(ShadowMap, sampler_ShadowMap), shadowCoord, samplingData);
    }
    else
#endif
    {
        // 1-tap hardware comparison
        attenuation = SAMPLE_TEXTURE2D_SHADOW(ShadowMap, sampler_ShadowMap, shadowCoord.xyz);
    }

    // Blend towards 1 (no shadow) as strength decreases.
    attenuation = LerpWhiteTo(attenuation, shadowStrength);

    // Shadow coords that fall out of the light frustum volume must always return attenuation 1.0
    // TODO: We could use branch here to save some perf on some platforms.
    return BEYOND_SHADOW_FAR(shadowCoord) ? 1.0 : attenuation;
}
|
||||
|
||||
// Selects the shadow cascade whose bounding sphere contains positionWS.
// Returns 0..3 for a valid cascade, 4 when outside all cascades (the extra
// no-op matrix in _MainLightWorldToShadow handles that case branch-free).
half ComputeCascadeIndex(float3 positionWS)
{
    float3 fromCenter0 = positionWS - _CascadeShadowSplitSpheres0.xyz;
    float3 fromCenter1 = positionWS - _CascadeShadowSplitSpheres1.xyz;
    float3 fromCenter2 = positionWS - _CascadeShadowSplitSpheres2.xyz;
    float3 fromCenter3 = positionWS - _CascadeShadowSplitSpheres3.xyz;
    // Squared distances to the four cascade sphere centers.
    float4 distances2 = float4(dot(fromCenter0, fromCenter0), dot(fromCenter1, fromCenter1), dot(fromCenter2, fromCenter2), dot(fromCenter3, fromCenter3));

    // weights[i] = 1 when inside sphere i; the subtraction keeps only the
    // first (tightest) containing sphere set.
    half4 weights = half4(distances2 < _CascadeShadowSplitSphereRadii);
    weights.yzw = saturate(weights.yzw - weights.xyz);

    return 4 - dot(weights, half4(4, 3, 2, 1));
}
|
||||
|
||||
// World position -> main light shadowmap coordinate.
// xyz: shadowmap coord, w: cascade index (consumed by MainLightShadow's fade logic).
float4 TransformWorldToShadowCoord(float3 positionWS)
{
#ifdef _MAIN_LIGHT_SHADOWS_CASCADE
    half cascadeIndex = ComputeCascadeIndex(positionWS);
#else
    half cascadeIndex = 0;
#endif

    float4 shadowCoord = mul(_MainLightWorldToShadow[cascadeIndex], float4(positionWS, 1.0));

    return float4(shadowCoord.xyz, cascadeIndex);
}
|
||||
|
||||
// Realtime shadow attenuation for the main light (0 = fully shadowed, 1 = lit).
half MainLightRealtimeShadow(float4 shadowCoord)
{
#if !defined(MAIN_LIGHT_CALCULATE_SHADOWS)
    // Shadows disabled for this variant.
    return 1.0h;
#elif defined(_MAIN_LIGHT_SHADOWS_SCREEN)
    // Shadows were resolved to a screen-space texture in an earlier pass.
    return SampleScreenSpaceShadowmap(shadowCoord);
#else
    ShadowSamplingData shadowSamplingData = GetMainLightShadowSamplingData();
    half4 shadowParams = GetMainLightShadowParams();
    // isPerspectiveProjection = false: the directional light shadow projection
    // is orthographic, so no perspective divide is needed.
    return SampleShadowmap(TEXTURE2D_ARGS(_MainLightShadowmapTexture, sampler_MainLightShadowmapTexture), shadowCoord, shadowSamplingData, shadowParams, false);
#endif
}
|
||||
|
||||
// returns 0.0 if position is in light's shadow
// returns 1.0 if position is in light
half AdditionalLightRealtimeShadow(int lightIndex, float3 positionWS, half3 lightDirection)
{
#if !defined(ADDITIONAL_LIGHT_CALCULATE_SHADOWS)
    return 1.0h;
#endif

    ShadowSamplingData shadowSamplingData = GetAdditionalLightShadowSamplingData();

    // See GetAdditionalLightShadowParams for the component layout.
    half4 shadowParams = GetAdditionalLightShadowParams(lightIndex);

    int shadowSliceIndex = shadowParams.w;

    // w == -1 marks a non-shadow-casting light.
    UNITY_BRANCH
    if (shadowSliceIndex < 0)
        return 1.0;

    half isPointLight = shadowParams.z;

    UNITY_BRANCH
    if (isPointLight)
    {
        // This is a point light, we have to find out which shadow slice to sample from
        float cubemapFaceId = CubeMapFaceID(-lightDirection);
        shadowSliceIndex += cubemapFaceId;
    }

#if USE_STRUCTURED_BUFFER_FOR_LIGHT_DATA
    float4 shadowCoord = mul(_AdditionalLightsWorldToShadow_SSBO[shadowSliceIndex], float4(positionWS, 1.0));
#else
    float4 shadowCoord = mul(_AdditionalLightsWorldToShadow[shadowSliceIndex], float4(positionWS, 1.0));
#endif

    // Spot/point light shadow projections are perspective, hence 'true'.
    return SampleShadowmap(TEXTURE2D_ARGS(_AdditionalLightsShadowmapTexture, sampler_AdditionalLightsShadowmapTexture), shadowCoord, shadowSamplingData, shadowParams, true);
}
|
||||
|
||||
// Distance-based shadow fade: 0 near the camera, rising to 1 at the shadow
// distance. z/w of _MainLightShadowParams are oneOverFadeDist / minusStartFade
// (see the cbuffer declaration). Note the result is squared.
half GetShadowFade(float3 positionWS)
{
    float3 camToPixel = positionWS - _WorldSpaceCameraPos;
    // Squared distance avoids a sqrt; the fade params are fit for distance^2.
    float distanceCamToPixel2 = dot(camToPixel, camToPixel);

    half fade = saturate(distanceCamToPixel2 * _MainLightShadowParams.z + _MainLightShadowParams.w);
    return fade * fade;
}
|
||||
|
||||
// Combines realtime and baked shadows. With shadowmask mixing the darker of
// the two wins (after fading the realtime term); otherwise realtime simply
// fades into baked with distance.
half MixRealtimeAndBakedShadows(half realtimeShadow, half bakedShadow, half shadowFade)
{
#if defined(LIGHTMAP_SHADOW_MIXING)
    return min(lerp(realtimeShadow, 1, shadowFade), bakedShadow);
#else
    return lerp(realtimeShadow, bakedShadow, shadowFade);
#endif
}
|
||||
|
||||
// Extracts this light's baked shadow term from the shadowmask.
half BakedShadow(half4 shadowMask, half4 occlusionProbeChannels)
{
    // Here occlusionProbeChannels used as mask selector to select shadows in shadowMask
    // If occlusionProbeChannels all components are zero we use default baked shadow value 1.0
    // This code is optimized for mobile platforms:
    // half bakedShadow = any(occlusionProbeChannels) ? dot(shadowMask, occlusionProbeChannels) : 1.0h;
    half bakedShadow = 1.0h + dot(shadowMask - 1.0h, occlusionProbeChannels);
    return bakedShadow;
}
|
||||
|
||||
// Final main light shadow attenuation: realtime + baked, blended by the
// distance fade.
half MainLightShadow(float4 shadowCoord, float3 positionWS, half4 shadowMask, half4 occlusionProbeChannels)
{
    half realtimeShadow = MainLightRealtimeShadow(shadowCoord);

#ifdef CALCULATE_BAKED_SHADOWS
    half bakedShadow = BakedShadow(shadowMask, occlusionProbeChannels);
#else
    half bakedShadow = 1.0h;
#endif

#ifdef MAIN_LIGHT_CALCULATE_SHADOWS
    half shadowFade = GetShadowFade(positionWS);
#else
    // No realtime shadows: fade fully to the baked term.
    half shadowFade = 1.0h;
#endif

#if defined(_MAIN_LIGHT_SHADOWS_CASCADE) && defined(CALCULATE_BAKED_SHADOWS)
    // shadowCoord.w represents shadow cascade index
    // in case we are out of shadow cascade we need to set shadow fade to 1.0 for correct blending
    // it is needed when realtime shadows gets cut to early during fade and causes disconnect between baked shadow
    shadowFade = shadowCoord.w == 4 ? 1.0h : shadowFade;
#endif

    return MixRealtimeAndBakedShadows(realtimeShadow, bakedShadow, shadowFade);
}
|
||||
|
||||
// Final additional-light shadow attenuation: realtime + baked, blended by the
// distance fade (mirrors MainLightShadow).
half AdditionalLightShadow(int lightIndex, float3 positionWS, half3 lightDirection, half4 shadowMask, half4 occlusionProbeChannels)
{
    half realtimeShadow = AdditionalLightRealtimeShadow(lightIndex, positionWS, lightDirection);

#ifdef CALCULATE_BAKED_SHADOWS
    half bakedShadow = BakedShadow(shadowMask, occlusionProbeChannels);
#else
    half bakedShadow = 1.0h;
#endif

#ifdef ADDITIONAL_LIGHT_CALCULATE_SHADOWS
    half shadowFade = GetShadowFade(positionWS);
#else
    // No realtime shadows: fade fully to the baked term.
    half shadowFade = 1.0h;
#endif

    return MixRealtimeAndBakedShadows(realtimeShadow, bakedShadow, shadowFade);
}
|
||||
|
||||
// Vertex-stage shadow coordinate: a screen position when screen-space shadows
// are enabled, otherwise a shadowmap coordinate.
float4 GetShadowCoord(VertexPositionInputs vertexInput)
{
#if defined(_MAIN_LIGHT_SHADOWS_SCREEN)
    return ComputeScreenPos(vertexInput.positionCS);
#else
    return TransformWorldToShadowCoord(vertexInput.positionWS);
#endif
}
|
||||
|
||||
// Offsets a shadow-caster vertex along the light direction (depth bias,
// _ShadowBias.x) and along the normal (normal bias, _ShadowBias.y) to reduce
// shadow acne. Normal bias grows as the surface turns away from the light.
float3 ApplyShadowBias(float3 positionWS, float3 normalWS, float3 lightDirection)
{
    float invNdotL = 1.0 - saturate(dot(lightDirection, normalWS));
    float scale = invNdotL * _ShadowBias.y;

    // normal bias is negative since we want to apply an inset normal offset
    positionWS = lightDirection * _ShadowBias.xxx + positionWS;
    positionWS = normalWS * scale.xxx + positionWS;
    return positionWS;
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Deprecated /
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// Renamed -> _MainLightShadowParams
|
||||
#define _MainLightShadowData _MainLightShadowParams
|
||||
|
||||
// Deprecated: Use GetShadowFade instead.
float ApplyShadowFade(float shadowAttenuation, float3 positionWS)
{
    float fade = GetShadowFade(positionWS);
    // NOTE(review): GetShadowFade already returns a squared fade, so fade*fade
    // here squares it again — confirm this matches the pre-deprecation behavior
    // before changing it (kept as-is for compatibility).
    return shadowAttenuation + (1 - shadowAttenuation) * fade * fade;
}
|
||||
|
||||
// Deprecated: Use GetMainLightShadowParams instead.
half GetMainLightShadowStrength()
{
    // _MainLightShadowData is itself a deprecated alias for _MainLightShadowParams.
    return _MainLightShadowData.x;
}
|
||||
|
||||
// Deprecated: Use GetAdditionalLightShadowParams instead.
// (Name keeps its original "Strenth" typo for source compatibility.)
half GetAdditionalLightShadowStrenth(int lightIndex)
{
#if USE_STRUCTURED_BUFFER_FOR_LIGHT_DATA
    return _AdditionalShadowParams_SSBO[lightIndex].x;
#else
    return _AdditionalShadowParams[lightIndex].x;
#endif
}
|
||||
|
||||
// Deprecated: Use SampleShadowmap that takes shadowParams instead of strength.
// Thin compatibility wrapper: packs the scalar strength into the shadowParams
// layout (x = strength; y = 1.0 legacy default — presumably the soft-shadow
// flag, confirm against the primary SampleShadowmap; z/w unused) and forwards.
real SampleShadowmap(float4 shadowCoord, TEXTURE2D_SHADOW_PARAM(ShadowMap, sampler_ShadowMap), ShadowSamplingData samplingData, half shadowStrength, bool isPerspectiveProjection = true)
{
    half4 shadowParams = half4(shadowStrength, 1.0, 0.0, 0.0);
    return SampleShadowmap(TEXTURE2D_SHADOW_ARGS(ShadowMap, sampler_ShadowMap), shadowCoord, samplingData, shadowParams, isPerspectiveProjection);
}
|
||||
|
||||
// Deprecated: Use AdditionalLightRealtimeShadow(int lightIndex, float3 positionWS, half3 lightDirection) in Shadows.hlsl instead, as it supports Point Light shadows
// Forwards with a placeholder light direction (+X); point-light shadow lookups
// need the real direction, so this overload cannot support point light shadows.
half AdditionalLightRealtimeShadow(int lightIndex, float3 positionWS)
{
    return AdditionalLightRealtimeShadow(lightIndex, positionWS, half3(1, 0, 0));
}
|
||||
|
||||
#endif
|
@@ -0,0 +1,19 @@
|
||||
#ifndef UNIVERSAL_SURFACE_DATA_INCLUDED
|
||||
#define UNIVERSAL_SURFACE_DATA_INCLUDED
|
||||
|
||||
// Must match Universal ShaderGraph master node
// Per-pixel material properties produced by surface evaluation and consumed by
// the lighting functions.
struct SurfaceData
{
    half3 albedo;              // base color
    half3 specular;            // specular color (specular workflow)
    half  metallic;            // 0 = dielectric, 1 = metal (metallic workflow)
    half  smoothness;          // perceptual smoothness, [0, 1]
    half3 normalTS;            // tangent-space normal (from normal map)
    half3 emission;            // emitted color
    half  occlusion;           // baked ambient occlusion, [0, 1]
    half  alpha;               // opacity
    half  clearCoatMask;       // clear coat layer intensity, [0, 1]
    half  clearCoatSmoothness; // clear coat layer smoothness
};
|
||||
|
||||
#endif
|
@@ -0,0 +1,59 @@
|
||||
#ifndef UNIVERSAL_INPUT_SURFACE_INCLUDED
|
||||
#define UNIVERSAL_INPUT_SURFACE_INCLUDED
|
||||
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Core.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/SurfaceData.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Packing.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/CommonMaterial.hlsl"
|
||||
|
||||
TEXTURE2D(_BaseMap); SAMPLER(sampler_BaseMap);
|
||||
TEXTURE2D(_BumpMap); SAMPLER(sampler_BumpMap);
|
||||
TEXTURE2D(_EmissionMap); SAMPLER(sampler_EmissionMap);
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Material Property Helpers //
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// Computes the final surface alpha and performs alpha-test clipping when enabled.
// When the base map's alpha channel stores smoothness instead of opacity
// (_SMOOTHNESS_TEXTURE_ALBEDO_CHANNEL_A / _GLOSSINESS_FROM_BASE_ALPHA), only
// the tint color's alpha contributes.
half Alpha(half albedoAlpha, half4 color, half cutoff)
{
#if defined(_SMOOTHNESS_TEXTURE_ALBEDO_CHANNEL_A) || defined(_GLOSSINESS_FROM_BASE_ALPHA)
    half alpha = color.a;
#else
    half alpha = albedoAlpha * color.a;
#endif

#if defined(_ALPHATEST_ON)
    clip(alpha - cutoff);
#endif

    return alpha;
}
|
||||
|
||||
// Samples the base map at the given UV. RGB = albedo; A = alpha (or smoothness,
// depending on the keywords handled in Alpha() above).
half4 SampleAlbedoAlpha(float2 uv, TEXTURE2D_PARAM(albedoAlphaMap, sampler_albedoAlphaMap))
{
    return SAMPLE_TEXTURE2D(albedoAlphaMap, sampler_albedoAlphaMap, uv);
}
|
||||
|
||||
// Samples and unpacks the tangent-space normal from a normal map.
// Returns the flat normal (0, 0, 1) when the _NORMALMAP keyword is disabled.
// On quality tiers where BUMP_SCALE_NOT_SUPPORTED is set, the scale parameter
// is ignored and the normal is unpacked without scaling.
half3 SampleNormal(float2 uv, TEXTURE2D_PARAM(bumpMap, sampler_bumpMap), half scale = 1.0h)
{
#ifdef _NORMALMAP
    half4 n = SAMPLE_TEXTURE2D(bumpMap, sampler_bumpMap, uv);
    #if BUMP_SCALE_NOT_SUPPORTED
        return UnpackNormal(n);
    #else
        return UnpackNormalScale(n, scale);
    #endif
#else
    return half3(0.0h, 0.0h, 1.0h);
#endif
}
|
||||
|
||||
// Returns the emission contribution: the emission map sample tinted by
// emissionColor, or black when the _EMISSION keyword is disabled.
half3 SampleEmission(float2 uv, half3 emissionColor, TEXTURE2D_PARAM(emissionMap, sampler_emissionMap))
{
#ifdef _EMISSION
    return SAMPLE_TEXTURE2D(emissionMap, sampler_emissionMap, uv).rgb * emissionColor;
#else
    return 0;
#endif
}
|
||||
|
||||
#endif
|
@@ -0,0 +1,14 @@
|
||||
{
|
||||
"name": "Unity.RenderPipeline.Universal.ShaderLibrary",
|
||||
"references": [
|
||||
"GUID:df380645f10b7bc4b97d4f5eb6303d95"
|
||||
],
|
||||
"includePlatforms": [],
|
||||
"excludePlatforms": [],
|
||||
"allowUnsafeCode": false,
|
||||
"overrideReferences": false,
|
||||
"precompiledReferences": [],
|
||||
"autoReferenced": true,
|
||||
"defineConstraints": [],
|
||||
"versionDefines": []
|
||||
}
|
@@ -0,0 +1,233 @@
|
||||
#ifndef UNIVERSAL_GBUFFERUTIL_INCLUDED
|
||||
#define UNIVERSAL_GBUFFERUTIL_INCLUDED
|
||||
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/SurfaceData.hlsl"
|
||||
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Lighting.hlsl"
|
||||
|
||||
// inspired from [builtin_shaders]/CGIncludes/UnityGBuffer.cginc
|
||||
|
||||
// Non-static meshes with real-time lighting need to write shadow mask, which in that case stores per-object occlusion probe values.
|
||||
#if !defined(LIGHTMAP_ON) && defined(LIGHTMAP_SHADOW_MIXING) && !defined(SHADOWS_SHADOWMASK)
|
||||
#define OUTPUT_SHADOWMASK 1 // subtractive
|
||||
#elif defined(SHADOWS_SHADOWMASK)
|
||||
#define OUTPUT_SHADOWMASK 2 // shadow mask
|
||||
#else
|
||||
#define OUTPUT_SHADOWMASK 0
|
||||
#endif
|
||||
|
||||
#define kLightingInvalid  -1  // No dynamic lighting: can alias any other material type as they are skipped using stencil
|
||||
#define kLightingLit 1 // lit shader
|
||||
#define kLightingSimpleLit 2 // Simple lit shader
|
||||
// clearcoat 3
|
||||
// backscatter 4
|
||||
// skin 5
|
||||
|
||||
// Material flags
|
||||
#define kMaterialFlagReceiveShadowsOff 1 // Does not receive dynamic shadows
|
||||
#define kMaterialFlagSpecularHighlightsOff  2 // Does not receive specular
|
||||
#define kMaterialFlagSubtractiveMixedLighting 4 // The geometry uses subtractive mixed lighting
|
||||
|
||||
// Light flags.
|
||||
#define kLightFlagSubtractiveMixedLighting 4 // The light uses subtractive mixed lighting.
|
||||
|
||||
struct FragmentOutput
|
||||
{
|
||||
half4 GBuffer0 : SV_Target0;
|
||||
half4 GBuffer1 : SV_Target1;
|
||||
half4 GBuffer2 : SV_Target2;
|
||||
half4 GBuffer3 : SV_Target3; // Camera color attachment
|
||||
#if OUTPUT_SHADOWMASK
|
||||
half4 GBuffer4 : SV_Target4;
|
||||
#endif
|
||||
};
|
||||
|
||||
// Packs an integer flag bitfield into a [0, 1] float for storage in gbuffer0.a
// (an 8-bit unorm channel: up to 8 independent flag bits survive quantization).
float PackMaterialFlags(uint materialFlags)
{
    const half kInv255 = 1.0h / 255.0h;
    return materialFlags * kInv255;
}
|
||||
|
||||
// Recovers the integer flag bitfield from the [0, 1] float stored in gbuffer0.a.
// The +0.5 rounds to the nearest integer, undoing 8-bit unorm quantization.
uint UnpackMaterialFlags(float packedMaterialFlags)
{
    float rescaled = packedMaterialFlags * 255.0h;
    return uint(rescaled + 0.5h);
}
|
||||
|
||||
#ifdef _GBUFFER_NORMALS_OCT
// Octahedral-encoded variant: the world-space normal is oct-encoded to two
// values and packed into 3x8 bits, trading ALU for better precision in 8-bit
// render targets.
half3 PackNormal(half3 n)
{
    float2 octNormalWS = PackNormalOctQuadEncode(n);                // values between [-1, +1], must use fp32 on Nintendo Switch.
    float2 remappedOctNormalWS = saturate(octNormalWS * 0.5 + 0.5); // values between [ 0, +1]
    return PackFloat2To888(remappedOctNormalWS);                    // values between [ 0, +1]
}

half3 UnpackNormal(half3 pn)
{
    half2 remappedOctNormalWS = Unpack888ToFloat2(pn);        // values between [ 0, +1]
    half2 octNormalWS = remappedOctNormalWS.xy * 2.0h - 1.0h; // values between [-1, +1]
    return UnpackNormalOctQuadEncode(octNormalWS);            // values between [-1, +1]
}

// SimpleLit stores specular power exp2(10s + 1); packing maps it back to [0, 1]
// via the inverse 0.1*log2(s) - 0.1 (see UnpackSmoothness below). Other lighting
// modes store smoothness unchanged.
half PackSmoothness(half s, int lightingMode)
{
    if (lightingMode == kLightingSimpleLit) // See SimpleLitInput.hlsl, SampleSpecularSmoothness().
        return 0.1h * log2(s) - 0.1h;       // values between [ 0, +1]
    else
        return s;                           // values between [ 0, +1]
}

half UnpackSmoothness(half ps, int lightingMode)
{
    if (lightingMode == kLightingSimpleLit) // See SimpleLitInput.hlsl, SampleSpecularSmoothness().
        return exp2(10.0h * ps + 1.0h);     // inverse of PackSmoothness above
    else
        return ps;                          // values between [ 0, +1]
}

#else
// Pass-through variant: normals are stored directly in the render target.
half3 PackNormal(half3 n)
{ return n; }                               // values between [-1, +1]

half3 UnpackNormal(half3 pn)
{ return pn; }                              // values between [-1, +1]

half PackSmoothness(half s, int lightingMode)
{
    if (lightingMode == kLightingSimpleLit) // See SimpleLitInput.hlsl, SampleSpecularSmoothness().
        return 0.1h * log2(s) - 0.1h;       // Normally values between [-1, +1] but need [0, +1] to make terrain blending work
    else
        return s;                           // Normally values between [-1, +1] but need [0, +1] to make terrain blending work
}

half UnpackSmoothness(half ps, int lightingMode)
{
    if (lightingMode == kLightingSimpleLit) // See SimpleLitInput.hlsl, SampleSpecularSmoothness().
        return exp2(10.0h * ps + 1.0h);     // values between [ 0, +1]
    else
        return ps;                          // values between [ 0, +1]
}
#endif
|
||||
|
||||
// This will encode SurfaceData into GBuffer
// Layout written here:
//   GBuffer0: albedo.rgb + packed material flags (sRGB rendertarget)
//   GBuffer1: specular.rgb + unused
//   GBuffer2: packed world-space normal + packed smoothness
//   GBuffer3: GI/emission (the camera color / lighting accumulation buffer)
//   GBuffer4: shadow mask / probe occlusion (only when OUTPUT_SHADOWMASK)
FragmentOutput SurfaceDataToGbuffer(SurfaceData surfaceData, InputData inputData, half3 globalIllumination, int lightingMode)
{
    half3 packedNormalWS = PackNormal(inputData.normalWS);
    half packedSmoothness = PackSmoothness(surfaceData.smoothness, lightingMode);

    uint materialFlags = 0;

    // SimpleLit does not use _SPECULARHIGHLIGHTS_OFF to disable specular highlights.

#ifdef _RECEIVE_SHADOWS_OFF
    materialFlags |= kMaterialFlagReceiveShadowsOff;
#endif

#if defined(LIGHTMAP_ON) && defined(_MIXED_LIGHTING_SUBTRACTIVE)
    materialFlags |= kMaterialFlagSubtractiveMixedLighting;
#endif

    FragmentOutput output;
    output.GBuffer0 = half4(surfaceData.albedo.rgb, PackMaterialFlags(materialFlags)); // albedo albedo albedo materialFlags (sRGB rendertarget)
    output.GBuffer1 = half4(surfaceData.specular.rgb, 0);                              // specular specular specular [unused] (sRGB rendertarget)
    output.GBuffer2 = half4(packedNormalWS, packedSmoothness);                         // encoded-normal encoded-normal encoded-normal packed-smoothness
    output.GBuffer3 = half4(globalIllumination, 1);                                    // GI GI GI [optional: see OutputAlpha()] (lighting buffer)
#if OUTPUT_SHADOWMASK
    output.GBuffer4 = inputData.shadowMask;                                            // will have unity_ProbesOcclusion value if subtractive lighting is used (baked)
#endif

    return output;
}
|
||||
|
||||
// This decodes the Gbuffer into a SurfaceData struct
// Fields not stored in the gbuffer (occlusion, metallic, alpha, emission,
// normalTS, clear coat) are filled with neutral defaults so the returned
// struct is fully initialized.
SurfaceData SurfaceDataFromGbuffer(half4 gbuffer0, half4 gbuffer1, half4 gbuffer2, int lightingMode)
{
    SurfaceData surfaceData;

    surfaceData.albedo = gbuffer0.rgb;
    uint materialFlags = UnpackMaterialFlags(gbuffer0.a);
    surfaceData.occlusion = 1.0; // Not used by SimpleLit material.
    surfaceData.specular = gbuffer1.rgb;
    half smoothness = UnpackSmoothness(gbuffer2.a, lightingMode);

    surfaceData.metallic = 0.0; // Not used by SimpleLit material.
    surfaceData.alpha = 1.0; // gbuffer only contains opaque materials
    surfaceData.smoothness = smoothness;

    surfaceData.emission = (half3)0; // Note: this is not made available at lighting pass in this renderer - emission contribution is included (with GI) in the value GBuffer3.rgb, that is used as a renderTarget during lighting
    surfaceData.normalTS = (half3)0; // Note: does this normalTS member need to be in SurfaceData? It looks like an intermediate value

    // Fix: clearCoatMask/clearCoatSmoothness were previously left uninitialized,
    // which returns a partially-initialized struct (undefined values; strict HLSL
    // compilers reject it). Clear coat data is not stored in the gbuffer.
    surfaceData.clearCoatMask = 0.0h;
    surfaceData.clearCoatSmoothness = 0.0h;

    return surfaceData;
}
|
||||
|
||||
// This will encode BRDFData into GBuffer (Lit path).
// Layout written here:
//   GBuffer0: diffuse.rgb + packed material flags (sRGB rendertarget)
//   GBuffer1: specular.rgb + reflectivity (sRGB rendertarget)
//   GBuffer2: packed world-space normal + packed smoothness
//   GBuffer3: GI/emission (the camera color / lighting accumulation buffer)
//   GBuffer4: shadow mask / probe occlusion (only when OUTPUT_SHADOWMASK)
FragmentOutput BRDFDataToGbuffer(BRDFData brdfData, InputData inputData, half smoothness, half3 globalIllumination)
{
    half3 packedNormalWS = PackNormal(inputData.normalWS);
    half packedSmoothness = PackSmoothness(smoothness, kLightingLit);

    uint materialFlags = 0;

#ifdef _RECEIVE_SHADOWS_OFF
    materialFlags |= kMaterialFlagReceiveShadowsOff;
#endif

    half3 specular = brdfData.specular.rgb;
#ifdef _SPECULARHIGHLIGHTS_OFF
    // During the next deferred shading pass, we don't use a shader variant to disable specular calculations.
    // Instead, we can either silence specular contribution when writing the gbuffer, and/or reserve a bit in the gbuffer
    // and use this during shading to skip computations via dynamic branching. Fastest option depends on platforms.
    materialFlags |= kMaterialFlagSpecularHighlightsOff;
    specular = 0.0.xxx;
#endif

#if defined(LIGHTMAP_ON) && defined(_MIXED_LIGHTING_SUBTRACTIVE)
    materialFlags |= kMaterialFlagSubtractiveMixedLighting;
#endif

    FragmentOutput output;
    output.GBuffer0 = half4(brdfData.diffuse.rgb, PackMaterialFlags(materialFlags)); // diffuse diffuse diffuse materialFlags (sRGB rendertarget)
    output.GBuffer1 = half4(specular, brdfData.reflectivity);                        // specular specular specular reflectivity (sRGB rendertarget)
    output.GBuffer2 = half4(packedNormalWS, packedSmoothness);                       // encoded-normal encoded-normal encoded-normal smoothness
    output.GBuffer3 = half4(globalIllumination, 1);                                  // GI GI GI [optional: see OutputAlpha()] (lighting buffer)
#if OUTPUT_SHADOWMASK
    output.GBuffer4 = inputData.shadowMask;                                          // will have unity_ProbesOcclusion value if subtractive lighting is used (baked)
#endif

    return output;
}
|
||||
|
||||
// This decodes the Gbuffer into a BRDFData struct (inverse of BRDFDataToGbuffer).
// Reconstructs the Lit-path BRDF inputs and re-derives the remaining terms via
// InitializeBRDFDataDirect.
BRDFData BRDFDataFromGbuffer(half4 gbuffer0, half4 gbuffer1, half4 gbuffer2)
{
    half3 diffuse = gbuffer0.rgb;
    uint materialFlags = UnpackMaterialFlags(gbuffer0.a);
    half3 specular = gbuffer1.rgb;
    half reflectivity = gbuffer1.a;
    half oneMinusReflectivity = 1.0h - reflectivity;
    half smoothness = UnpackSmoothness(gbuffer2.a, kLightingLit);

    BRDFData brdfData = (BRDFData)0;
    half alpha = 1.0; // NOTE: alpha can get modified, forward writes it out (_ALPHAPREMULTIPLY_ON).
    InitializeBRDFDataDirect(diffuse, specular, reflectivity, oneMinusReflectivity, smoothness, alpha, brdfData);

    return brdfData;
}
|
||||
|
||||
// Builds a partial InputData from the gbuffer normal and a reconstructed
// world-space position. Members that are unavailable at deferred shading time
// are zeroed.
InputData InputDataFromGbufferAndWorldPosition(half4 gbuffer2, float3 wsPos)
{
    InputData inputData;

    inputData.positionWS = wsPos;
    inputData.normalWS = normalize(UnpackNormal(gbuffer2.xyz)); // normalize() is required because terrain shaders use additive blending for normals (not unit-length anymore)

    inputData.viewDirectionWS = SafeNormalize(GetWorldSpaceViewDir(wsPos.xyz));

    // TODO: pass this info?
    inputData.shadowCoord = (float4)0;
    inputData.fogCoord = (half )0;
    inputData.vertexLighting = (half3 )0;

    inputData.bakedGI = (half3)0; // Note: this is not made available at lighting pass in this renderer - bakedGI contribution is included (with emission) in the value GBuffer3.rgb, that is used as a renderTarget during lighting

    // NOTE(review): any InputData members not assigned above (e.g. shadowMask or
    // normalizedScreenSpaceUV, if declared) remain uninitialized — confirm
    // against InputData's definition in Input.hlsl.
    return inputData;
}
|
||||
|
||||
#endif // UNIVERSAL_GBUFFERUTIL_INCLUDED
|
@@ -0,0 +1,256 @@
|
||||
// UNITY_SHADER_NO_UPGRADE
|
||||
|
||||
#ifndef UNIVERSAL_SHADER_VARIABLES_INCLUDED
|
||||
#define UNIVERSAL_SHADER_VARIABLES_INCLUDED
|
||||
|
||||
#if defined(STEREO_INSTANCING_ON) && (defined(SHADER_API_D3D11) || defined(SHADER_API_GLES3) || defined(SHADER_API_GLCORE) || defined(SHADER_API_PSSL) || defined(SHADER_API_VULKAN))
|
||||
#define UNITY_STEREO_INSTANCING_ENABLED
|
||||
#endif
|
||||
|
||||
#if defined(STEREO_MULTIVIEW_ON) && (defined(SHADER_API_GLES3) || defined(SHADER_API_GLCORE) || defined(SHADER_API_VULKAN)) && !(defined(SHADER_API_SWITCH))
|
||||
#define UNITY_STEREO_MULTIVIEW_ENABLED
|
||||
#endif
|
||||
|
||||
#if defined(UNITY_SINGLE_PASS_STEREO) || defined(UNITY_STEREO_INSTANCING_ENABLED) || defined(UNITY_STEREO_MULTIVIEW_ENABLED)
|
||||
#define USING_STEREO_MATRICES
|
||||
#endif
|
||||
|
||||
#if defined(USING_STEREO_MATRICES)
|
||||
// Current pass transforms.
|
||||
#define glstate_matrix_projection unity_StereoMatrixP[unity_StereoEyeIndex] // goes through GL.GetGPUProjectionMatrix()
|
||||
#define unity_MatrixV unity_StereoMatrixV[unity_StereoEyeIndex]
|
||||
#define unity_MatrixInvV unity_StereoMatrixInvV[unity_StereoEyeIndex]
|
||||
#define unity_MatrixInvP unity_StereoMatrixInvP[unity_StereoEyeIndex]
|
||||
#define unity_MatrixVP unity_StereoMatrixVP[unity_StereoEyeIndex]
|
||||
#define unity_MatrixInvVP unity_StereoMatrixInvVP[unity_StereoEyeIndex]
|
||||
|
||||
// Camera transform (but the same as pass transform for XR).
|
||||
#define unity_CameraProjection unity_StereoCameraProjection[unity_StereoEyeIndex] // Does not go through GL.GetGPUProjectionMatrix()
|
||||
#define unity_CameraInvProjection unity_StereoCameraInvProjection[unity_StereoEyeIndex]
|
||||
#define unity_WorldToCamera unity_StereoMatrixV[unity_StereoEyeIndex] // Should be unity_StereoWorldToCamera but no use-case in XR pass
|
||||
#define unity_CameraToWorld unity_StereoMatrixInvV[unity_StereoEyeIndex] // Should be unity_StereoCameraToWorld but no use-case in XR pass
|
||||
#define _WorldSpaceCameraPos unity_StereoWorldSpaceCameraPos[unity_StereoEyeIndex]
|
||||
#endif
|
||||
|
||||
#define UNITY_LIGHTMODEL_AMBIENT (glstate_lightmodel_ambient * 2)
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// Time (t = time since current level load) values from Unity
|
||||
float4 _Time; // (t/20, t, t*2, t*3)
|
||||
float4 _SinTime; // sin(t/8), sin(t/4), sin(t/2), sin(t)
|
||||
float4 _CosTime; // cos(t/8), cos(t/4), cos(t/2), cos(t)
|
||||
float4 unity_DeltaTime; // dt, 1/dt, smoothdt, 1/smoothdt
|
||||
float4 _TimeParameters; // t, sin(t), cos(t)
|
||||
|
||||
#if !defined(USING_STEREO_MATRICES)
|
||||
float3 _WorldSpaceCameraPos;
|
||||
#endif
|
||||
|
||||
// x = 1 or -1 (-1 if projection is flipped)
|
||||
// y = near plane
|
||||
// z = far plane
|
||||
// w = 1/far plane
|
||||
float4 _ProjectionParams;
|
||||
|
||||
// x = width
|
||||
// y = height
|
||||
// z = 1 + 1.0/width
|
||||
// w = 1 + 1.0/height
|
||||
float4 _ScreenParams;
|
||||
|
||||
// Values used to linearize the Z buffer (http://www.humus.name/temp/Linearize%20depth.txt)
|
||||
// x = 1-far/near
|
||||
// y = far/near
|
||||
// z = x/far
|
||||
// w = y/far
|
||||
// or in case of a reversed depth buffer (UNITY_REVERSED_Z is 1)
|
||||
// x = -1+far/near
|
||||
// y = 1
|
||||
// z = x/far
|
||||
// w = 1/far
|
||||
float4 _ZBufferParams;
|
||||
|
||||
// x = orthographic camera's width
|
||||
// y = orthographic camera's height
|
||||
// z = unused
|
||||
// w = 1.0 if camera is ortho, 0.0 if perspective
|
||||
float4 unity_OrthoParams;
|
||||
|
||||
// scaleBias.x = flipSign
|
||||
// scaleBias.y = scale
|
||||
// scaleBias.z = bias
|
||||
// scaleBias.w = unused
|
||||
uniform float4 _ScaleBias;
|
||||
uniform float4 _ScaleBiasRt;
|
||||
|
||||
float4 unity_CameraWorldClipPlanes[6];
|
||||
|
||||
#if !defined(USING_STEREO_MATRICES)
|
||||
// Projection matrices of the camera. Note that this might be different from projection matrix
|
||||
// that is set right now, e.g. while rendering shadows the matrices below are still the projection
|
||||
// of original camera.
|
||||
float4x4 unity_CameraProjection;
|
||||
float4x4 unity_CameraInvProjection;
|
||||
float4x4 unity_WorldToCamera;
|
||||
float4x4 unity_CameraToWorld;
|
||||
#endif
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// Block Layout should be respected due to SRP Batcher
|
||||
CBUFFER_START(UnityPerDraw)
|
||||
// Space block Feature
|
||||
float4x4 unity_ObjectToWorld;
|
||||
float4x4 unity_WorldToObject;
|
||||
float4 unity_LODFade; // x is the fade value ranging within [0,1]. y is x quantized into 16 levels
|
||||
real4 unity_WorldTransformParams; // w is usually 1.0, or -1.0 for odd-negative scale transforms
|
||||
|
||||
// Light Indices block feature
|
||||
// These are set internally by the engine upon request by RendererConfiguration.
|
||||
real4 unity_LightData;
|
||||
real4 unity_LightIndices[2];
|
||||
|
||||
float4 unity_ProbesOcclusion;
|
||||
|
||||
// Reflection Probe 0 block feature
|
||||
// HDR environment map decode instructions
|
||||
real4 unity_SpecCube0_HDR;
|
||||
|
||||
// Lightmap block feature
|
||||
float4 unity_LightmapST;
|
||||
float4 unity_LightmapIndex;
|
||||
float4 unity_DynamicLightmapST;
|
||||
|
||||
// SH block feature
|
||||
real4 unity_SHAr;
|
||||
real4 unity_SHAg;
|
||||
real4 unity_SHAb;
|
||||
real4 unity_SHBr;
|
||||
real4 unity_SHBg;
|
||||
real4 unity_SHBb;
|
||||
real4 unity_SHC;
|
||||
CBUFFER_END
|
||||
|
||||
#if defined(USING_STEREO_MATRICES)
|
||||
CBUFFER_START(UnityStereoViewBuffer)
|
||||
float4x4 unity_StereoMatrixP[2];
|
||||
float4x4 unity_StereoMatrixInvP[2];
|
||||
float4x4 unity_StereoMatrixV[2];
|
||||
float4x4 unity_StereoMatrixInvV[2];
|
||||
float4x4 unity_StereoMatrixVP[2];
|
||||
float4x4 unity_StereoMatrixInvVP[2];
|
||||
|
||||
float4x4 unity_StereoCameraProjection[2];
|
||||
float4x4 unity_StereoCameraInvProjection[2];
|
||||
|
||||
float3 unity_StereoWorldSpaceCameraPos[2];
|
||||
float4 unity_StereoScaleOffset[2];
|
||||
CBUFFER_END
|
||||
#endif
|
||||
|
||||
#if defined(USING_STEREO_MATRICES) && defined(UNITY_STEREO_MULTIVIEW_ENABLED)
|
||||
CBUFFER_START(UnityStereoEyeIndices)
|
||||
float4 unity_StereoEyeIndices[2];
|
||||
CBUFFER_END
|
||||
#endif
|
||||
|
||||
#if defined(UNITY_STEREO_MULTIVIEW_ENABLED) && defined(SHADER_STAGE_VERTEX)
|
||||
// OVR_multiview
|
||||
// In order to convey this info over the DX compiler, we wrap it into a cbuffer.
|
||||
#if !defined(UNITY_DECLARE_MULTIVIEW)
|
||||
#define UNITY_DECLARE_MULTIVIEW(number_of_views) CBUFFER_START(OVR_multiview) uint gl_ViewID; uint numViews_##number_of_views; CBUFFER_END
|
||||
#define UNITY_VIEWID gl_ViewID
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(UNITY_STEREO_MULTIVIEW_ENABLED) && defined(SHADER_STAGE_VERTEX)
|
||||
#define unity_StereoEyeIndex UNITY_VIEWID
|
||||
UNITY_DECLARE_MULTIVIEW(2);
|
||||
#elif defined(UNITY_STEREO_INSTANCING_ENABLED) || defined(UNITY_STEREO_MULTIVIEW_ENABLED)
|
||||
static uint unity_StereoEyeIndex;
|
||||
#elif defined(UNITY_SINGLE_PASS_STEREO)
|
||||
CBUFFER_START(UnityStereoEyeIndex)
|
||||
int unity_StereoEyeIndex;
|
||||
CBUFFER_END
|
||||
#endif
|
||||
|
||||
float4x4 glstate_matrix_transpose_modelview0;
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
real4 glstate_lightmodel_ambient;
|
||||
real4 unity_AmbientSky;
|
||||
real4 unity_AmbientEquator;
|
||||
real4 unity_AmbientGround;
|
||||
real4 unity_IndirectSpecColor;
|
||||
float4 unity_FogParams;
|
||||
real4 unity_FogColor;
|
||||
|
||||
#if !defined(USING_STEREO_MATRICES)
|
||||
float4x4 glstate_matrix_projection;
|
||||
float4x4 unity_MatrixV;
|
||||
float4x4 unity_MatrixInvV;
|
||||
float4x4 unity_MatrixInvP;
|
||||
float4x4 unity_MatrixVP;
|
||||
float4x4 unity_MatrixInvVP;
|
||||
float4 unity_StereoScaleOffset;
|
||||
int unity_StereoEyeIndex;
|
||||
#endif
|
||||
|
||||
real4 unity_ShadowColor;
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// Unity specific
|
||||
TEXTURECUBE(unity_SpecCube0);
|
||||
SAMPLER(samplerunity_SpecCube0);
|
||||
|
||||
// Main lightmap
|
||||
TEXTURE2D(unity_Lightmap);
|
||||
SAMPLER(samplerunity_Lightmap);
|
||||
TEXTURE2D_ARRAY(unity_Lightmaps);
|
||||
SAMPLER(samplerunity_Lightmaps);
|
||||
|
||||
// Dual or directional lightmap (always used with unity_Lightmap, so can share sampler)
|
||||
TEXTURE2D(unity_LightmapInd);
|
||||
TEXTURE2D_ARRAY(unity_LightmapsInd);
|
||||
|
||||
TEXTURE2D(unity_ShadowMask);
|
||||
SAMPLER(samplerunity_ShadowMask);
|
||||
TEXTURE2D_ARRAY(unity_ShadowMasks);
|
||||
SAMPLER(samplerunity_ShadowMasks);
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
// TODO: all affine matrices should be 3x4.
|
||||
// TODO: sort these vars by the frequency of use (descending), and put commonly used vars together.
|
||||
// Note: please use UNITY_MATRIX_X macros instead of referencing matrix variables directly.
|
||||
float4x4 _PrevViewProjMatrix;
|
||||
float4x4 _ViewProjMatrix;
|
||||
float4x4 _NonJitteredViewProjMatrix;
|
||||
float4x4 _ViewMatrix;
|
||||
float4x4 _ProjMatrix;
|
||||
float4x4 _InvViewProjMatrix;
|
||||
float4x4 _InvViewMatrix;
|
||||
float4x4 _InvProjMatrix;
|
||||
float4 _InvProjParam;
|
||||
float4 _ScreenSize; // {w, h, 1/w, 1/h}
|
||||
float4 _FrustumPlanes[6]; // {(a, b, c) = N, d = -dot(N, P)} [L, R, T, B, N, F]
|
||||
|
||||
// Zeroes the matrix entries that are constant 0 for orthographic, perspective
// and oblique projection matrices, letting the compiler fold away arithmetic
// on those terms.
float4x4 OptimizeProjectionMatrix(float4x4 M)
{
    // Matrix format (x = non-constant value).
    // Orthographic Perspective  Combined(OR)
    // | x 0 0 x |  | x 0 x 0 |  | x 0 x x |
    // | 0 x 0 x |  | 0 x x 0 |  | 0 x x x |
    // | x x x x |  | x x x x |  | x x x x | <- oblique projection row
    // | 0 0 0 1 |  | 0 0 x 0 |  | 0 0 x x |
    // Notice that some values are always 0.
    // We can avoid loading and doing math with constants.
    M._21_41 = 0; // column 1, rows 2 and 4 (zero in the combined mask above)
    M._12_42 = 0; // column 2, rows 1 and 4
    return M;
}
|
||||
|
||||
#endif // UNIVERSAL_SHADER_VARIABLES_INCLUDED
|
@@ -0,0 +1,50 @@
|
||||
#ifndef UNIVERSAL_DOTS_INSTANCING_INCLUDED
|
||||
#define UNIVERSAL_DOTS_INSTANCING_INCLUDED
|
||||
|
||||
#ifdef UNITY_DOTS_INSTANCING_ENABLED
|
||||
|
||||
#undef unity_ObjectToWorld
|
||||
#undef unity_WorldToObject
|
||||
// TODO: This might not work correctly in all cases, double check!
|
||||
UNITY_DOTS_INSTANCING_START(BuiltinPropertyMetadata)
|
||||
UNITY_DOTS_INSTANCED_PROP(float3x4, unity_ObjectToWorld)
|
||||
UNITY_DOTS_INSTANCED_PROP(float3x4, unity_WorldToObject)
|
||||
UNITY_DOTS_INSTANCED_PROP(float4, unity_LODFade)
|
||||
UNITY_DOTS_INSTANCED_PROP(float4, unity_WorldTransformParams)
|
||||
UNITY_DOTS_INSTANCED_PROP(float4, unity_LightData)
|
||||
UNITY_DOTS_INSTANCED_PROP(float2x4, unity_LightIndices)
|
||||
UNITY_DOTS_INSTANCED_PROP(float4, unity_ProbesOcclusion)
|
||||
UNITY_DOTS_INSTANCED_PROP(float4, unity_SpecCube0_HDR)
|
||||
UNITY_DOTS_INSTANCED_PROP(float4, unity_LightmapST)
|
||||
UNITY_DOTS_INSTANCED_PROP(float4, unity_LightmapIndex)
|
||||
UNITY_DOTS_INSTANCED_PROP(float4, unity_DynamicLightmapST)
|
||||
UNITY_DOTS_INSTANCED_PROP(float4, unity_SHAr)
|
||||
UNITY_DOTS_INSTANCED_PROP(float4, unity_SHAg)
|
||||
UNITY_DOTS_INSTANCED_PROP(float4, unity_SHAb)
|
||||
UNITY_DOTS_INSTANCED_PROP(float4, unity_SHBr)
|
||||
UNITY_DOTS_INSTANCED_PROP(float4, unity_SHBg)
|
||||
UNITY_DOTS_INSTANCED_PROP(float4, unity_SHBb)
|
||||
UNITY_DOTS_INSTANCED_PROP(float4, unity_SHC)
|
||||
UNITY_DOTS_INSTANCING_END(BuiltinPropertyMetadata)
|
||||
|
||||
// Note: Macros for unity_ObjectToWorld and unity_WorldToObject are declared in UnityInstancing.hlsl
|
||||
// because of some special handling
|
||||
#define unity_LODFade UNITY_ACCESS_DOTS_INSTANCED_PROP_FROM_MACRO(float4, Metadata_unity_LODFade)
|
||||
#define unity_WorldTransformParams UNITY_ACCESS_DOTS_INSTANCED_PROP_FROM_MACRO(float4, Metadata_unity_WorldTransformParams)
|
||||
#define unity_LightData UNITY_ACCESS_DOTS_INSTANCED_PROP_FROM_MACRO(float4, Metadata_unity_LightData)
|
||||
#define unity_LightIndices UNITY_ACCESS_DOTS_INSTANCED_PROP_FROM_MACRO(float2x4, Metadata_unity_LightIndices)
|
||||
#define unity_ProbesOcclusion UNITY_ACCESS_DOTS_INSTANCED_PROP_FROM_MACRO(float4, Metadata_unity_ProbesOcclusion)
|
||||
#define unity_SpecCube0_HDR UNITY_ACCESS_DOTS_INSTANCED_PROP_FROM_MACRO(float4, Metadata_unity_SpecCube0_HDR)
|
||||
#define unity_LightmapST UNITY_ACCESS_DOTS_INSTANCED_PROP_FROM_MACRO(float4, Metadata_unity_LightmapST)
|
||||
#define unity_LightmapIndex UNITY_ACCESS_DOTS_INSTANCED_PROP_FROM_MACRO(float4, Metadata_unity_LightmapIndex)
|
||||
#define unity_DynamicLightmapST UNITY_ACCESS_DOTS_INSTANCED_PROP_FROM_MACRO(float4, Metadata_unity_DynamicLightmapST)
|
||||
#define unity_SHAr UNITY_ACCESS_DOTS_INSTANCED_PROP_FROM_MACRO(float4, Metadata_unity_SHAr)
|
||||
#define unity_SHAg UNITY_ACCESS_DOTS_INSTANCED_PROP_FROM_MACRO(float4, Metadata_unity_SHAg)
|
||||
#define unity_SHAb UNITY_ACCESS_DOTS_INSTANCED_PROP_FROM_MACRO(float4, Metadata_unity_SHAb)
|
||||
#define unity_SHBr UNITY_ACCESS_DOTS_INSTANCED_PROP_FROM_MACRO(float4, Metadata_unity_SHBr)
|
||||
#define unity_SHBg UNITY_ACCESS_DOTS_INSTANCED_PROP_FROM_MACRO(float4, Metadata_unity_SHBg)
|
||||
#define unity_SHBb UNITY_ACCESS_DOTS_INSTANCED_PROP_FROM_MACRO(float4, Metadata_unity_SHBb)
|
||||
#define unity_SHC UNITY_ACCESS_DOTS_INSTANCED_PROP_FROM_MACRO(float4, Metadata_unity_SHC)
|
||||
#endif
|
||||
|
||||
#endif
|
Reference in New Issue
Block a user