using System; using System.Diagnostics; using System.Collections.Generic; using Unity.Collections; using UnityEngine.Profiling; namespace UnityEngine.Rendering.Universal { /// /// Class ScriptableRenderer implements a rendering strategy. It describes how culling and lighting work and /// the effects supported. /// /// A renderer can be used for all cameras or be overridden on a per-camera basis. It will implement light culling and setup /// and describe a list of ScriptableRenderPass to execute in a frame. The renderer can be extended to support more effects with additional /// ScriptableRendererFeature. Resources for the renderer are serialized in ScriptableRendererData. /// /// The renderer resources are serialized in ScriptableRendererData. Illustrative usage sketches are appended at the end of this file. /// /// /// /// public abstract partial class ScriptableRenderer : IDisposable { private static class Profiling { private const string k_Name = nameof(ScriptableRenderer); public static readonly ProfilingSampler setPerCameraShaderVariables = new ProfilingSampler($"{k_Name}.{nameof(SetPerCameraShaderVariables)}"); public static readonly ProfilingSampler sortRenderPasses = new ProfilingSampler($"Sort Render Passes"); public static readonly ProfilingSampler setupLights = new ProfilingSampler($"{k_Name}.{nameof(SetupLights)}"); public static readonly ProfilingSampler setupCamera = new ProfilingSampler($"Setup Camera Parameters"); public static readonly ProfilingSampler addRenderPasses = new ProfilingSampler($"{k_Name}.{nameof(AddRenderPasses)}"); public static readonly ProfilingSampler clearRenderingState = new ProfilingSampler($"{k_Name}.{nameof(ClearRenderingState)}"); public static readonly ProfilingSampler internalStartRendering = new ProfilingSampler($"{k_Name}.{nameof(InternalStartRendering)}"); public static readonly ProfilingSampler internalFinishRendering = new ProfilingSampler($"{k_Name}.{nameof(InternalFinishRendering)}"); public static class RenderBlock { private const string k_Name = nameof(RenderPassBlock); public static readonly ProfilingSampler beforeRendering = new ProfilingSampler($"{k_Name}.{nameof(RenderPassBlock.BeforeRendering)}"); public static readonly ProfilingSampler mainRenderingOpaque = new ProfilingSampler($"{k_Name}.{nameof(RenderPassBlock.MainRenderingOpaque)}"); public static readonly ProfilingSampler mainRenderingTransparent = new ProfilingSampler($"{k_Name}.{nameof(RenderPassBlock.MainRenderingTransparent)}"); public static readonly ProfilingSampler afterRendering = new ProfilingSampler($"{k_Name}.{nameof(RenderPassBlock.AfterRendering)}"); } public static class RenderPass { private const string k_Name = nameof(ScriptableRenderPass); public static readonly ProfilingSampler configure = new ProfilingSampler($"{k_Name}.{nameof(ScriptableRenderPass.Configure)}"); } } /// /// Override to provide a custom profiling name /// protected ProfilingSampler profilingExecute { get; set; } /// /// Configures the supported features for this renderer. When creating custom renderers /// for the Universal Render Pipeline you can choose to opt in or out of specific features. /// public class RenderingFeatures { /// /// This setting controls if the camera editor should display the camera stack category. /// Renderers that don't support camera stacking will only render cameras of type CameraRenderType.Base /// /// /// public bool cameraStacking { get; set; } = false; /// /// This setting controls if the Universal Render Pipeline asset should expose the MSAA option.
/// public bool msaa { get; set; } = true; } /// /// The renderer we are currently rendering with, for low-level render control only. /// current is null outside rendering scope. /// Similar to https://docs.unity3d.com/ScriptReference/Camera-current.html /// internal static ScriptableRenderer current = null; /// /// Set camera matrices. This method will set UNITY_MATRIX_V, UNITY_MATRIX_P, UNITY_MATRIX_VP to camera matrices. /// Additionally this will also set unity_CameraProjection and unity_CameraInvProjection. /// If setInverseMatrices is set to true this function will also set UNITY_MATRIX_I_V and UNITY_MATRIX_I_VP. /// This function has no effect when rendering in stereo. When in stereo rendering you cannot override camera matrices. /// If you need to set general purpose view and projection matrices call SetViewAndProjectionMatrices instead. /// /// CommandBuffer to submit data to GPU. /// CameraData containing camera matrices information. /// Set this to true if you also need to set inverse camera matrices. public static void SetCameraMatrices(CommandBuffer cmd, ref CameraData cameraData, bool setInverseMatrices) { #if ENABLE_VR && ENABLE_XR_MODULE if (cameraData.xr.enabled) { cameraData.xr.UpdateGPUViewAndProjectionMatrices(cmd, ref cameraData, cameraData.xr.renderTargetIsRenderTexture); return; } #endif Matrix4x4 viewMatrix = cameraData.GetViewMatrix(); Matrix4x4 projectionMatrix = cameraData.GetProjectionMatrix(); // TODO: Investigate why SetViewAndProjectionMatrices is causing y-flip / winding order issues // for now using cmd.SetViewProjectionMatrices //SetViewAndProjectionMatrices(cmd, viewMatrix, cameraData.GetDeviceProjectionMatrix(), setInverseMatrices); cmd.SetViewProjectionMatrices(viewMatrix, projectionMatrix); if (setInverseMatrices) { Matrix4x4 gpuProjectionMatrix = cameraData.GetGPUProjectionMatrix(); Matrix4x4 viewAndProjectionMatrix = gpuProjectionMatrix * viewMatrix; Matrix4x4 inverseViewMatrix = Matrix4x4.Inverse(viewMatrix); Matrix4x4 inverseProjectionMatrix = Matrix4x4.Inverse(gpuProjectionMatrix); Matrix4x4 inverseViewProjection = inverseViewMatrix * inverseProjectionMatrix; // There's an inconsistency in handedness between unity_matrixV and unity_WorldToCamera // Unity changes the handedness of unity_WorldToCamera (see Camera::CalculateMatrixShaderProps) // we will also change it here to avoid breaking existing shaders. (case 1257518) Matrix4x4 worldToCameraMatrix = Matrix4x4.Scale(new Vector3(1.0f, 1.0f, -1.0f)) * viewMatrix; Matrix4x4 cameraToWorldMatrix = worldToCameraMatrix.inverse; cmd.SetGlobalMatrix(ShaderPropertyId.worldToCameraMatrix, worldToCameraMatrix); cmd.SetGlobalMatrix(ShaderPropertyId.cameraToWorldMatrix, cameraToWorldMatrix); cmd.SetGlobalMatrix(ShaderPropertyId.inverseViewMatrix, inverseViewMatrix); cmd.SetGlobalMatrix(ShaderPropertyId.inverseProjectionMatrix, inverseProjectionMatrix); cmd.SetGlobalMatrix(ShaderPropertyId.inverseViewAndProjectionMatrix, inverseViewProjection); } // TODO: missing unity_CameraWorldClipPlanes[6], currently set by context.SetupCameraProperties } /// /// Set camera and screen shader variables as described in https://docs.unity3d.com/Manual/SL-UnityShaderVariables.html /// /// CommandBuffer to submit data to GPU. /// CameraData containing camera matrices information.
void SetPerCameraShaderVariables(CommandBuffer cmd, ref CameraData cameraData) { using var profScope = new ProfilingScope(cmd, Profiling.setPerCameraShaderVariables); Camera camera = cameraData.camera; Rect pixelRect = cameraData.pixelRect; float renderScale = cameraData.isSceneViewCamera ? 1f : cameraData.renderScale; float scaledCameraWidth = (float)pixelRect.width * renderScale; float scaledCameraHeight = (float)pixelRect.height * renderScale; float cameraWidth = (float)pixelRect.width; float cameraHeight = (float)pixelRect.height; // Use eye texture's width and height as screen params when XR is enabled if (cameraData.xr.enabled) { scaledCameraWidth = (float)cameraData.cameraTargetDescriptor.width; scaledCameraHeight = (float)cameraData.cameraTargetDescriptor.height; cameraWidth = (float)cameraData.cameraTargetDescriptor.width; cameraHeight = (float)cameraData.cameraTargetDescriptor.height; } if (camera.allowDynamicResolution) { scaledCameraWidth *= ScalableBufferManager.widthScaleFactor; scaledCameraHeight *= ScalableBufferManager.heightScaleFactor; } float near = camera.nearClipPlane; float far = camera.farClipPlane; float invNear = Mathf.Approximately(near, 0.0f) ? 0.0f : 1.0f / near; float invFar = Mathf.Approximately(far, 0.0f) ? 0.0f : 1.0f / far; float isOrthographic = camera.orthographic ? 1.0f : 0.0f; // From http://www.humus.name/temp/Linearize%20depth.txt // But as depth component textures on OpenGL always return in 0..1 range (as in D3D), we have to use // the same constants for both D3D and OpenGL here. // OpenGL would be this: // zc0 = (1.0 - far / near) / 2.0; // zc1 = (1.0 + far / near) / 2.0; // D3D is this: float zc0 = 1.0f - far * invNear; float zc1 = far * invNear; Vector4 zBufferParams = new Vector4(zc0, zc1, zc0 * invFar, zc1 * invFar); if (SystemInfo.usesReversedZBuffer) { zBufferParams.y += zBufferParams.x; zBufferParams.x = -zBufferParams.x; zBufferParams.w += zBufferParams.z; zBufferParams.z = -zBufferParams.z; } // Projection flip sign logic is very deep in GfxDevice::SetInvertProjectionMatrix // For now we don't deal with _ProjectionParams.x and let SetupCameraProperties handle it. // We need to enable this when we remove SetupCameraProperties // float projectionFlipSign = ??? // Vector4 projectionParams = new Vector4(projectionFlipSign, near, far, 1.0f * invFar); // cmd.SetGlobalVector(ShaderPropertyId.projectionParams, projectionParams); Vector4 orthoParams = new Vector4(camera.orthographicSize * cameraData.aspectRatio, camera.orthographicSize, 0.0f, isOrthographic); // Camera and Screen variables as described in https://docs.unity3d.com/Manual/SL-UnityShaderVariables.html cmd.SetGlobalVector(ShaderPropertyId.worldSpaceCameraPos, camera.transform.position); cmd.SetGlobalVector(ShaderPropertyId.screenParams, new Vector4(cameraWidth, cameraHeight, 1.0f + 1.0f / cameraWidth, 1.0f + 1.0f / cameraHeight)); cmd.SetGlobalVector(ShaderPropertyId.scaledScreenParams, new Vector4(scaledCameraWidth, scaledCameraHeight, 1.0f + 1.0f / scaledCameraWidth, 1.0f + 1.0f / scaledCameraHeight)); cmd.SetGlobalVector(ShaderPropertyId.zBufferParams, zBufferParams); cmd.SetGlobalVector(ShaderPropertyId.orthoParams, orthoParams); } /// /// Set shader time variables as described in https://docs.unity3d.com/Manual/SL-UnityShaderVariables.html /// /// CommandBuffer to submit data to GPU. /// Time. /// Delta time. /// Smooth delta time. 
void SetShaderTimeValues(CommandBuffer cmd, float time, float deltaTime, float smoothDeltaTime) { float timeEights = time / 8f; float timeFourth = time / 4f; float timeHalf = time / 2f; // Time values Vector4 timeVector = time * new Vector4(1f / 20f, 1f, 2f, 3f); Vector4 sinTimeVector = new Vector4(Mathf.Sin(timeEights), Mathf.Sin(timeFourth), Mathf.Sin(timeHalf), Mathf.Sin(time)); Vector4 cosTimeVector = new Vector4(Mathf.Cos(timeEights), Mathf.Cos(timeFourth), Mathf.Cos(timeHalf), Mathf.Cos(time)); Vector4 deltaTimeVector = new Vector4(deltaTime, 1f / deltaTime, smoothDeltaTime, 1f / smoothDeltaTime); Vector4 timeParametersVector = new Vector4(time, Mathf.Sin(time), Mathf.Cos(time), 0.0f); cmd.SetGlobalVector(ShaderPropertyId.time, timeVector); cmd.SetGlobalVector(ShaderPropertyId.sinTime, sinTimeVector); cmd.SetGlobalVector(ShaderPropertyId.cosTime, cosTimeVector); cmd.SetGlobalVector(ShaderPropertyId.deltaTime, deltaTimeVector); cmd.SetGlobalVector(ShaderPropertyId.timeParameters, timeParametersVector); } /// /// Returns the camera color target for this renderer. /// It's only valid to call cameraColorTarget in the scope of ScriptableRenderPass. /// . /// public RenderTargetIdentifier cameraColorTarget { get { if (!(m_IsPipelineExecuting || isCameraColorTargetValid)) { Debug.LogWarning("You can only call cameraColorTarget inside the scope of a ScriptableRenderPass. Otherwise the pipeline camera target texture might have not been created or might have already been disposed."); // TODO: Ideally we should return an error texture (BuiltinRenderTextureType.None?) // but this might break some existing content, so we return the pipeline texture in the hope it gives a "soft" upgrade to users. } return m_CameraColorTarget; } } /// /// Returns the camera depth target for this renderer. /// It's only valid to call cameraDepthTarget in the scope of ScriptableRenderPass. /// . /// public RenderTargetIdentifier cameraDepthTarget { get { if (!m_IsPipelineExecuting) { Debug.LogWarning("You can only call cameraDepthTarget inside the scope of a ScriptableRenderPass. Otherwise the pipeline camera target texture might have not been created or might have already been disposed."); // TODO: Ideally we should return an error texture (BuiltinRenderTextureType.None?) // but this might break some existing content, so we return the pipeline texture in the hope it gives a "soft" upgrade to users. } return m_CameraDepthTarget; } } /// /// Returns a list of renderer features added to this renderer. /// /// protected List<ScriptableRendererFeature> rendererFeatures { get => m_RendererFeatures; } /// /// Returns a list of render passes scheduled to be executed by this renderer. /// /// protected List<ScriptableRenderPass> activeRenderPassQueue { get => m_ActiveRenderPassQueue; } /// /// Supported rendering features by this renderer. /// /// public RenderingFeatures supportedRenderingFeatures { get; set; } = new RenderingFeatures(); /// /// List of unsupported Graphics APIs for this renderer. /// /// public GraphicsDeviceType[] unsupportedGraphicsDeviceTypes { get; set; } = new GraphicsDeviceType[0]; static class RenderPassBlock { // Executes render passes that are inputs to the main rendering // but don't depend on camera state. They all render in monoscopic mode. e.g. shadow maps. public static readonly int BeforeRendering = 0; // Main bulk of render pass execution. They require camera state to be properly set // and when enabled they will render in stereo.
public static readonly int MainRenderingOpaque = 1; public static readonly int MainRenderingTransparent = 2; // Execute after Post-processing. public static readonly int AfterRendering = 3; } const int k_RenderPassBlockCount = 4; List<ScriptableRenderPass> m_ActiveRenderPassQueue = new List<ScriptableRenderPass>(32); List<ScriptableRendererFeature> m_RendererFeatures = new List<ScriptableRendererFeature>(10); RenderTargetIdentifier m_CameraColorTarget; RenderTargetIdentifier m_CameraDepthTarget; bool m_FirstTimeCameraColorTargetIsBound = true; // flag used to track when m_CameraColorTarget should be cleared (if necessary), as well as other special actions only performed the first time m_CameraColorTarget is bound as a render target bool m_FirstTimeCameraDepthTargetIsBound = true; // flag used to track when m_CameraDepthTarget should be cleared (if necessary), the first time m_CameraDepthTarget is bound as a render target // The pipeline can only guarantee that the camera target textures are valid while the pipeline is executing. // Trying to access the camera targets before or after that may fail because the pipeline textures have not been created yet or have already been disposed. bool m_IsPipelineExecuting = false; // This should be removed when early camera color target assignment is removed. internal bool isCameraColorTargetValid = false; static RenderTargetIdentifier[] m_ActiveColorAttachments = new RenderTargetIdentifier[] {0, 0, 0, 0, 0, 0, 0, 0 }; static RenderTargetIdentifier m_ActiveDepthAttachment; // CommandBuffer.SetRenderTarget(RenderTargetIdentifier[] colors, RenderTargetIdentifier depth, int mipLevel, CubemapFace cubemapFace, int depthSlice); // called from CoreUtils.SetRenderTarget will issue a warning assert from native c++ side if "colors" array contains some invalid RTIDs. // To avoid that warning assert we trim the RenderTargetIdentifier[] arrays we pass to CoreUtils.SetRenderTarget.
// To avoid re-allocating a new array every time we do that, we re-use one of these arrays: static RenderTargetIdentifier[][] m_TrimmedColorAttachmentCopies = new RenderTargetIdentifier[][] { new RenderTargetIdentifier[0], // m_TrimmedColorAttachmentCopies[0] is an array of 0 RenderTargetIdentifier - only used to make indexing code easier to read new RenderTargetIdentifier[] {0}, // m_TrimmedColorAttachmentCopies[1] is an array of 1 RenderTargetIdentifier new RenderTargetIdentifier[] {0, 0}, // m_TrimmedColorAttachmentCopies[2] is an array of 2 RenderTargetIdentifiers new RenderTargetIdentifier[] {0, 0, 0}, // m_TrimmedColorAttachmentCopies[3] is an array of 3 RenderTargetIdentifiers new RenderTargetIdentifier[] {0, 0, 0, 0}, // m_TrimmedColorAttachmentCopies[4] is an array of 4 RenderTargetIdentifiers new RenderTargetIdentifier[] {0, 0, 0, 0, 0}, // m_TrimmedColorAttachmentCopies[5] is an array of 5 RenderTargetIdentifiers new RenderTargetIdentifier[] {0, 0, 0, 0, 0, 0}, // m_TrimmedColorAttachmentCopies[6] is an array of 6 RenderTargetIdentifiers new RenderTargetIdentifier[] {0, 0, 0, 0, 0, 0, 0}, // m_TrimmedColorAttachmentCopies[7] is an array of 7 RenderTargetIdentifiers new RenderTargetIdentifier[] {0, 0, 0, 0, 0, 0, 0, 0 }, // m_TrimmedColorAttachmentCopies[8] is an array of 8 RenderTargetIdentifiers }; internal static void ConfigureActiveTarget(RenderTargetIdentifier colorAttachment, RenderTargetIdentifier depthAttachment) { m_ActiveColorAttachments[0] = colorAttachment; for (int i = 1; i < m_ActiveColorAttachments.Length; ++i) m_ActiveColorAttachments[i] = 0; m_ActiveDepthAttachment = depthAttachment; } public ScriptableRenderer(ScriptableRendererData data) { profilingExecute = new ProfilingSampler($"{nameof(ScriptableRenderer)}.{nameof(ScriptableRenderer.Execute)}: {data.name}"); foreach (var feature in data.rendererFeatures) { if (feature == null) continue; feature.Create(); m_RendererFeatures.Add(feature); } Clear(CameraRenderType.Base); m_ActiveRenderPassQueue.Clear(); } public void Dispose() { // Dispose all renderer features... for (int i = 0; i < m_RendererFeatures.Count; ++i) { if (rendererFeatures[i] == null) continue; rendererFeatures[i].Dispose(); } Dispose(true); GC.SuppressFinalize(this); } protected virtual void Dispose(bool disposing) { } /// /// Configures the camera target. /// /// Camera color target. Pass BuiltinRenderTextureType.CameraTarget if rendering to backbuffer. /// Camera depth target. Pass BuiltinRenderTextureType.CameraTarget if color has depth or rendering to backbuffer. public void ConfigureCameraTarget(RenderTargetIdentifier colorTarget, RenderTargetIdentifier depthTarget) { m_CameraColorTarget = colorTarget; m_CameraDepthTarget = depthTarget; } // This should be removed when early camera color target assignment is removed. internal void ConfigureCameraColorTarget(RenderTargetIdentifier colorTarget) { m_CameraColorTarget = colorTarget; } /// /// Configures the render passes that will execute for this renderer. /// This method is called per-camera every frame. /// /// Use this render context to issue any draw commands during execution. /// Current render state information. /// /// public abstract void Setup(ScriptableRenderContext context, ref RenderingData renderingData); /// /// Override this method to implement the lighting setup for the renderer. You can use this to /// compute and upload light CBUFFER for example. /// /// Use this render context to issue any draw commands during execution. /// Current render state information. 
public virtual void SetupLights(ScriptableRenderContext context, ref RenderingData renderingData) { } /// /// Override this method to configure the culling parameters for the renderer. You can use this to configure, for example, whether /// lights should be culled per-object or what the maximum shadow distance is. /// /// Use this to change culling parameters used by the render pipeline. /// Current render state information. public virtual void SetupCullingParameters(ref ScriptableCullingParameters cullingParameters, ref CameraData cameraData) { } /// /// Called upon finishing rendering the camera stack. You can release any resources created by the renderer here. /// /// public virtual void FinishRendering(CommandBuffer cmd) { } /// /// Execute the enqueued render passes. This automatically handles editor and stereo rendering. /// /// Use this render context to issue any draw commands during execution. /// Current render state information. public void Execute(ScriptableRenderContext context, ref RenderingData renderingData) { m_IsPipelineExecuting = true; ref CameraData cameraData = ref renderingData.cameraData; Camera camera = cameraData.camera; CommandBuffer cmd = CommandBufferPool.Get(); // TODO: move skybox code from C++ to URP in order to remove the call to context.Submit() inside DrawSkyboxPass // Until then, we can't use nested profiling scopes with XR multipass CommandBuffer cmdScope = renderingData.cameraData.xr.enabled ? null : cmd; using (new ProfilingScope(cmdScope, profilingExecute)) { InternalStartRendering(context, ref renderingData); // Cache the time for after the call to `SetupCameraProperties` and set the time variables in shader // For now we set the time variables per camera, as we plan to remove `SetupCameraProperties`. // Setting the time per frame would take API changes to pass the variable to each camera render. // Once `SetupCameraProperties` is gone, the variable should be set higher in the call-stack. #if UNITY_EDITOR float time = Application.isPlaying ? Time.time : Time.realtimeSinceStartup; #else float time = Time.time; #endif float deltaTime = Time.deltaTime; float smoothDeltaTime = Time.smoothDeltaTime; // Initialize Camera Render State ClearRenderingState(cmd); SetPerCameraShaderVariables(cmd, ref cameraData); SetShaderTimeValues(cmd, time, deltaTime, smoothDeltaTime); context.ExecuteCommandBuffer(cmd); cmd.Clear(); using (new ProfilingScope(cmd, Profiling.sortRenderPasses)) { // Sort the render pass queue SortStable(m_ActiveRenderPassQueue); } using var renderBlocks = new RenderBlocks(m_ActiveRenderPassQueue); using (new ProfilingScope(cmd, Profiling.setupLights)) { SetupLights(context, ref renderingData); } using (new ProfilingScope(cmd, Profiling.RenderBlock.beforeRendering)) { // Before Render Block. This render block always executes in mono rendering. // Camera is not set up. Lights are not set up. // Used to render input textures like shadowmaps. ExecuteBlock(RenderPassBlock.BeforeRendering, in renderBlocks, context, ref renderingData); } using (new ProfilingScope(cmd, Profiling.setupCamera)) { // This is still required because of the following reasons: // - Camera billboard properties. // - Camera frustum planes: unity_CameraWorldClipPlanes[6] // - _ProjectionParams.x logic is deep inside GfxDevice // NOTE: The only reason we have to call this here and not at the beginning (before shadows) // is because this needs to be called for each eye in multi pass VR.
// The side effect is that this will override some shader properties we already set up and we will have to // reset them. context.SetupCameraProperties(camera); SetCameraMatrices(cmd, ref cameraData, true); // Reset shader time variables as they were overridden in SetupCameraProperties. If we don't do it we might have a mismatch between shadows and main rendering SetShaderTimeValues(cmd, time, deltaTime, smoothDeltaTime); #if VISUAL_EFFECT_GRAPH_0_0_1_OR_NEWER //Triggers dispatch per camera, all global parameters should have been setup at this stage. VFX.VFXManager.ProcessCameraCommand(camera, cmd); #endif } context.ExecuteCommandBuffer(cmd); cmd.Clear(); BeginXRRendering(cmd, context, ref renderingData.cameraData); // In the opaque and transparent blocks the main rendering executes. // Opaque blocks... if (renderBlocks.GetLength(RenderPassBlock.MainRenderingOpaque) > 0) { using var profScope = new ProfilingScope(cmd, Profiling.RenderBlock.mainRenderingOpaque); ExecuteBlock(RenderPassBlock.MainRenderingOpaque, in renderBlocks, context, ref renderingData); } // Transparent blocks... if (renderBlocks.GetLength(RenderPassBlock.MainRenderingTransparent) > 0) { using var profScope = new ProfilingScope(cmd, Profiling.RenderBlock.mainRenderingTransparent); ExecuteBlock(RenderPassBlock.MainRenderingTransparent, in renderBlocks, context, ref renderingData); } // Draw Gizmos... DrawGizmos(context, camera, GizmoSubset.PreImageEffects); // In this block the after-rendering drawing happens, e.g. post-processing and video player capture. if (renderBlocks.GetLength(RenderPassBlock.AfterRendering) > 0) { using var profScope = new ProfilingScope(cmd, Profiling.RenderBlock.afterRendering); ExecuteBlock(RenderPassBlock.AfterRendering, in renderBlocks, context, ref renderingData); } EndXRRendering(cmd, context, ref renderingData.cameraData); DrawWireOverlay(context, camera); DrawGizmos(context, camera, GizmoSubset.PostImageEffects); InternalFinishRendering(context, cameraData.resolveFinalTarget); } context.ExecuteCommandBuffer(cmd); CommandBufferPool.Release(cmd); } /// /// Enqueues a render pass for execution. /// /// Render pass to be enqueued. public void EnqueuePass(ScriptableRenderPass pass) { m_ActiveRenderPassQueue.Add(pass); } /// /// Returns a clear flag based on CameraClearFlags. /// /// Camera clear flags. /// A clear flag that tells if color and/or depth should be cleared. protected static ClearFlag GetCameraClearFlag(ref CameraData cameraData) { var cameraClearFlags = cameraData.camera.clearFlags; // Universal RP doesn't support CameraClearFlags.DepthOnly and CameraClearFlags.Nothing. // CameraClearFlags.DepthOnly has the same effect as CameraClearFlags.SolidColor // CameraClearFlags.Nothing clears Depth on PC/Desktop and in mobile it clears both // depth and color. // CameraClearFlags.Skybox clears depth only. // Implementation details: // Camera clear flags are used to initialize the attachments on the first render pass. // ClearFlag is used together with Tile Load action to figure out how to clear the camera render target. // In Tile Based GPUs ClearFlag.Depth + RenderBufferLoadAction.DontCare becomes DontCare load action. // While ClearFlag.All + RenderBufferLoadAction.DontCare becomes Clear load action. // In mobile we force ClearFlag.All as DontCare doesn't have noticeable perf. difference from Clear // and this avoids tile clearing issues when not rendering all pixels in some GPUs. // In desktop/consoles there's actually a performance difference between DontCare and Clear.
// RenderBufferLoadAction.DontCare in PC/Desktop behaves as not clearing the screen // RenderBufferLoadAction.DontCare in Vulkan/Metal behaves as DontCare load action // RenderBufferLoadAction.DontCare in GLES behaves as glInvalidateBuffer // Overlay cameras composite on top of previous ones. They don't clear color. // For overlay cameras we check if depth should be cleared or not. if (cameraData.renderType == CameraRenderType.Overlay) return (cameraData.clearDepth) ? ClearFlag.Depth : ClearFlag.None; // Always clear on the first render pass in mobile as it's the same perf as DontCare and avoids tile clearing issues. if (Application.isMobilePlatform) return ClearFlag.All; if ((cameraClearFlags == CameraClearFlags.Skybox && RenderSettings.skybox != null) || cameraClearFlags == CameraClearFlags.Nothing) return ClearFlag.Depth; return ClearFlag.All; } /// /// Calls AddRenderPasses for each feature added to this renderer. /// /// /// protected void AddRenderPasses(ref RenderingData renderingData) { using var profScope = new ProfilingScope(null, Profiling.addRenderPasses); // Add render passes from custom renderer features for (int i = 0; i < rendererFeatures.Count; ++i) { if (!rendererFeatures[i].isActive) { continue; } rendererFeatures[i].AddRenderPasses(this, ref renderingData); } // Remove any null render pass that might have been added by the user by mistake int count = activeRenderPassQueue.Count; for (int i = count - 1; i >= 0; i--) { if (activeRenderPassQueue[i] == null) activeRenderPassQueue.RemoveAt(i); } } void ClearRenderingState(CommandBuffer cmd) { using var profScope = new ProfilingScope(cmd, Profiling.clearRenderingState); // Reset per-camera shader keywords. They are enabled depending on which render passes are executed. cmd.DisableShaderKeyword(ShaderKeywordStrings.MainLightShadows); cmd.DisableShaderKeyword(ShaderKeywordStrings.MainLightShadowCascades); cmd.DisableShaderKeyword(ShaderKeywordStrings.MainLightShadowScreen); cmd.DisableShaderKeyword(ShaderKeywordStrings.AdditionalLightsVertex); cmd.DisableShaderKeyword(ShaderKeywordStrings.AdditionalLightsPixel); cmd.DisableShaderKeyword(ShaderKeywordStrings.AdditionalLightShadows); cmd.DisableShaderKeyword(ShaderKeywordStrings.SoftShadows); cmd.DisableShaderKeyword(ShaderKeywordStrings.MixedLightingSubtractive); // Backward compatibility cmd.DisableShaderKeyword(ShaderKeywordStrings.LightmapShadowMixing); cmd.DisableShaderKeyword(ShaderKeywordStrings.ShadowsShadowMask); cmd.DisableShaderKeyword(ShaderKeywordStrings.LinearToSRGBConversion); } internal void Clear(CameraRenderType cameraType) { m_ActiveColorAttachments[0] = BuiltinRenderTextureType.CameraTarget; for (int i = 1; i < m_ActiveColorAttachments.Length; ++i) m_ActiveColorAttachments[i] = 0; m_ActiveDepthAttachment = BuiltinRenderTextureType.CameraTarget; m_FirstTimeCameraColorTargetIsBound = cameraType == CameraRenderType.Base; m_FirstTimeCameraDepthTargetIsBound = true; m_CameraColorTarget = BuiltinRenderTextureType.CameraTarget; m_CameraDepthTarget = BuiltinRenderTextureType.CameraTarget; } void ExecuteBlock(int blockIndex, in RenderBlocks renderBlocks, ScriptableRenderContext context, ref RenderingData renderingData, bool submit = false) { foreach (int currIndex in renderBlocks.GetRange(blockIndex)) { var renderPass = m_ActiveRenderPassQueue[currIndex]; ExecuteRenderPass(context, renderPass, ref renderingData); } if (submit) context.Submit(); } void ExecuteRenderPass(ScriptableRenderContext context, ScriptableRenderPass renderPass, ref RenderingData renderingData) { using var
profScope = new ProfilingScope(null, renderPass.profilingSampler); ref CameraData cameraData = ref renderingData.cameraData; CommandBuffer cmd = CommandBufferPool.Get(); // Track CPU only as GPU markers for this scope were "too noisy". using (new ProfilingScope(cmd, Profiling.RenderPass.configure)) { renderPass.Configure(cmd, cameraData.cameraTargetDescriptor); SetRenderPassAttachments(cmd, renderPass, ref cameraData); } // Also, we execute the commands recorded at this point to ensure SetRenderTarget is called before RenderPass.Execute context.ExecuteCommandBuffer(cmd); CommandBufferPool.Release(cmd); renderPass.Execute(context, ref renderingData); } void SetRenderPassAttachments(CommandBuffer cmd, ScriptableRenderPass renderPass, ref CameraData cameraData) { Camera camera = cameraData.camera; ClearFlag cameraClearFlag = GetCameraClearFlag(ref cameraData); // Invalid configuration - use current attachment setup // Note: we only check color buffers. This is only technically correct because for shadowmaps and depth only passes // we bind depth as color and Unity handles it underneath. so we never have a situation that all color buffers are null and depth is bound. uint validColorBuffersCount = RenderingUtils.GetValidColorBufferCount(renderPass.colorAttachments); if (validColorBuffersCount == 0) return; // We use a different code path for MRT since it calls a different version of API SetRenderTarget if (RenderingUtils.IsMRT(renderPass.colorAttachments)) { // In the MRT path we assume that all color attachments are REAL color attachments, // and that the depth attachment is a REAL depth attachment too. // Determine what attachments need to be cleared. ---------------- bool needCustomCameraColorClear = false; bool needCustomCameraDepthClear = false; int cameraColorTargetIndex = RenderingUtils.IndexOf(renderPass.colorAttachments, m_CameraColorTarget); if (cameraColorTargetIndex != -1 && (m_FirstTimeCameraColorTargetIsBound)) { m_FirstTimeCameraColorTargetIsBound = false; // register that we did clear the camera target the first time it was bound // Overlay cameras composite on top of previous ones. They don't clear. // MTT: Commented due to not implemented yet // if (renderingData.cameraData.renderType == CameraRenderType.Overlay) // clearFlag = ClearFlag.None; // We need to specifically clear the camera color target. // But there is still a chance we don't need to issue individual clear() on each render-targets if they all have the same clear parameters. needCustomCameraColorClear = (cameraClearFlag & ClearFlag.Color) != (renderPass.clearFlag & ClearFlag.Color) || CoreUtils.ConvertSRGBToActiveColorSpace(camera.backgroundColor) != renderPass.clearColor; } // Note: if we have to give up the assumption that no depthTarget can be included in the MRT colorAttachments, we might need something like this: // int cameraTargetDepthIndex = IndexOf(renderPass.colorAttachments, m_CameraDepthTarget); // if( !renderTargetAlreadySet && cameraTargetDepthIndex != -1 && m_FirstTimeCameraDepthTargetIsBound) // { ... // } if (renderPass.depthAttachment == m_CameraDepthTarget && m_FirstTimeCameraDepthTargetIsBound) { m_FirstTimeCameraDepthTargetIsBound = false; needCustomCameraDepthClear = (cameraClearFlag & ClearFlag.Depth) != (renderPass.clearFlag & ClearFlag.Depth); } // Perform all clear operations needed. ---------------- // We try to minimize calls to SetRenderTarget(). // We get here only if cameraColorTarget needs to be handled separately from the rest of the color attachments. 
if (needCustomCameraColorClear) { // Clear camera color render-target separately from the rest of the render-targets. if ((cameraClearFlag & ClearFlag.Color) != 0) SetRenderTarget(cmd, renderPass.colorAttachments[cameraColorTargetIndex], renderPass.depthAttachment, ClearFlag.Color, CoreUtils.ConvertSRGBToActiveColorSpace(camera.backgroundColor)); if ((renderPass.clearFlag & ClearFlag.Color) != 0) { uint otherTargetsCount = RenderingUtils.CountDistinct(renderPass.colorAttachments, m_CameraColorTarget); var nonCameraAttachments = m_TrimmedColorAttachmentCopies[otherTargetsCount]; int writeIndex = 0; for (int readIndex = 0; readIndex < renderPass.colorAttachments.Length; ++readIndex) { if (renderPass.colorAttachments[readIndex] != m_CameraColorTarget && renderPass.colorAttachments[readIndex] != 0) { nonCameraAttachments[writeIndex] = renderPass.colorAttachments[readIndex]; ++writeIndex; } } if (writeIndex != otherTargetsCount) Debug.LogError("writeIndex and otherTargetsCount values differed. writeIndex:" + writeIndex + " otherTargetsCount:" + otherTargetsCount); SetRenderTarget(cmd, nonCameraAttachments, m_CameraDepthTarget, ClearFlag.Color, renderPass.clearColor); } } // Bind all attachments, clear color only if there was no custom behaviour for cameraColorTarget, clear depth as needed. ClearFlag finalClearFlag = ClearFlag.None; finalClearFlag |= needCustomCameraDepthClear ? (cameraClearFlag & ClearFlag.Depth) : (renderPass.clearFlag & ClearFlag.Depth); finalClearFlag |= needCustomCameraColorClear ? 0 : (renderPass.clearFlag & ClearFlag.Color); // Only setup render target if current render pass attachments are different from the active ones. if (!RenderingUtils.SequenceEqual(renderPass.colorAttachments, m_ActiveColorAttachments) || renderPass.depthAttachment != m_ActiveDepthAttachment || finalClearFlag != ClearFlag.None) { int lastValidRTindex = RenderingUtils.LastValid(renderPass.colorAttachments); if (lastValidRTindex >= 0) { int rtCount = lastValidRTindex + 1; var trimmedAttachments = m_TrimmedColorAttachmentCopies[rtCount]; for (int i = 0; i < rtCount; ++i) trimmedAttachments[i] = renderPass.colorAttachments[i]; SetRenderTarget(cmd, trimmedAttachments, renderPass.depthAttachment, finalClearFlag, renderPass.clearColor); #if ENABLE_VR && ENABLE_XR_MODULE if (cameraData.xr.enabled) { // SetRenderTarget might alter the internal device state(winding order). // Non-stereo buffer is already updated internally when switching render target. We update stereo buffers here to keep the consistency. int xrTargetIndex = RenderingUtils.IndexOf(renderPass.colorAttachments, cameraData.xr.renderTarget); bool isRenderToBackBufferTarget = (xrTargetIndex != -1) && !cameraData.xr.renderTargetIsRenderTexture; cameraData.xr.UpdateGPUViewAndProjectionMatrices(cmd, ref cameraData, !isRenderToBackBufferTarget); } #endif } } } else { // Currently in non-MRT case, color attachment can actually be a depth attachment. RenderTargetIdentifier passColorAttachment = renderPass.colorAttachment; RenderTargetIdentifier passDepthAttachment = renderPass.depthAttachment; // When render pass doesn't call ConfigureTarget we assume it's expected to render to camera target // which might be backbuffer or the framebuffer render textures. if (!renderPass.overrideCameraTarget) { // Default render pass attachment for passes before main rendering is current active // early return so we don't change current render target setup. 
if (renderPass.renderPassEvent < RenderPassEvent.BeforeRenderingOpaques) return; // Otherwise default is the pipeline camera target. passColorAttachment = m_CameraColorTarget; passDepthAttachment = m_CameraDepthTarget; } ClearFlag finalClearFlag = ClearFlag.None; Color finalClearColor; if (passColorAttachment == m_CameraColorTarget && (m_FirstTimeCameraColorTargetIsBound)) { m_FirstTimeCameraColorTargetIsBound = false; // register that we did clear the camera target the first time it was bound finalClearFlag |= (cameraClearFlag & ClearFlag.Color); finalClearColor = CoreUtils.ConvertSRGBToActiveColorSpace(camera.backgroundColor); if (m_FirstTimeCameraDepthTargetIsBound) { // m_CameraColorTarget can be an opaque pointer to a RenderTexture with depth-surface. // We cannot infer this information here, so we must assume both camera color and depth are first-time bound here (this is the legacy behaviour). m_FirstTimeCameraDepthTargetIsBound = false; finalClearFlag |= (cameraClearFlag & ClearFlag.Depth); } } else { finalClearFlag |= (renderPass.clearFlag & ClearFlag.Color); finalClearColor = renderPass.clearColor; } // Condition (m_CameraDepthTarget!=BuiltinRenderTextureType.CameraTarget) below prevents m_FirstTimeCameraDepthTargetIsBound flag from being reset during non-camera passes (such as Color Grading LUT). This ensures that in those cases, cameraDepth will actually be cleared during the later camera pass. if ((m_CameraDepthTarget != BuiltinRenderTextureType.CameraTarget) && (passDepthAttachment == m_CameraDepthTarget || passColorAttachment == m_CameraDepthTarget) && m_FirstTimeCameraDepthTargetIsBound) { m_FirstTimeCameraDepthTargetIsBound = false; finalClearFlag |= (cameraClearFlag & ClearFlag.Depth); // finalClearFlag |= (cameraClearFlag & ClearFlag.Color); // <- m_CameraDepthTarget is never a color-surface, so no need to add this here. } else finalClearFlag |= (renderPass.clearFlag & ClearFlag.Depth); // Only setup render target if current render pass attachments are different from the active ones if (passColorAttachment != m_ActiveColorAttachments[0] || passDepthAttachment != m_ActiveDepthAttachment || finalClearFlag != ClearFlag.None) { SetRenderTarget(cmd, passColorAttachment, passDepthAttachment, finalClearFlag, finalClearColor); #if ENABLE_VR && ENABLE_XR_MODULE if (cameraData.xr.enabled) { // SetRenderTarget might alter the internal device state(winding order). // Non-stereo buffer is already updated internally when switching render target. We update stereo buffers here to keep the consistency. 
bool isRenderToBackBufferTarget = (passColorAttachment == cameraData.xr.renderTarget) && !cameraData.xr.renderTargetIsRenderTexture; cameraData.xr.UpdateGPUViewAndProjectionMatrices(cmd, ref cameraData, !isRenderToBackBufferTarget); } #endif } } } void BeginXRRendering(CommandBuffer cmd, ScriptableRenderContext context, ref CameraData cameraData) { #if ENABLE_VR && ENABLE_XR_MODULE if (cameraData.xr.enabled) { cameraData.xr.StartSinglePass(cmd); cmd.EnableShaderKeyword(ShaderKeywordStrings.UseDrawProcedural); context.ExecuteCommandBuffer(cmd); cmd.Clear(); } #endif } void EndXRRendering(CommandBuffer cmd, ScriptableRenderContext context, ref CameraData cameraData) { #if ENABLE_VR && ENABLE_XR_MODULE if (cameraData.xr.enabled) { cameraData.xr.StopSinglePass(cmd); cmd.DisableShaderKeyword(ShaderKeywordStrings.UseDrawProcedural); context.ExecuteCommandBuffer(cmd); cmd.Clear(); } #endif } internal static void SetRenderTarget(CommandBuffer cmd, RenderTargetIdentifier colorAttachment, RenderTargetIdentifier depthAttachment, ClearFlag clearFlag, Color clearColor) { m_ActiveColorAttachments[0] = colorAttachment; for (int i = 1; i < m_ActiveColorAttachments.Length; ++i) m_ActiveColorAttachments[i] = 0; m_ActiveDepthAttachment = depthAttachment; RenderBufferLoadAction colorLoadAction = ((uint)clearFlag & (uint)ClearFlag.Color) != 0 ? RenderBufferLoadAction.DontCare : RenderBufferLoadAction.Load; RenderBufferLoadAction depthLoadAction = ((uint)clearFlag & (uint)ClearFlag.Depth) != 0 ? RenderBufferLoadAction.DontCare : RenderBufferLoadAction.Load; SetRenderTarget(cmd, colorAttachment, colorLoadAction, RenderBufferStoreAction.Store, depthAttachment, depthLoadAction, RenderBufferStoreAction.Store, clearFlag, clearColor); } static void SetRenderTarget( CommandBuffer cmd, RenderTargetIdentifier colorAttachment, RenderBufferLoadAction colorLoadAction, RenderBufferStoreAction colorStoreAction, ClearFlag clearFlags, Color clearColor) { CoreUtils.SetRenderTarget(cmd, colorAttachment, colorLoadAction, colorStoreAction, clearFlags, clearColor); } static void SetRenderTarget( CommandBuffer cmd, RenderTargetIdentifier colorAttachment, RenderBufferLoadAction colorLoadAction, RenderBufferStoreAction colorStoreAction, RenderTargetIdentifier depthAttachment, RenderBufferLoadAction depthLoadAction, RenderBufferStoreAction depthStoreAction, ClearFlag clearFlags, Color clearColor) { // XRTODO: Revisit the logic. Why treat CameraTarget depth specially? 
if (depthAttachment == BuiltinRenderTextureType.CameraTarget) { SetRenderTarget(cmd, colorAttachment, colorLoadAction, colorStoreAction, clearFlags, clearColor); } else { CoreUtils.SetRenderTarget(cmd, colorAttachment, colorLoadAction, colorStoreAction, depthAttachment, depthLoadAction, depthStoreAction, clearFlags, clearColor); } } static void SetRenderTarget(CommandBuffer cmd, RenderTargetIdentifier[] colorAttachments, RenderTargetIdentifier depthAttachment, ClearFlag clearFlag, Color clearColor) { m_ActiveColorAttachments = colorAttachments; m_ActiveDepthAttachment = depthAttachment; CoreUtils.SetRenderTarget(cmd, colorAttachments, depthAttachment, clearFlag, clearColor); } [Conditional("UNITY_EDITOR")] void DrawGizmos(ScriptableRenderContext context, Camera camera, GizmoSubset gizmoSubset) { #if UNITY_EDITOR if (UnityEditor.Handles.ShouldRenderGizmos()) context.DrawGizmos(camera, gizmoSubset); #endif } [Conditional("UNITY_EDITOR")] void DrawWireOverlay(ScriptableRenderContext context, Camera camera) { context.DrawWireOverlay(camera); } void InternalStartRendering(ScriptableRenderContext context, ref RenderingData renderingData) { CommandBuffer cmd = CommandBufferPool.Get(); using (new ProfilingScope(cmd, Profiling.internalStartRendering)) { for (int i = 0; i < m_ActiveRenderPassQueue.Count; ++i) { m_ActiveRenderPassQueue[i].OnCameraSetup(cmd, ref renderingData); } } context.ExecuteCommandBuffer(cmd); CommandBufferPool.Release(cmd); } void InternalFinishRendering(ScriptableRenderContext context, bool resolveFinalTarget) { CommandBuffer cmd = CommandBufferPool.Get(); using (new ProfilingScope(cmd, Profiling.internalFinishRendering)) { for (int i = 0; i < m_ActiveRenderPassQueue.Count; ++i) m_ActiveRenderPassQueue[i].FrameCleanup(cmd); // Happens when rendering the last camera in the camera stack. if (resolveFinalTarget) { for (int i = 0; i < m_ActiveRenderPassQueue.Count; ++i) m_ActiveRenderPassQueue[i].OnFinishCameraStackRendering(cmd); FinishRendering(cmd); // We finished camera stacking and released all intermediate pipeline textures. m_IsPipelineExecuting = false; } m_ActiveRenderPassQueue.Clear(); } context.ExecuteCommandBuffer(cmd); CommandBufferPool.Release(cmd); } internal static void SortStable(List<ScriptableRenderPass> list) { int j; for (int i = 1; i < list.Count; ++i) { ScriptableRenderPass curr = list[i]; j = i - 1; for (; j >= 0 && curr < list[j]; --j) list[j + 1] = list[j]; list[j + 1] = curr; } } internal struct RenderBlocks : IDisposable { private NativeArray<RenderPassEvent> m_BlockEventLimits; private NativeArray<int> m_BlockRanges; private NativeArray<int> m_BlockRangeLengths; public RenderBlocks(List<ScriptableRenderPass> activeRenderPassQueue) { // Upper limits for each block. Each block will contain render passes with events below the limit.
m_BlockEventLimits = new NativeArray<RenderPassEvent>(k_RenderPassBlockCount, Allocator.Temp); m_BlockRanges = new NativeArray<int>(m_BlockEventLimits.Length + 1, Allocator.Temp); m_BlockRangeLengths = new NativeArray<int>(m_BlockRanges.Length, Allocator.Temp); m_BlockEventLimits[RenderPassBlock.BeforeRendering] = RenderPassEvent.BeforeRenderingPrePasses; m_BlockEventLimits[RenderPassBlock.MainRenderingOpaque] = RenderPassEvent.AfterRenderingOpaques; m_BlockEventLimits[RenderPassBlock.MainRenderingTransparent] = RenderPassEvent.AfterRenderingPostProcessing; m_BlockEventLimits[RenderPassBlock.AfterRendering] = (RenderPassEvent)Int32.MaxValue; // blockRanges[0] is always 0 // blockRanges[i] is the index of the first RenderPass found in m_ActiveRenderPassQueue that has a ScriptableRenderPass.renderPassEvent higher than blockEventLimits[i] (i.e, should be executed after blockEventLimits[i]) // blockRanges[blockEventLimits.Length] is m_ActiveRenderPassQueue.Count FillBlockRanges(activeRenderPassQueue); m_BlockEventLimits.Dispose(); for (int i = 0; i < m_BlockRanges.Length - 1; i++) { m_BlockRangeLengths[i] = m_BlockRanges[i + 1] - m_BlockRanges[i]; } } // RAII-like Dispose pattern implementation for 'using' keyword public void Dispose() { m_BlockRangeLengths.Dispose(); m_BlockRanges.Dispose(); } // Fill in the start render pass index for each block; a block ends where the next one starts. void FillBlockRanges(List<ScriptableRenderPass> activeRenderPassQueue) { int currRangeIndex = 0; int currRenderPass = 0; m_BlockRanges[currRangeIndex++] = 0; // For each block, it finds the first render pass index that has an event // higher than the block limit. for (int i = 0; i < m_BlockEventLimits.Length - 1; ++i) { while (currRenderPass < activeRenderPassQueue.Count && activeRenderPassQueue[currRenderPass].renderPassEvent < m_BlockEventLimits[i]) currRenderPass++; m_BlockRanges[currRangeIndex++] = currRenderPass; } m_BlockRanges[currRangeIndex] = activeRenderPassQueue.Count; } public int GetLength(int index) { return m_BlockRangeLengths[index]; } // Minimal foreach support public struct BlockRange : IDisposable { int m_Current; int m_End; public BlockRange(int begin, int end) { Assertions.Assert.IsTrue(begin <= end); m_Current = begin < end ? begin : end; m_End = end >= begin ? end : begin; m_Current -= 1; } public BlockRange GetEnumerator() { return this; } public bool MoveNext() { return ++m_Current < m_End; } public int Current { get => m_Current; } public void Dispose() {} } public BlockRange GetRange(int index) { return new BlockRange(m_BlockRanges[index], m_BlockRanges[index + 1]); } } } }
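// ---------------------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the ScriptableRenderer implementation above): a minimal
// ScriptableRendererFeature that enqueues a ScriptableRenderPass through ScriptableRenderer.EnqueuePass
// and only reads cameraColorTarget inside the pass, as required by the cameraColorTarget documentation.
// The Samples namespace, the TintFeature/TintPass names and the chosen renderPassEvent are assumptions
// made for this sketch, not something the renderer itself prescribes.
// ---------------------------------------------------------------------------------------------
namespace UnityEngine.Rendering.Universal.Samples
{
    public class TintFeature : ScriptableRendererFeature
    {
        class TintPass : ScriptableRenderPass
        {
            ScriptableRenderer m_Renderer;

            public void Setup(ScriptableRenderer renderer) => m_Renderer = renderer;

            public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
            {
                CommandBuffer cmd = CommandBufferPool.Get("TintPass");
                // cameraColorTarget is only guaranteed to be valid here, inside the pass scope.
                RenderTargetIdentifier color = m_Renderer.cameraColorTarget;
                cmd.SetRenderTarget(color);
                // ... record draw commands against the camera color target here ...
                context.ExecuteCommandBuffer(cmd);
                CommandBufferPool.Release(cmd);
            }
        }

        TintPass m_Pass;

        public override void Create()
        {
            m_Pass = new TintPass { renderPassEvent = RenderPassEvent.AfterRenderingTransparents };
        }

        public override void AddRenderPasses(ScriptableRenderer renderer, ref RenderingData renderingData)
        {
            m_Pass.Setup(renderer);
            renderer.EnqueuePass(m_Pass);
        }
    }
}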
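// ---------------------------------------------------------------------------------------------
// Illustrative sketch of a custom renderer deriving from ScriptableRenderer, showing where
// ConfigureCameraTarget, supportedRenderingFeatures and SetupCullingParameters fit. MinimalRenderer is a
// hypothetical name, and a real renderer would enqueue concrete passes from Setup; only members
// demonstrated by this file are used here.
// ---------------------------------------------------------------------------------------------
namespace UnityEngine.Rendering.Universal.Samples
{
    public sealed class MinimalRenderer : ScriptableRenderer
    {
        public MinimalRenderer(ScriptableRendererData data) : base(data)
        {
            // Opt out of camera stacking for this simple renderer.
            supportedRenderingFeatures = new RenderingFeatures { cameraStacking = false };
        }

        public override void Setup(ScriptableRenderContext context, ref RenderingData renderingData)
        {
            // Render straight to the camera target (backbuffer or the camera's render texture).
            ConfigureCameraTarget(BuiltinRenderTextureType.CameraTarget, BuiltinRenderTextureType.CameraTarget);

            // EnqueuePass(...) would be called here once per pass this renderer wants to execute,
            // e.g. an opaque pass, a skybox pass and a transparent pass.
        }

        public override void SetupCullingParameters(ref ScriptableCullingParameters cullingParameters, ref CameraData cameraData)
        {
            // Example: never cull shadows farther than the camera can see.
            cullingParameters.shadowDistance = Mathf.Min(cullingParameters.shadowDistance, cameraData.camera.farClipPlane);
        }
    }
}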
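// ---------------------------------------------------------------------------------------------
// Worked example of the _ZBufferParams packing done in SetPerCameraShaderVariables, pulled out into a
// standalone helper for clarity (the helper and its name are assumptions of this sketch; the math mirrors
// the renderer code). With near = 0.3 and far = 1000: zc0 = 1 - far/near = -3332.33 and zc1 = far/near = 3333.33,
// and on reversed-Z platforms the components are swizzled exactly as below.
// ---------------------------------------------------------------------------------------------
namespace UnityEngine.Rendering.Universal.Samples
{
    internal static class ZBufferParamsExample
    {
        public static Vector4 Compute(float near, float far, bool reversedZ)
        {
            float invNear = Mathf.Approximately(near, 0.0f) ? 0.0f : 1.0f / near;
            float invFar = Mathf.Approximately(far, 0.0f) ? 0.0f : 1.0f / far;

            float zc0 = 1.0f - far * invNear;
            float zc1 = far * invNear;
            Vector4 zBufferParams = new Vector4(zc0, zc1, zc0 * invFar, zc1 * invFar);

            if (reversedZ)
            {
                // Same swizzle the renderer applies when SystemInfo.usesReversedZBuffer is true.
                zBufferParams.y += zBufferParams.x;
                zBufferParams.x = -zBufferParams.x;
                zBufferParams.w += zBufferParams.z;
                zBufferParams.z = -zBufferParams.z;
            }
            return zBufferParams;
        }
    }
}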
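// ---------------------------------------------------------------------------------------------
// Sketch of the clear-flag to load-action mapping used by the internal SetRenderTarget overload above:
// attachments that are about to be cleared can use DontCare (cheap on tile-based GPUs), everything else
// must be loaded to preserve its contents. The helper name is hypothetical.
// ---------------------------------------------------------------------------------------------
namespace UnityEngine.Rendering.Universal.Samples
{
    internal static class LoadActionExample
    {
        public static RenderBufferLoadAction ColorLoadAction(ClearFlag clearFlag)
            => (clearFlag & ClearFlag.Color) != 0 ? RenderBufferLoadAction.DontCare : RenderBufferLoadAction.Load;

        public static RenderBufferLoadAction DepthLoadAction(ClearFlag clearFlag)
            => (clearFlag & ClearFlag.Depth) != 0 ? RenderBufferLoadAction.DontCare : RenderBufferLoadAction.Load;
    }
}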
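// ---------------------------------------------------------------------------------------------
// Sketch of the stable insertion sort used by SortStable: entries are ordered by their sort key
// (renderPassEvent in the renderer) and equal keys keep their EnqueuePass order, because elements are only
// shifted while strictly greater. Entry and its fields are hypothetical stand-ins for ScriptableRenderPass.
// ---------------------------------------------------------------------------------------------
namespace UnityEngine.Rendering.Universal.Samples
{
    using System.Collections.Generic;

    internal static class StableSortExample
    {
        internal struct Entry { public int sortKey; public string label; }

        public static void SortStable(List<Entry> list)
        {
            for (int i = 1; i < list.Count; ++i)
            {
                Entry curr = list[i];
                int j = i - 1;
                // Shift only strictly greater elements so equal keys preserve insertion order.
                for (; j >= 0 && curr.sortKey < list[j].sortKey; --j)
                    list[j + 1] = list[j];
                list[j + 1] = curr;
            }
        }
    }
}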
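// ---------------------------------------------------------------------------------------------
// Sketch of the allocation-free foreach pattern behind RenderBlocks.BlockRange: a struct exposing
// GetEnumerator/MoveNext/Current is enough for 'foreach', with no IEnumerable or enumerator boxing.
// IntRange and the Sum usage example are hypothetical.
// ---------------------------------------------------------------------------------------------
namespace UnityEngine.Rendering.Universal.Samples
{
    internal struct IntRange
    {
        int m_Current;
        int m_End;

        public IntRange(int begin, int end)
        {
            m_End = end >= begin ? end : begin;
            m_Current = (begin < end ? begin : end) - 1; // MoveNext advances before the first read
        }

        public IntRange GetEnumerator() => this;
        public bool MoveNext() => ++m_Current < m_End;
        public int Current => m_Current;
    }

    internal static class IntRangeUsage
    {
        // Iterates begin..end-1, mirroring how ExecuteBlock walks the render pass indices of one block.
        public static int Sum(int begin, int end)
        {
            int sum = 0;
            foreach (int i in new IntRange(begin, end))
                sum += i;
            return sum;
        }
    }
}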