nuclear@0: /************************************************************************************ nuclear@0: nuclear@0: Filename : Util_Render_Stereo.cpp nuclear@0: Content : Stereo rendering configuration implementation nuclear@0: Created : October 22, 2012 nuclear@0: Authors : Michael Antonov, Andrew Reisse, Tom Forsyth nuclear@0: nuclear@0: Copyright : Copyright 2014 Oculus VR, LLC All Rights reserved. nuclear@0: nuclear@0: Licensed under the Oculus VR Rift SDK License Version 3.2 (the "License"); nuclear@0: you may not use the Oculus VR Rift SDK except in compliance with the License, nuclear@0: which is provided at the time of installation or download, or which nuclear@0: otherwise accompanies this software in either electronic or hard copy form. nuclear@0: nuclear@0: You may obtain a copy of the License at nuclear@0: nuclear@0: http://www.oculusvr.com/licenses/LICENSE-3.2 nuclear@0: nuclear@0: Unless required by applicable law or agreed to in writing, the Oculus VR SDK nuclear@0: distributed under the License is distributed on an "AS IS" BASIS, nuclear@0: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. nuclear@0: See the License for the specific language governing permissions and nuclear@0: limitations under the License. nuclear@0: nuclear@0: *************************************************************************************/ nuclear@0: nuclear@0: #include "Util_Render_Stereo.h" nuclear@0: nuclear@0: namespace OVR { namespace Util { namespace Render { nuclear@0: nuclear@0: using namespace OVR::Tracking; nuclear@0: nuclear@0: nuclear@0: //----------------------------------------------------------------------------------- nuclear@0: // **** Useful debug functions. nuclear@0: nuclear@0: char const* GetDebugNameEyeCupType ( EyeCupType eyeCupType ) nuclear@0: { nuclear@0: switch ( eyeCupType ) nuclear@0: { nuclear@0: case EyeCup_DK1A: return "DK1 A"; nuclear@0: case EyeCup_DK1B: return "DK1 B"; nuclear@0: case EyeCup_DK1C: return "DK1 C"; nuclear@0: case EyeCup_DKHD2A: return "DKHD2 A"; nuclear@0: case EyeCup_OrangeA: return "Orange A"; nuclear@0: case EyeCup_RedA: return "Red A"; nuclear@0: case EyeCup_PinkA: return "Pink A"; nuclear@0: case EyeCup_BlueA: return "Blue A"; nuclear@0: case EyeCup_Delilah1A: return "Delilah 1 A"; nuclear@0: case EyeCup_Delilah2A: return "Delilah 2 A"; nuclear@0: case EyeCup_JamesA: return "James A"; nuclear@0: case EyeCup_SunMandalaA: return "Sun Mandala A"; nuclear@0: case EyeCup_DK2A: return "DK2 A"; nuclear@0: case EyeCup_LAST: return "LAST"; nuclear@0: default: OVR_ASSERT ( false ); return "Error"; nuclear@0: } nuclear@0: } nuclear@0: nuclear@0: char const* GetDebugNameHmdType ( HmdTypeEnum hmdType ) nuclear@0: { nuclear@0: switch ( hmdType ) nuclear@0: { nuclear@0: case HmdType_None: return "None"; nuclear@0: case HmdType_DK1: return "DK1"; nuclear@0: case HmdType_DKProto: return "DK1 prototype"; nuclear@0: case HmdType_DKHDProto: return "DK HD prototype 1"; nuclear@0: case HmdType_DKHDProto566Mi: return "DK HD prototype 566 Mi"; nuclear@0: case HmdType_DKHD2Proto: return "DK HD prototype 585"; nuclear@0: case HmdType_CrystalCoveProto: return "Crystal Cove"; nuclear@0: case HmdType_DK2: return "DK2"; nuclear@0: case HmdType_Unknown: return "Unknown"; nuclear@0: case HmdType_LAST: return "LAST"; nuclear@0: default: OVR_ASSERT ( false ); return "Error"; nuclear@0: } nuclear@0: } nuclear@0: nuclear@0: nuclear@0: //----------------------------------------------------------------------------------- nuclear@0: // **** Internal 
pipeline functions. nuclear@0: nuclear@0: struct DistortionAndFov nuclear@0: { nuclear@0: DistortionRenderDesc Distortion; nuclear@0: FovPort Fov; nuclear@0: }; nuclear@0: nuclear@0: static DistortionAndFov CalculateDistortionAndFovInternal ( StereoEye eyeType, HmdRenderInfo const &hmd, nuclear@0: LensConfig const *pLensOverride = NULL, nuclear@0: FovPort const *pTanHalfFovOverride = NULL, nuclear@0: float extraEyeRotationInRadians = OVR_DEFAULT_EXTRA_EYE_ROTATION ) nuclear@0: { nuclear@0: // pLensOverride can be NULL, which means no override. nuclear@0: nuclear@0: DistortionRenderDesc localDistortion = CalculateDistortionRenderDesc ( eyeType, hmd, pLensOverride ); nuclear@0: FovPort fov = CalculateFovFromHmdInfo ( eyeType, localDistortion, hmd, extraEyeRotationInRadians ); nuclear@0: // Here the app or the user would optionally clamp this visible fov to a smaller number if nuclear@0: // they want more perf or resolution and are willing to give up FOV. nuclear@0: // They may also choose to clamp UDLR differently e.g. to get cinemascope-style views. nuclear@0: if ( pTanHalfFovOverride != NULL ) nuclear@0: { nuclear@0: fov = *pTanHalfFovOverride; nuclear@0: } nuclear@0: nuclear@0: // Here we could call ClampToPhysicalScreenFov(), but we do want people nuclear@0: // to be able to play with larger-than-screen views. nuclear@0: // The calling app can always do the clamping itself. nuclear@0: DistortionAndFov result; nuclear@0: result.Distortion = localDistortion; nuclear@0: result.Fov = fov; nuclear@0: nuclear@0: return result; nuclear@0: } nuclear@0: nuclear@0: nuclear@0: static Recti CalculateViewportInternal ( StereoEye eyeType, nuclear@0: Sizei const actualRendertargetSurfaceSize, nuclear@0: Sizei const requestedRenderedPixelSize, nuclear@0: bool bRendertargetSharedByBothEyes, nuclear@0: bool bMonoRenderingMode = false ) nuclear@0: { nuclear@0: Recti renderedViewport; nuclear@0: if ( bMonoRenderingMode || !bRendertargetSharedByBothEyes || (eyeType == StereoEye_Center) ) nuclear@0: { nuclear@0: // One eye per RT. nuclear@0: renderedViewport.x = 0; nuclear@0: renderedViewport.y = 0; nuclear@0: renderedViewport.w = Alg::Min ( actualRendertargetSurfaceSize.w, requestedRenderedPixelSize.w ); nuclear@0: renderedViewport.h = Alg::Min ( actualRendertargetSurfaceSize.h, requestedRenderedPixelSize.h ); nuclear@0: } nuclear@0: else nuclear@0: { nuclear@0: // Both eyes share the RT. nuclear@0: renderedViewport.x = 0; nuclear@0: renderedViewport.y = 0; nuclear@0: renderedViewport.w = Alg::Min ( actualRendertargetSurfaceSize.w/2, requestedRenderedPixelSize.w ); nuclear@0: renderedViewport.h = Alg::Min ( actualRendertargetSurfaceSize.h, requestedRenderedPixelSize.h ); nuclear@0: if ( eyeType == StereoEye_Right ) nuclear@0: { nuclear@0: renderedViewport.x = (actualRendertargetSurfaceSize.w+1)/2; // Round up, not down. nuclear@0: } nuclear@0: } nuclear@0: return renderedViewport; nuclear@0: } nuclear@0: nuclear@0: static Recti CalculateViewportDensityInternal ( StereoEye eyeType, nuclear@0: DistortionRenderDesc const &distortion, nuclear@0: FovPort const &fov, nuclear@0: Sizei const &actualRendertargetSurfaceSize, nuclear@0: bool bRendertargetSharedByBothEyes, nuclear@0: float desiredPixelDensity = 1.0f, nuclear@0: bool bMonoRenderingMode = false ) nuclear@0: { nuclear@0: OVR_ASSERT ( actualRendertargetSurfaceSize.w > 0 ); nuclear@0: OVR_ASSERT ( actualRendertargetSurfaceSize.h > 0 ); nuclear@0: nuclear@0: // What size RT do we need to get 1:1 mapping? 
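    // (Illustrative sketch, not SDK-authoritative: "1:1" here means one rendered pixel per
    // display pixel at the centre of the view after distortion. Very roughly,
    //     ideal width  ~= PixelsPerTanAngleAtCenter.x * (LeftTan + RightTan) * desiredPixelDensity
    //     ideal height ~= PixelsPerTanAngleAtCenter.y * (UpTan  + DownTan )  * desiredPixelDensity
    // so halving desiredPixelDensity roughly halves each dimension of the requested size.)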
nuclear@0: Sizei idealPixelSize = CalculateIdealPixelSize ( eyeType, distortion, fov, desiredPixelDensity ); nuclear@0: // ...but we might not actually get that size. nuclear@0: return CalculateViewportInternal ( eyeType, nuclear@0: actualRendertargetSurfaceSize, nuclear@0: idealPixelSize, nuclear@0: bRendertargetSharedByBothEyes, bMonoRenderingMode ); nuclear@0: } nuclear@0: nuclear@0: static ViewportScaleAndOffset CalculateViewportScaleAndOffsetInternal ( nuclear@0: ScaleAndOffset2D const &eyeToSourceNDC, nuclear@0: Recti const &renderedViewport, nuclear@0: Sizei const &actualRendertargetSurfaceSize ) nuclear@0: { nuclear@0: ViewportScaleAndOffset result; nuclear@0: result.RenderedViewport = renderedViewport; nuclear@0: result.EyeToSourceUV = CreateUVScaleAndOffsetfromNDCScaleandOffset( nuclear@0: eyeToSourceNDC, renderedViewport, actualRendertargetSurfaceSize ); nuclear@0: return result; nuclear@0: } nuclear@0: nuclear@0: nuclear@0: static StereoEyeParams CalculateStereoEyeParamsInternal ( StereoEye eyeType, HmdRenderInfo const &hmd, nuclear@0: DistortionRenderDesc const &distortion, nuclear@0: FovPort const &fov, nuclear@0: Sizei const &actualRendertargetSurfaceSize, nuclear@0: Recti const &renderedViewport, nuclear@0: bool bRightHanded = true, float zNear = 0.01f, float zFar = 10000.0f, nuclear@0: bool bMonoRenderingMode = false, nuclear@0: float zoomFactor = 1.0f ) nuclear@0: { nuclear@0: // Generate the projection matrix for intermediate rendertarget. nuclear@0: // Z range can also be inserted later by the app (though not in this particular case) nuclear@0: float fovScale = 1.0f / zoomFactor; nuclear@0: FovPort zoomedFov = fov; nuclear@0: zoomedFov.LeftTan *= fovScale; nuclear@0: zoomedFov.RightTan *= fovScale; nuclear@0: zoomedFov.UpTan *= fovScale; nuclear@0: zoomedFov.DownTan *= fovScale; nuclear@0: Matrix4f projection = CreateProjection ( bRightHanded, zoomedFov, zNear, zFar ); nuclear@0: nuclear@0: // Find the mapping from TanAngle space to target NDC space. nuclear@0: // Note this does NOT take the zoom factor into account because nuclear@0: // this is the mapping of actual physical eye FOV (and our eyes do not zoom!) nuclear@0: // to screen space. nuclear@0: ScaleAndOffset2D eyeToSourceNDC = CreateNDCScaleAndOffsetFromFov ( fov ); nuclear@0: nuclear@0: // The size of the final FB, which is fixed and determined by the physical size of the device display. 
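    // (Illustrative note: for the side-by-side panels this library targets, each eye's
    // framebuffer viewport is typically one half of Hmd.ResolutionInPixels - e.g. on a
    // 1920x1080 panel the left eye would get roughly Recti(0, 0, 960, 1080) and the right
    // eye Recti(960, 0, 960, 1080). This is distinct from RenderedViewport, which lives in
    // the app's intermediate rendertarget and is chosen further down.)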
nuclear@0: Recti distortedViewport = GetFramebufferViewport ( eyeType, hmd ); nuclear@0: Vector3f virtualCameraOffset = CalculateEyeVirtualCameraOffset(hmd, eyeType, bMonoRenderingMode); nuclear@0: nuclear@0: StereoEyeParams result; nuclear@0: result.Eye = eyeType; nuclear@0: result.HmdToEyeViewOffset = Matrix4f::Translation(virtualCameraOffset); nuclear@0: result.Distortion = distortion; nuclear@0: result.DistortionViewport = distortedViewport; nuclear@0: result.Fov = fov; nuclear@0: result.RenderedProjection = projection; nuclear@0: result.EyeToSourceNDC = eyeToSourceNDC; nuclear@0: ViewportScaleAndOffset vsao = CalculateViewportScaleAndOffsetInternal ( eyeToSourceNDC, renderedViewport, actualRendertargetSurfaceSize ); nuclear@0: result.RenderedViewport = vsao.RenderedViewport; nuclear@0: result.EyeToSourceUV = vsao.EyeToSourceUV; nuclear@0: nuclear@0: return result; nuclear@0: } nuclear@0: nuclear@0: nuclear@0: Vector3f CalculateEyeVirtualCameraOffset(HmdRenderInfo const &hmd, nuclear@0: StereoEye eyeType, bool bmonoRenderingMode) nuclear@0: { nuclear@0: Vector3f virtualCameraOffset(0); nuclear@0: nuclear@0: if (!bmonoRenderingMode) nuclear@0: { nuclear@0: float eyeCenterRelief = hmd.GetEyeCenter().ReliefInMeters; nuclear@0: nuclear@0: if (eyeType == StereoEye_Left) nuclear@0: { nuclear@0: virtualCameraOffset.x = hmd.EyeLeft.NoseToPupilInMeters; nuclear@0: virtualCameraOffset.z = eyeCenterRelief - hmd.EyeLeft.ReliefInMeters; nuclear@0: } nuclear@0: else if (eyeType == StereoEye_Right) nuclear@0: { nuclear@0: virtualCameraOffset.x = -hmd.EyeRight.NoseToPupilInMeters; nuclear@0: virtualCameraOffset.z = eyeCenterRelief - hmd.EyeRight.ReliefInMeters; nuclear@0: } nuclear@0: } nuclear@0: nuclear@0: return virtualCameraOffset; nuclear@0: } nuclear@0: nuclear@0: nuclear@0: //----------------------------------------------------------------------------------- nuclear@0: // **** Higher-level utility functions. nuclear@0: nuclear@0: Sizei CalculateRecommendedTextureSize ( HmdRenderInfo const &hmd, nuclear@0: bool bRendertargetSharedByBothEyes, nuclear@0: float pixelDensityInCenter /*= 1.0f*/ ) nuclear@0: { nuclear@0: Sizei idealPixelSize[2]; nuclear@0: for ( int eyeNum = 0; eyeNum < 2; eyeNum++ ) nuclear@0: { nuclear@0: StereoEye eyeType = ( eyeNum == 0 ) ? 
StereoEye_Left : StereoEye_Right; nuclear@0: nuclear@0: DistortionAndFov distortionAndFov = CalculateDistortionAndFovInternal ( eyeType, hmd, NULL, NULL, OVR_DEFAULT_EXTRA_EYE_ROTATION ); nuclear@0: nuclear@0: idealPixelSize[eyeNum] = CalculateIdealPixelSize ( eyeType, nuclear@0: distortionAndFov.Distortion, nuclear@0: distortionAndFov.Fov, nuclear@0: pixelDensityInCenter ); nuclear@0: } nuclear@0: nuclear@0: Sizei result; nuclear@0: result.w = Alg::Max ( idealPixelSize[0].w, idealPixelSize[1].w ); nuclear@0: result.h = Alg::Max ( idealPixelSize[0].h, idealPixelSize[1].h ); nuclear@0: if ( bRendertargetSharedByBothEyes ) nuclear@0: { nuclear@0: result.w *= 2; nuclear@0: } nuclear@0: return result; nuclear@0: } nuclear@0: nuclear@0: StereoEyeParams CalculateStereoEyeParams ( HmdRenderInfo const &hmd, nuclear@0: StereoEye eyeType, nuclear@0: Sizei const &actualRendertargetSurfaceSize, nuclear@0: bool bRendertargetSharedByBothEyes, nuclear@0: bool bRightHanded /*= true*/, nuclear@0: float zNear /*= 0.01f*/, float zFar /*= 10000.0f*/, nuclear@0: Sizei const *pOverrideRenderedPixelSize /* = NULL*/, nuclear@0: FovPort const *pOverrideFovport /*= NULL*/, nuclear@0: float zoomFactor /*= 1.0f*/ ) nuclear@0: { nuclear@0: DistortionAndFov distortionAndFov = CalculateDistortionAndFovInternal ( eyeType, hmd, NULL, NULL, OVR_DEFAULT_EXTRA_EYE_ROTATION ); nuclear@0: if ( pOverrideFovport != NULL ) nuclear@0: { nuclear@0: distortionAndFov.Fov = *pOverrideFovport; nuclear@0: } nuclear@0: nuclear@0: Recti viewport; nuclear@0: if ( pOverrideRenderedPixelSize != NULL ) nuclear@0: { nuclear@0: viewport = CalculateViewportInternal ( eyeType, actualRendertargetSurfaceSize, *pOverrideRenderedPixelSize, bRendertargetSharedByBothEyes, false ); nuclear@0: } nuclear@0: else nuclear@0: { nuclear@0: viewport = CalculateViewportDensityInternal ( eyeType, nuclear@0: distortionAndFov.Distortion, nuclear@0: distortionAndFov.Fov, nuclear@0: actualRendertargetSurfaceSize, bRendertargetSharedByBothEyes, 1.0f, false ); nuclear@0: } nuclear@0: nuclear@0: return CalculateStereoEyeParamsInternal ( nuclear@0: eyeType, hmd, nuclear@0: distortionAndFov.Distortion, nuclear@0: distortionAndFov.Fov, nuclear@0: actualRendertargetSurfaceSize, viewport, nuclear@0: bRightHanded, zNear, zFar, false, zoomFactor ); nuclear@0: } nuclear@0: nuclear@0: nuclear@0: FovPort CalculateRecommendedFov ( HmdRenderInfo const &hmd, nuclear@0: StereoEye eyeType, nuclear@0: bool bMakeFovSymmetrical /* = false */ ) nuclear@0: { nuclear@0: DistortionAndFov distortionAndFov = CalculateDistortionAndFovInternal ( eyeType, hmd, NULL, NULL, OVR_DEFAULT_EXTRA_EYE_ROTATION ); nuclear@0: FovPort fov = distortionAndFov.Fov; nuclear@0: if ( bMakeFovSymmetrical ) nuclear@0: { nuclear@0: // Deal with engines that cannot support an off-center projection. nuclear@0: // Unfortunately this means they will be rendering pixels that the user can't actually see. 
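        // (Worked example, illustrative numbers only: a port with LeftTan = 1.0 and
        // RightTan = 1.2 becomes 1.2 on both sides, so the horizontal extent grows
        // from 2.2 to 2.4 tan-units - roughly 9% more pixels per row rendered purely
        // to keep the projection symmetric.)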
        float fovTanH = Alg::Max ( fov.LeftTan, fov.RightTan );
        float fovTanV = Alg::Max ( fov.UpTan, fov.DownTan );
        fov.LeftTan  = fovTanH;
        fov.RightTan = fovTanH;
        fov.UpTan    = fovTanV;
        fov.DownTan  = fovTanV;
    }
    return fov;
}

ViewportScaleAndOffset ModifyRenderViewport ( StereoEyeParams const &params,
                                              Sizei const &actualRendertargetSurfaceSize,
                                              Recti const &renderViewport )
{
    return CalculateViewportScaleAndOffsetInternal ( params.EyeToSourceNDC, renderViewport, actualRendertargetSurfaceSize );
}

ViewportScaleAndOffset ModifyRenderSize ( StereoEyeParams const &params,
                                          Sizei const &actualRendertargetSurfaceSize,
                                          Sizei const &requestedRenderSize,
                                          bool bRendertargetSharedByBothEyes /*= false*/ )
{
    Recti renderViewport = CalculateViewportInternal ( params.Eye, actualRendertargetSurfaceSize, requestedRenderSize, bRendertargetSharedByBothEyes, false );
    return CalculateViewportScaleAndOffsetInternal ( params.EyeToSourceNDC, renderViewport, actualRendertargetSurfaceSize );
}

ViewportScaleAndOffset ModifyRenderDensity ( StereoEyeParams const &params,
                                             Sizei const &actualRendertargetSurfaceSize,
                                             float pixelDensity /*= 1.0f*/,
                                             bool bRendertargetSharedByBothEyes /*= false*/ )
{
    Recti renderViewport = CalculateViewportDensityInternal ( params.Eye, params.Distortion, params.Fov, actualRendertargetSurfaceSize, bRendertargetSharedByBothEyes, pixelDensity, false );
    return CalculateViewportScaleAndOffsetInternal ( params.EyeToSourceNDC, renderViewport, actualRendertargetSurfaceSize );
}


//-----------------------------------------------------------------------------------
// **** StereoConfig Implementation

StereoConfig::StereoConfig(StereoMode mode)
    : Mode(mode),
      DirtyFlag(true)
{
    // Initialize "fake" default HMD values for testing without HMD plugged in.
    // These default values match those returned by DK1
    // (at least they did at time of writing - certainly good enough for debugging)
    Hmd.HmdType                             = HmdType_None;
    Hmd.ResolutionInPixels                  = Sizei(1280, 800);
    Hmd.ScreenSizeInMeters                  = Sizef(0.1498f, 0.0936f);
    Hmd.ScreenGapSizeInMeters               = 0.0f;
    Hmd.PelOffsetR                          = Vector2f ( 0.0f, 0.0f );
    Hmd.PelOffsetB                          = Vector2f ( 0.0f, 0.0f );
    Hmd.CenterFromTopInMeters               = 0.0468f;
    Hmd.LensSeparationInMeters              = 0.0635f;
    Hmd.LensDiameterInMeters                = 0.035f;
    Hmd.LensSurfaceToMidplateInMeters       = 0.025f;
    Hmd.EyeCups                             = EyeCup_DK1A;
    Hmd.Shutter.Type                        = HmdShutter_RollingTopToBottom;
    Hmd.Shutter.VsyncToNextVsync            = ( 1.0f / 60.0f );
    Hmd.Shutter.VsyncToFirstScanline        = 0.000052f;
    Hmd.Shutter.FirstScanlineToLastScanline = 0.016580f;
    Hmd.Shutter.PixelSettleTime             = 0.015f;
    Hmd.Shutter.PixelPersistence            = ( 1.0f / 60.0f );
    Hmd.EyeLeft.Distortion.SetToIdentity();
    Hmd.EyeLeft.Distortion.MetersPerTanAngleAtCenter = 0.043875f;
    Hmd.EyeLeft.Distortion.Eqn              = Distortion_RecipPoly4;
    Hmd.EyeLeft.Distortion.K[0]             = 1.0f;
    Hmd.EyeLeft.Distortion.K[1]             = -0.3999f;
    Hmd.EyeLeft.Distortion.K[2]             = 0.2408f;
    Hmd.EyeLeft.Distortion.K[3]             = -0.4589f;
    Hmd.EyeLeft.Distortion.MaxR             = 1.0f;
    Hmd.EyeLeft.Distortion.ChromaticAberration[0] = 0.006f;
    Hmd.EyeLeft.Distortion.ChromaticAberration[1] = 0.0f;
    Hmd.EyeLeft.Distortion.ChromaticAberration[2] = -0.014f;
    Hmd.EyeLeft.Distortion.ChromaticAberration[3] = 0.0f;
    Hmd.EyeLeft.NoseToPupilInMeters         = 0.032f;      // Half of a nominal 64mm IPD.
    Hmd.EyeLeft.ReliefInMeters              = 0.013f;
    Hmd.EyeRight = Hmd.EyeLeft;

    SetViewportMode = SVPM_Density;
    SetViewportPixelsPerDisplayPixel = 1.0f;
    // Not used in this mode, but init them anyway.
    SetViewportSize[0] = Sizei(0,0);
    SetViewportSize[1] = Sizei(0,0);
    SetViewport[0] = Recti(0,0,0,0);
    SetViewport[1] = Recti(0,0,0,0);

    OverrideLens = false;
    OverrideTanHalfFov = false;
    OverrideZeroIpd = false;
    ExtraEyeRotationInRadians = OVR_DEFAULT_EXTRA_EYE_ROTATION;
    IsRendertargetSharedByBothEyes = true;
    RightHandedProjection = true;

    // This should cause an assert if the app does not call SetRendertargetSize()
    RendertargetSize = Sizei ( 0, 0 );

    ZNear = 0.01f;
    ZFar = 10000.0f;

    Set2DAreaFov(DegreeToRad(85.0f));
}

void StereoConfig::SetHmdRenderInfo(const HmdRenderInfo& hmd)
{
    Hmd = hmd;
    DirtyFlag = true;
}

void StereoConfig::Set2DAreaFov(float fovRadians)
{
    Area2DFov = fovRadians;
    DirtyFlag = true;
}

const StereoEyeParamsWithOrtho& StereoConfig::GetEyeRenderParams(StereoEye eye)
{
    if ( DirtyFlag )
    {
        UpdateComputedState();
    }

    static const uint8_t eyeParamIndices[3] = { 0, 0, 1 };

    OVR_ASSERT(eye < sizeof(eyeParamIndices));
    return EyeRenderParams[eyeParamIndices[eye]];
}

void StereoConfig::SetLensOverride ( LensConfig const *pLensOverrideLeft  /*= NULL*/,
                                     LensConfig const *pLensOverrideRight /*= NULL*/ )
{
    if ( pLensOverrideLeft == NULL )
    {
        OverrideLens = false;
    }
    else
    {
        OverrideLens = true;
        LensOverrideLeft  = *pLensOverrideLeft;
        LensOverrideRight = *pLensOverrideLeft;
        if ( pLensOverrideRight != NULL )
        {
            LensOverrideRight = *pLensOverrideRight;
        }
    }
    DirtyFlag = true;
}

void StereoConfig::SetRendertargetSize (Size<int> const rendertargetSize,
                                        bool rendertargetIsSharedByBothEyes )
{
    RendertargetSize = rendertargetSize;
    IsRendertargetSharedByBothEyes = rendertargetIsSharedByBothEyes;
    DirtyFlag = true;
}

void StereoConfig::SetFov ( FovPort const *pfovLeft  /*= NULL*/,
                            FovPort const *pfovRight /*= NULL*/ )
{
    DirtyFlag = true;
    if ( pfovLeft == NULL )
    {
        OverrideTanHalfFov = false;
    }
    else
    {
        OverrideTanHalfFov = true;
        FovOverrideLeft  = *pfovLeft;
        FovOverrideRight = *pfovLeft;
        if ( pfovRight != NULL )
        {
            FovOverrideRight = *pfovRight;
        }
    }
}


void StereoConfig::SetZeroVirtualIpdOverride ( bool enableOverride )
{
    DirtyFlag = true;
    OverrideZeroIpd = enableOverride;
}


void StereoConfig::SetZClipPlanesAndHandedness ( float zNear /*= 0.01f*/, float zFar /*= 10000.0f*/, bool rightHandedProjection /*= true*/ )
{
    DirtyFlag = true;
    ZNear = zNear;
    ZFar = zFar;
    RightHandedProjection = rightHandedProjection;
}

void
StereoConfig::SetExtraEyeRotation ( float extraEyeRotationInRadians ) nuclear@0: { nuclear@0: DirtyFlag = true; nuclear@0: ExtraEyeRotationInRadians = extraEyeRotationInRadians; nuclear@0: } nuclear@0: nuclear@0: Sizei StereoConfig::CalculateRecommendedTextureSize ( bool rendertargetSharedByBothEyes, nuclear@0: float pixelDensityInCenter /*= 1.0f*/ ) nuclear@0: { nuclear@0: return Render::CalculateRecommendedTextureSize ( Hmd, rendertargetSharedByBothEyes, pixelDensityInCenter ); nuclear@0: } nuclear@0: nuclear@0: nuclear@0: nuclear@0: void StereoConfig::UpdateComputedState() nuclear@0: { nuclear@0: int numEyes = 2; nuclear@0: StereoEye eyeTypes[2]; nuclear@0: nuclear@0: switch ( Mode ) nuclear@0: { nuclear@0: case Stereo_None: nuclear@0: numEyes = 1; nuclear@0: eyeTypes[0] = StereoEye_Center; nuclear@0: break; nuclear@0: nuclear@0: case Stereo_LeftRight_Multipass: nuclear@0: numEyes = 2; nuclear@0: eyeTypes[0] = StereoEye_Left; nuclear@0: eyeTypes[1] = StereoEye_Right; nuclear@0: break; nuclear@0: nuclear@0: default: nuclear@0: numEyes = 0; nuclear@0: OVR_ASSERT( false ); nuclear@0: break; nuclear@0: } nuclear@0: nuclear@0: // If either of these fire, you've probably forgotten to call SetRendertargetSize() nuclear@0: OVR_ASSERT ( RendertargetSize.w > 0 ); nuclear@0: OVR_ASSERT ( RendertargetSize.h > 0 ); nuclear@0: nuclear@0: for ( int eyeNum = 0; eyeNum < numEyes; eyeNum++ ) nuclear@0: { nuclear@0: StereoEye eyeType = eyeTypes[eyeNum]; nuclear@0: LensConfig *pLensOverride = NULL; nuclear@0: if ( OverrideLens ) nuclear@0: { nuclear@0: if ( eyeType == StereoEye_Right ) nuclear@0: { nuclear@0: pLensOverride = &LensOverrideRight; nuclear@0: } nuclear@0: else nuclear@0: { nuclear@0: pLensOverride = &LensOverrideLeft; nuclear@0: } nuclear@0: } nuclear@0: nuclear@0: FovPort *pTanHalfFovOverride = NULL; nuclear@0: if ( OverrideTanHalfFov ) nuclear@0: { nuclear@0: if ( eyeType == StereoEye_Right ) nuclear@0: { nuclear@0: pTanHalfFovOverride = &FovOverrideRight; nuclear@0: } nuclear@0: else nuclear@0: { nuclear@0: pTanHalfFovOverride = &FovOverrideLeft; nuclear@0: } nuclear@0: } nuclear@0: nuclear@0: DistortionAndFov distortionAndFov = nuclear@0: CalculateDistortionAndFovInternal ( eyeType, Hmd, nuclear@0: pLensOverride, pTanHalfFovOverride, nuclear@0: ExtraEyeRotationInRadians ); nuclear@0: nuclear@0: EyeRenderParams[eyeNum].StereoEye.Distortion = distortionAndFov.Distortion; nuclear@0: EyeRenderParams[eyeNum].StereoEye.Fov = distortionAndFov.Fov; nuclear@0: } nuclear@0: nuclear@0: if ( OverrideZeroIpd ) nuclear@0: { nuclear@0: // Take the union of the calculated eye FOVs. 
nuclear@0: FovPort fov; nuclear@0: fov.UpTan = Alg::Max ( EyeRenderParams[0].StereoEye.Fov.UpTan , EyeRenderParams[1].StereoEye.Fov.UpTan ); nuclear@0: fov.DownTan = Alg::Max ( EyeRenderParams[0].StereoEye.Fov.DownTan , EyeRenderParams[1].StereoEye.Fov.DownTan ); nuclear@0: fov.LeftTan = Alg::Max ( EyeRenderParams[0].StereoEye.Fov.LeftTan , EyeRenderParams[1].StereoEye.Fov.LeftTan ); nuclear@0: fov.RightTan = Alg::Max ( EyeRenderParams[0].StereoEye.Fov.RightTan, EyeRenderParams[1].StereoEye.Fov.RightTan ); nuclear@0: EyeRenderParams[0].StereoEye.Fov = fov; nuclear@0: EyeRenderParams[1].StereoEye.Fov = fov; nuclear@0: } nuclear@0: nuclear@0: for ( int eyeNum = 0; eyeNum < numEyes; eyeNum++ ) nuclear@0: { nuclear@0: StereoEye eyeType = eyeTypes[eyeNum]; nuclear@0: nuclear@0: DistortionRenderDesc localDistortion = EyeRenderParams[eyeNum].StereoEye.Distortion; nuclear@0: FovPort fov = EyeRenderParams[eyeNum].StereoEye.Fov; nuclear@0: nuclear@0: // Use a placeholder - will be overridden later. nuclear@0: Recti tempViewport = Recti ( 0, 0, 1, 1 ); nuclear@0: nuclear@0: EyeRenderParams[eyeNum].StereoEye = CalculateStereoEyeParamsInternal ( nuclear@0: eyeType, Hmd, localDistortion, fov, nuclear@0: RendertargetSize, tempViewport, nuclear@0: RightHandedProjection, ZNear, ZFar, nuclear@0: OverrideZeroIpd ); nuclear@0: nuclear@0: // We want to create a virtual 2D surface we can draw debug text messages to. nuclear@0: // We'd like it to be a fixed distance (OrthoDistance) away, nuclear@0: // and to cover a specific FOV (Area2DFov). We need to find the projection matrix for this, nuclear@0: // and also to know how large it is in pixels to achieve a 1:1 mapping at the center of the screen. nuclear@0: float orthoDistance = 0.8f; nuclear@0: float orthoHalfFov = tanf ( Area2DFov * 0.5f ); nuclear@0: Vector2f unityOrthoPixelSize = localDistortion.PixelsPerTanAngleAtCenter * ( orthoHalfFov * 2.0f ); nuclear@0: float localInterpupillaryDistance = Hmd.EyeLeft.NoseToPupilInMeters + Hmd.EyeRight.NoseToPupilInMeters; nuclear@0: if ( OverrideZeroIpd ) nuclear@0: { nuclear@0: localInterpupillaryDistance = 0.0f; nuclear@0: } nuclear@0: Matrix4f ortho = CreateOrthoSubProjection ( true, eyeType, nuclear@0: orthoHalfFov, orthoHalfFov, nuclear@0: unityOrthoPixelSize.x, unityOrthoPixelSize.y, nuclear@0: orthoDistance, localInterpupillaryDistance, nuclear@0: EyeRenderParams[eyeNum].StereoEye.RenderedProjection ); nuclear@0: EyeRenderParams[eyeNum].OrthoProjection = ortho; nuclear@0: } nuclear@0: nuclear@0: // ...and now set up the viewport, scale & offset the way the app wanted. nuclear@0: setupViewportScaleAndOffsets(); nuclear@0: nuclear@0: if ( OverrideZeroIpd ) nuclear@0: { nuclear@0: // Monocular rendering has some fragile parts... don't break any by accident. 
nuclear@0: OVR_ASSERT ( EyeRenderParams[0].StereoEye.Fov.UpTan == EyeRenderParams[1].StereoEye.Fov.UpTan ); nuclear@0: OVR_ASSERT ( EyeRenderParams[0].StereoEye.Fov.DownTan == EyeRenderParams[1].StereoEye.Fov.DownTan ); nuclear@0: OVR_ASSERT ( EyeRenderParams[0].StereoEye.Fov.LeftTan == EyeRenderParams[1].StereoEye.Fov.LeftTan ); nuclear@0: OVR_ASSERT ( EyeRenderParams[0].StereoEye.Fov.RightTan == EyeRenderParams[1].StereoEye.Fov.RightTan ); nuclear@0: OVR_ASSERT ( EyeRenderParams[0].StereoEye.RenderedProjection.M[0][0] == EyeRenderParams[1].StereoEye.RenderedProjection.M[0][0] ); nuclear@0: OVR_ASSERT ( EyeRenderParams[0].StereoEye.RenderedProjection.M[1][1] == EyeRenderParams[1].StereoEye.RenderedProjection.M[1][1] ); nuclear@0: OVR_ASSERT ( EyeRenderParams[0].StereoEye.RenderedProjection.M[0][2] == EyeRenderParams[1].StereoEye.RenderedProjection.M[0][2] ); nuclear@0: OVR_ASSERT ( EyeRenderParams[0].StereoEye.RenderedProjection.M[1][2] == EyeRenderParams[1].StereoEye.RenderedProjection.M[1][2] ); nuclear@0: OVR_ASSERT ( EyeRenderParams[0].StereoEye.RenderedViewport == EyeRenderParams[1].StereoEye.RenderedViewport ); nuclear@0: OVR_ASSERT ( EyeRenderParams[0].StereoEye.EyeToSourceUV.Offset == EyeRenderParams[1].StereoEye.EyeToSourceUV.Offset ); nuclear@0: OVR_ASSERT ( EyeRenderParams[0].StereoEye.EyeToSourceUV.Scale == EyeRenderParams[1].StereoEye.EyeToSourceUV.Scale ); nuclear@0: OVR_ASSERT ( EyeRenderParams[0].StereoEye.EyeToSourceNDC.Offset == EyeRenderParams[1].StereoEye.EyeToSourceNDC.Offset ); nuclear@0: OVR_ASSERT ( EyeRenderParams[0].StereoEye.EyeToSourceNDC.Scale == EyeRenderParams[1].StereoEye.EyeToSourceNDC.Scale ); nuclear@0: OVR_ASSERT ( EyeRenderParams[0].OrthoProjection.M[0][0] == EyeRenderParams[1].OrthoProjection.M[0][0] ); nuclear@0: OVR_ASSERT ( EyeRenderParams[0].OrthoProjection.M[1][1] == EyeRenderParams[1].OrthoProjection.M[1][1] ); nuclear@0: OVR_ASSERT ( EyeRenderParams[0].OrthoProjection.M[0][2] == EyeRenderParams[1].OrthoProjection.M[0][2] ); nuclear@0: OVR_ASSERT ( EyeRenderParams[0].OrthoProjection.M[1][2] == EyeRenderParams[1].OrthoProjection.M[1][2] ); nuclear@0: } nuclear@0: nuclear@0: DirtyFlag = false; nuclear@0: } nuclear@0: nuclear@0: nuclear@0: nuclear@0: ViewportScaleAndOffsetBothEyes StereoConfig::setupViewportScaleAndOffsets() nuclear@0: { nuclear@0: for ( int eyeNum = 0; eyeNum < 2; eyeNum++ ) nuclear@0: { nuclear@0: StereoEye eyeType = ( eyeNum == 0 ) ? 
StereoEye_Left : StereoEye_Right; nuclear@0: nuclear@0: DistortionRenderDesc localDistortion = EyeRenderParams[eyeNum].StereoEye.Distortion; nuclear@0: FovPort fov = EyeRenderParams[eyeNum].StereoEye.Fov; nuclear@0: nuclear@0: Recti renderedViewport; nuclear@0: switch ( SetViewportMode ) nuclear@0: { nuclear@0: case SVPM_Density: nuclear@0: renderedViewport = CalculateViewportDensityInternal ( nuclear@0: eyeType, localDistortion, fov, nuclear@0: RendertargetSize, IsRendertargetSharedByBothEyes, nuclear@0: SetViewportPixelsPerDisplayPixel, OverrideZeroIpd ); nuclear@0: break; nuclear@0: case SVPM_Size: nuclear@0: if ( ( eyeType == StereoEye_Right ) && !OverrideZeroIpd ) nuclear@0: { nuclear@0: renderedViewport = CalculateViewportInternal ( nuclear@0: eyeType, RendertargetSize, nuclear@0: SetViewportSize[1], nuclear@0: IsRendertargetSharedByBothEyes, OverrideZeroIpd ); nuclear@0: } nuclear@0: else nuclear@0: { nuclear@0: renderedViewport = CalculateViewportInternal ( nuclear@0: eyeType, RendertargetSize, nuclear@0: SetViewportSize[0], nuclear@0: IsRendertargetSharedByBothEyes, OverrideZeroIpd ); nuclear@0: } nuclear@0: break; nuclear@0: case SVPM_Viewport: nuclear@0: if ( ( eyeType == StereoEye_Right ) && !OverrideZeroIpd ) nuclear@0: { nuclear@0: renderedViewport = SetViewport[1]; nuclear@0: } nuclear@0: else nuclear@0: { nuclear@0: renderedViewport = SetViewport[0]; nuclear@0: } nuclear@0: break; nuclear@0: default: OVR_ASSERT ( false ); break; nuclear@0: } nuclear@0: nuclear@0: ViewportScaleAndOffset vpsao = CalculateViewportScaleAndOffsetInternal ( nuclear@0: EyeRenderParams[eyeNum].StereoEye.EyeToSourceNDC, nuclear@0: renderedViewport, nuclear@0: RendertargetSize ); nuclear@0: EyeRenderParams[eyeNum].StereoEye.RenderedViewport = vpsao.RenderedViewport; nuclear@0: EyeRenderParams[eyeNum].StereoEye.EyeToSourceUV = vpsao.EyeToSourceUV; nuclear@0: } nuclear@0: nuclear@0: ViewportScaleAndOffsetBothEyes result; nuclear@0: result.Left.EyeToSourceUV = EyeRenderParams[0].StereoEye.EyeToSourceUV; nuclear@0: result.Left.RenderedViewport = EyeRenderParams[0].StereoEye.RenderedViewport; nuclear@0: result.Right.EyeToSourceUV = EyeRenderParams[1].StereoEye.EyeToSourceUV; nuclear@0: result.Right.RenderedViewport = EyeRenderParams[1].StereoEye.RenderedViewport; nuclear@0: return result; nuclear@0: } nuclear@0: nuclear@0: // Specify a pixel density - how many rendered pixels per pixel in the physical display. nuclear@0: ViewportScaleAndOffsetBothEyes StereoConfig::SetRenderDensity ( float pixelsPerDisplayPixel ) nuclear@0: { nuclear@0: SetViewportMode = SVPM_Density; nuclear@0: SetViewportPixelsPerDisplayPixel = pixelsPerDisplayPixel; nuclear@0: return setupViewportScaleAndOffsets(); nuclear@0: } nuclear@0: nuclear@0: // Supply the size directly. Will be clamped to the physical rendertarget size. nuclear@0: ViewportScaleAndOffsetBothEyes StereoConfig::SetRenderSize ( Sizei const &renderSizeLeft, Sizei const &renderSizeRight ) nuclear@0: { nuclear@0: SetViewportMode = SVPM_Size; nuclear@0: SetViewportSize[0] = renderSizeLeft; nuclear@0: SetViewportSize[1] = renderSizeRight; nuclear@0: return setupViewportScaleAndOffsets(); nuclear@0: } nuclear@0: nuclear@0: // Supply the viewport directly. This is not clamped to the physical rendertarget - careful now! 
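// Illustrative usage sketch for the three viewport-selection modes (not part of the SDK
// build; "config", "hmdRenderInfo" and the sizes are hypothetical):
//
//     StereoConfig config ( Stereo_LeftRight_Multipass );
//     config.SetHmdRenderInfo ( hmdRenderInfo );
//     config.SetRendertargetSize ( Sizei ( 2048, 1024 ), true );
//
//     config.SetRenderDensity ( 1.0f );                                    // from pixel density
//     // ...or...
//     config.SetRenderSize ( Sizei ( 960, 1024 ), Sizei ( 960, 1024 ) );   // explicit per-eye sizes, clamped
//     // ...or (not clamped - see the warning above)...
//     config.SetRenderViewport ( Recti ( 0, 0, 960, 1024 ), Recti ( 1024, 0, 960, 1024 ) );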
ViewportScaleAndOffsetBothEyes StereoConfig::SetRenderViewport ( Recti const &renderViewportLeft, Recti const &renderViewportRight )
{
    SetViewportMode = SVPM_Viewport;
    SetViewport[0] = renderViewportLeft;
    SetViewport[1] = renderViewportRight;
    return setupViewportScaleAndOffsets();
}

Matrix4f StereoConfig::GetProjectionWithZoom ( StereoEye eye, float fovZoom ) const
{
    int eyeNum = ( eye == StereoEye_Right ) ? 1 : 0;
    float fovScale = 1.0f / fovZoom;
    FovPort fovPort = EyeRenderParams[eyeNum].StereoEye.Fov;
    fovPort.LeftTan  *= fovScale;
    fovPort.RightTan *= fovScale;
    fovPort.UpTan    *= fovScale;
    fovPort.DownTan  *= fovScale;
    return CreateProjection ( RightHandedProjection, fovPort, ZNear, ZFar );
}




//-----------------------------------------------------------------------------------
// ***** Distortion Mesh Rendering


// Pow2 for the Morton order to work!
// 4 is too low - it is easy to see the "wobbles" in the HMD.
// 5 is realllly close but you can see pixel differences with even/odd frame checking.
// 6 is indistinguishable on a monitor on even/odd frames.
static const int DMA_GridSizeLog2   = 6;
static const int DMA_GridSize       = 1<<DMA_GridSizeLog2;
static const int DMA_NumVertsPerEye = (DMA_GridSize+1)*(DMA_GridSize+1);
static const int DMA_NumTrisPerEye  = (DMA_GridSize)*(DMA_GridSize)*2;


DistortionMeshVertexData DistortionMeshMakeVertex ( Vector2f screenNDC,
                                                    bool rightEye,
                                                    const HmdRenderInfo &hmdRenderInfo,
                                                    const DistortionRenderDesc &distortion, const ScaleAndOffset2D &eyeToSourceNDC )
{
    DistortionMeshVertexData result;

    float xOffset = 0.0f;
    if (rightEye)
    {
        xOffset = 1.0f;
    }

    // Chromatic aberration means the red, green and blue components of the source
    // texture land on slightly different tan-angles, so keep all three per vertex.
    Vector2f tanEyeAnglesR, tanEyeAnglesG, tanEyeAnglesB;
    TransformScreenNDCToTanFovSpaceChroma ( &tanEyeAnglesR, &tanEyeAnglesG, &tanEyeAnglesB,
                                            distortion, screenNDC );

    result.TanEyeAnglesR = tanEyeAnglesR;
    result.TanEyeAnglesG = tanEyeAnglesG;
    result.TanEyeAnglesB = tanEyeAnglesB;

    HmdShutterTypeEnum shutterType = hmdRenderInfo.Shutter.Type;
    switch ( shutterType )
    {
    case HmdShutter_Global:
        result.TimewarpLerp = 0.0f;
        break;
    case HmdShutter_RollingLeftToRight:
        // Retrace is left to right - left eye goes 0.0 -> 0.5, then right goes 0.5 -> 1.0
        result.TimewarpLerp = screenNDC.x * 0.25f + 0.25f;
        if (rightEye)
        {
            result.TimewarpLerp += 0.5f;
        }
        break;
    case HmdShutter_RollingRightToLeft:
        // Retrace is right to left - right eye goes 0.0 -> 0.5, then left goes 0.5 -> 1.0
        result.TimewarpLerp = 0.75f - screenNDC.x * 0.25f;
        if (rightEye)
        {
            result.TimewarpLerp -= 0.5f;
        }
        break;
    case HmdShutter_RollingTopToBottom:
        // Retrace is top to bottom on both eyes at the same time.
        result.TimewarpLerp = screenNDC.y * 0.5f + 0.5f;
        break;
    default: OVR_ASSERT ( false ); break;
    }

    // When does the fade-to-black edge start? Chosen heuristically.
    float fadeOutBorderFractionTexture          = 0.1f;
    float fadeOutBorderFractionTextureInnerEdge = 0.1f;
    float fadeOutBorderFractionScreen           = 0.1f;
    float fadeOutFloor                          = 0.6f;    // the floor controls how much black is in the fade region

    if (hmdRenderInfo.HmdType == HmdType_DK1)
    {
        fadeOutBorderFractionTexture          = 0.3f;
        fadeOutBorderFractionTextureInnerEdge = 0.075f;
        fadeOutBorderFractionScreen           = 0.075f;
        fadeOutFloor                          = 0.25f;
    }

    // Fade out at texture edges.
    // The furthest out will be the blue channel, because of chromatic aberration (true of any standard lens)
    Vector2f sourceTexCoordBlueNDC = TransformTanFovSpaceToRendertargetNDC ( eyeToSourceNDC, tanEyeAnglesB );
    if (rightEye)
    {
        // The inner edge of the eye texture is usually much more magnified, because it's right against
        // the middle of the screen, not the FOV edge.
        // So we want a different scaling factor for that.
        // This code flips the texture NDC so that +1.0 is the inner edge.
        sourceTexCoordBlueNDC.x = -sourceTexCoordBlueNDC.x;
    }
    float edgeFadeIn = ( 1.0f / fadeOutBorderFractionTextureInnerEdge ) * ( 1.0f - sourceTexCoordBlueNDC.x );               // Inner
    edgeFadeIn = Alg::Min ( edgeFadeIn, ( 1.0f / fadeOutBorderFractionTexture ) * ( 1.0f + sourceTexCoordBlueNDC.x ) );     // Outer
    edgeFadeIn = Alg::Min ( edgeFadeIn, ( 1.0f / fadeOutBorderFractionTexture ) * ( 1.0f - sourceTexCoordBlueNDC.y ) );     // Upper
    edgeFadeIn = Alg::Min ( edgeFadeIn, ( 1.0f / fadeOutBorderFractionTexture ) * ( 1.0f + sourceTexCoordBlueNDC.y ) );     // Lower

    // Also fade out at screen edges. Since this is in pixel space, no need to do inner specially.
    float edgeFadeInScreen = ( 1.0f / fadeOutBorderFractionScreen ) *
                             ( 1.0f - Alg::Max ( Alg::Abs ( screenNDC.x ), Alg::Abs ( screenNDC.y ) ) );
    edgeFadeIn = Alg::Min ( edgeFadeInScreen, edgeFadeIn ) + fadeOutFloor;

    // Note - this is NOT clamped negatively.
    // For rendering methods that interpolate over a coarse grid, we need the values to go negative for correct intersection with zero.
    result.Shade = Alg::Min ( edgeFadeIn, 1.0f );
    result.ScreenPosNDC.x = 0.5f * screenNDC.x - 0.5f + xOffset;
    result.ScreenPosNDC.y = -screenNDC.y;

    return result;
}


void DistortionMeshDestroy ( DistortionMeshVertexData *pVertices, uint16_t *pTriangleMeshIndices )
{
    OVR_FREE ( pVertices );
    OVR_FREE ( pTriangleMeshIndices );
}

void DistortionMeshCreate ( DistortionMeshVertexData **ppVertices, uint16_t **ppTriangleListIndices,
                            int *pNumVertices, int *pNumTriangles,
                            const StereoEyeParams &stereoParams, const HmdRenderInfo &hmdRenderInfo )
{
    bool rightEye = ( stereoParams.Eye == StereoEye_Right );
    int vertexCount = 0;
    int triangleCount = 0;

    // Generate mesh into allocated data and return result.
    DistortionMeshCreate(ppVertices, ppTriangleListIndices, &vertexCount, &triangleCount,
                         rightEye, hmdRenderInfo, stereoParams.Distortion, stereoParams.EyeToSourceNDC);

    *pNumVertices  = vertexCount;
    *pNumTriangles = triangleCount;
}


// Generate distortion mesh for one eye.
void DistortionMeshCreate( DistortionMeshVertexData **ppVertices, uint16_t **ppTriangleListIndices,
                           int *pNumVertices, int *pNumTriangles,
                           bool rightEye,
                           const HmdRenderInfo &hmdRenderInfo,
                           const DistortionRenderDesc &distortion, const ScaleAndOffset2D &eyeToSourceNDC )
{
    *pNumVertices  = DMA_NumVertsPerEye;
    *pNumTriangles = DMA_NumTrisPerEye;

    *ppVertices = (DistortionMeshVertexData*)
                      OVR_ALLOC( sizeof(DistortionMeshVertexData) * (*pNumVertices) );
    *ppTriangleListIndices  = (uint16_t*) OVR_ALLOC( sizeof(uint16_t) * (*pNumTriangles) * 3 );

    if (!*ppVertices || !*ppTriangleListIndices)
    {
        if (*ppVertices)
        {
            OVR_FREE(*ppVertices);
        }
        if (*ppTriangleListIndices)
        {
            OVR_FREE(*ppTriangleListIndices);
        }
        *ppVertices = NULL;
        *ppTriangleListIndices = NULL;
        *pNumTriangles = 0;
        *pNumVertices = 0;
        return;
    }



    // Populate vertex buffer info

    // First pass - build up raw vertex data.
    DistortionMeshVertexData* pcurVert = *ppVertices;

    for ( int y = 0; y <= DMA_GridSize; y++ )
    {
        for ( int x = 0; x <= DMA_GridSize; x++ )
        {

            Vector2f sourceCoordNDC;
            // NDC texture coords [-1,+1]
            sourceCoordNDC.x = 2.0f * ( (float)x / (float)DMA_GridSize ) - 1.0f;
            sourceCoordNDC.y = 2.0f * ( (float)y / (float)DMA_GridSize ) - 1.0f;
            Vector2f tanEyeAngle = TransformRendertargetNDCToTanFovSpace ( eyeToSourceNDC, sourceCoordNDC );

            // Find a corresponding screen position.
            // Note - this function does not have to be precise - we're just trying to match the mesh tessellation
            // with the shape of the distortion to minimise the number of triangles needed.
            Vector2f screenNDC = TransformTanFovSpaceToScreenNDC ( distortion, tanEyeAngle, false );
            // ...but don't let verts overlap to the other eye.
            screenNDC.x = Alg::Max ( -1.0f, Alg::Min ( screenNDC.x, 1.0f ) );
            screenNDC.y = Alg::Max ( -1.0f, Alg::Min ( screenNDC.y, 1.0f ) );

            // From those screen positions, generate the vertex.
            *pcurVert = DistortionMeshMakeVertex ( screenNDC, rightEye, hmdRenderInfo, distortion, eyeToSourceNDC );
            pcurVert++;
        }
    }


    // Populate index buffer info
    uint16_t *pcurIndex = *ppTriangleListIndices;

    for ( int triNum = 0; triNum < DMA_GridSize * DMA_GridSize; triNum++ )
    {
        // Use a Morton order to help locality of FB, texture and vertex cache.
        // (0.325ms raster order -> 0.257ms Morton order)
        OVR_ASSERT ( DMA_GridSize <= 256 );
        int x = ( ( triNum & 0x0001 ) >> 0 ) |
                ( ( triNum & 0x0004 ) >> 1 ) |
                ( ( triNum & 0x0010 ) >> 2 ) |
                ( ( triNum & 0x0040 ) >> 3 ) |
                ( ( triNum & 0x0100 ) >> 4 ) |
                ( ( triNum & 0x0400 ) >> 5 ) |
                ( ( triNum & 0x1000 ) >> 6 ) |
                ( ( triNum & 0x4000 ) >> 7 );
        int y = ( ( triNum & 0x0002 ) >> 1 ) |
                ( ( triNum & 0x0008 ) >> 2 ) |
                ( ( triNum & 0x0020 ) >> 3 ) |
                ( ( triNum & 0x0080 ) >> 4 ) |
                ( ( triNum & 0x0200 ) >> 5 ) |
                ( ( triNum & 0x0800 ) >> 6 ) |
                ( ( triNum & 0x2000 ) >> 7 ) |
                ( ( triNum & 0x8000 ) >> 8 );
        int FirstVertex = x * (DMA_GridSize+1) + y;
        // Another twist - we want the top-left and bottom-right quadrants to
        // have the triangles split one way, the other two split the other.
        // +---+---+---+---+
        // |  /|  /|\  |\  |
        // | / | / | \ | \ |
        // |/  |/  |  \|  \|
        // +---+---+---+---+
        // |  /|  /|\  |\  |
        // | / | / | \ | \ |
        // |/  |/  |  \|  \|
        // +---+---+---+---+
        // |\  |\  |  /|  /|
        // | \ | \ | / | / |
        // |  \|  \|/  |/  |
        // +---+---+---+---+
        // |\  |\  |  /|  /|
        // | \ | \ | / | / |
        // |  \|  \|/  |/  |
        // +---+---+---+---+
        // This way triangle edges don't span long distances over the distortion function,
        // so linear interpolation works better & we can use fewer tris.
        if ( ( x < DMA_GridSize/2 ) != ( y < DMA_GridSize/2 ) )       // != is logical XOR
        {
            *pcurIndex++ = (uint16_t)FirstVertex;
            *pcurIndex++ = (uint16_t)FirstVertex+1;
            *pcurIndex++ = (uint16_t)FirstVertex+(DMA_GridSize+1)+1;

            *pcurIndex++ = (uint16_t)FirstVertex+(DMA_GridSize+1)+1;
            *pcurIndex++ = (uint16_t)FirstVertex+(DMA_GridSize+1);
            *pcurIndex++ = (uint16_t)FirstVertex;
        }
        else
        {
            *pcurIndex++ = (uint16_t)FirstVertex;
            *pcurIndex++ = (uint16_t)FirstVertex+1;
            *pcurIndex++ = (uint16_t)FirstVertex+(DMA_GridSize+1);

            *pcurIndex++ = (uint16_t)FirstVertex+1;
            *pcurIndex++ = (uint16_t)FirstVertex+(DMA_GridSize+1)+1;
            *pcurIndex++ = (uint16_t)FirstVertex+(DMA_GridSize+1);
        }
    }
}

//-----------------------------------------------------------------------------------
// ***** Heightmap Mesh Rendering


static const int HMA_GridSizeLog2   = 7;
static const int HMA_GridSize       = 1<<HMA_GridSizeLog2;
static const int HMA_NumVertsPerEye = (HMA_GridSize+1)*(HMA_GridSize+1);
static const int HMA_NumTrisPerEye  = (HMA_GridSize)*(HMA_GridSize)*2;


// Generate heightmap mesh for one eye.
void HeightmapMeshCreate( HeightmapMeshVertexData **ppVertices, uint16_t **ppTriangleListIndices,
                          int *pNumVertices, int *pNumTriangles,
                          bool rightEye,
                          const HmdRenderInfo &hmdRenderInfo,
                          const ScaleAndOffset2D &eyeToSourceNDC )
{
    *pNumVertices  = HMA_NumVertsPerEye;
    *pNumTriangles = HMA_NumTrisPerEye;

    *ppVertices = (HeightmapMeshVertexData*)
                      OVR_ALLOC( sizeof(HeightmapMeshVertexData) * (*pNumVertices) );
    *ppTriangleListIndices = (uint16_t*) OVR_ALLOC( sizeof(uint16_t) * (*pNumTriangles) * 3 );

    if (!*ppVertices || !*ppTriangleListIndices)
    {
        if (*ppVertices)
        {
            OVR_FREE(*ppVertices);
        }
        if (*ppTriangleListIndices)
        {
            OVR_FREE(*ppTriangleListIndices);
        }
        *ppVertices = NULL;
        *ppTriangleListIndices = NULL;
        *pNumTriangles = 0;
        *pNumVertices = 0;
        return;
    }


    // Populate vertex buffer info

    // First pass - build up raw vertex data.
    HeightmapMeshVertexData* pcurVert = *ppVertices;

    for ( int y = 0; y <= HMA_GridSize; y++ )
    {
        for ( int x = 0; x <= HMA_GridSize; x++ )
        {
            Vector2f sourceCoordNDC;
            // NDC texture coords [-1,+1]
            sourceCoordNDC.x = 2.0f * ( (float)x / (float)HMA_GridSize ) - 1.0f;
            sourceCoordNDC.y = 2.0f * ( (float)y / (float)HMA_GridSize ) - 1.0f;
            Vector2f tanEyeAngle = TransformRendertargetNDCToTanFovSpace ( eyeToSourceNDC, sourceCoordNDC );

            pcurVert->TanEyeAngles = tanEyeAngle;
0.5, then left goes 0.5 -> 1.0 nuclear@0: pcurVert->TimewarpLerp = 0.75f - sourceCoordNDC.x * 0.25f; nuclear@0: if (rightEye) nuclear@0: { nuclear@0: pcurVert->TimewarpLerp -= 0.5f; nuclear@0: } nuclear@0: break; nuclear@0: case HmdShutter_RollingTopToBottom: nuclear@0: // Retrace is top to bottom on both eyes at the same time. nuclear@0: pcurVert->TimewarpLerp = sourceCoordNDC.y * 0.5f + 0.5f; nuclear@0: break; nuclear@0: default: OVR_ASSERT ( false ); break; nuclear@0: } nuclear@0: nuclear@0: // Don't let verts overlap to the other eye. nuclear@0: //sourceCoordNDC.x = Alg::Max ( -1.0f, Alg::Min ( sourceCoordNDC.x, 1.0f ) ); nuclear@0: //sourceCoordNDC.y = Alg::Max ( -1.0f, Alg::Min ( sourceCoordNDC.y, 1.0f ) ); nuclear@0: nuclear@0: //pcurVert->ScreenPosNDC.x = 0.5f * sourceCoordNDC.x - 0.5f + xOffset; nuclear@0: pcurVert->ScreenPosNDC.x = sourceCoordNDC.x; nuclear@0: pcurVert->ScreenPosNDC.y = -sourceCoordNDC.y; nuclear@0: nuclear@0: pcurVert++; nuclear@0: } nuclear@0: } nuclear@0: nuclear@0: nuclear@0: // Populate index buffer info nuclear@0: uint16_t *pcurIndex = *ppTriangleListIndices; nuclear@0: nuclear@0: for ( int triNum = 0; triNum < HMA_GridSize * HMA_GridSize; triNum++ ) nuclear@0: { nuclear@0: // Use a Morton order to help locality of FB, texture and vertex cache. nuclear@0: // (0.325ms raster order -> 0.257ms Morton order) nuclear@0: OVR_ASSERT ( HMA_GridSize < 256 ); nuclear@0: int x = ( ( triNum & 0x0001 ) >> 0 ) | nuclear@0: ( ( triNum & 0x0004 ) >> 1 ) | nuclear@0: ( ( triNum & 0x0010 ) >> 2 ) | nuclear@0: ( ( triNum & 0x0040 ) >> 3 ) | nuclear@0: ( ( triNum & 0x0100 ) >> 4 ) | nuclear@0: ( ( triNum & 0x0400 ) >> 5 ) | nuclear@0: ( ( triNum & 0x1000 ) >> 6 ) | nuclear@0: ( ( triNum & 0x4000 ) >> 7 ); nuclear@0: int y = ( ( triNum & 0x0002 ) >> 1 ) | nuclear@0: ( ( triNum & 0x0008 ) >> 2 ) | nuclear@0: ( ( triNum & 0x0020 ) >> 3 ) | nuclear@0: ( ( triNum & 0x0080 ) >> 4 ) | nuclear@0: ( ( triNum & 0x0200 ) >> 5 ) | nuclear@0: ( ( triNum & 0x0800 ) >> 6 ) | nuclear@0: ( ( triNum & 0x2000 ) >> 7 ) | nuclear@0: ( ( triNum & 0x8000 ) >> 8 ); nuclear@0: int FirstVertex = x * (HMA_GridSize+1) + y; nuclear@0: // Another twist - we want the top-left and bottom-right quadrants to nuclear@0: // have the triangles split one way, the other two split the other. nuclear@0: // +---+---+---+---+ nuclear@0: // | /| /|\ |\ | nuclear@0: // | / | / | \ | \ | nuclear@0: // |/ |/ | \| \| nuclear@0: // +---+---+---+---+ nuclear@0: // | /| /|\ |\ | nuclear@0: // | / | / | \ | \ | nuclear@0: // |/ |/ | \| \| nuclear@0: // +---+---+---+---+ nuclear@0: // |\ |\ | /| /| nuclear@0: // | \ | \ | / | / | nuclear@0: // | \| \|/ |/ | nuclear@0: // +---+---+---+---+ nuclear@0: // |\ |\ | /| /| nuclear@0: // | \ | \ | / | / | nuclear@0: // | \| \|/ |/ | nuclear@0: // +---+---+---+---+ nuclear@0: // This way triangle edges don't span long distances over the distortion function, nuclear@0: // so linear interpolation works better & we can use fewer tris. 
nuclear@0: if ( ( x < HMA_GridSize/2 ) != ( y < HMA_GridSize/2 ) ) // != is logical XOR nuclear@0: { nuclear@0: *pcurIndex++ = (uint16_t)FirstVertex; nuclear@0: *pcurIndex++ = (uint16_t)FirstVertex+1; nuclear@0: *pcurIndex++ = (uint16_t)FirstVertex+(HMA_GridSize+1)+1; nuclear@0: nuclear@0: *pcurIndex++ = (uint16_t)FirstVertex+(HMA_GridSize+1)+1; nuclear@0: *pcurIndex++ = (uint16_t)FirstVertex+(HMA_GridSize+1); nuclear@0: *pcurIndex++ = (uint16_t)FirstVertex; nuclear@0: } nuclear@0: else nuclear@0: { nuclear@0: *pcurIndex++ = (uint16_t)FirstVertex; nuclear@0: *pcurIndex++ = (uint16_t)FirstVertex+1; nuclear@0: *pcurIndex++ = (uint16_t)FirstVertex+(HMA_GridSize+1); nuclear@0: nuclear@0: *pcurIndex++ = (uint16_t)FirstVertex+1; nuclear@0: *pcurIndex++ = (uint16_t)FirstVertex+(HMA_GridSize+1)+1; nuclear@0: *pcurIndex++ = (uint16_t)FirstVertex+(HMA_GridSize+1); nuclear@0: } nuclear@0: } nuclear@0: } nuclear@0: nuclear@0: //----------------------------------------------------------------------------------- nuclear@0: // ***** Prediction and timewarp. nuclear@0: // nuclear@0: nuclear@0: // Calculates the values from the HMD info. nuclear@0: PredictionValues PredictionGetDeviceValues ( const HmdRenderInfo &hmdRenderInfo, nuclear@0: bool withTimewarp /*= true*/, nuclear@0: bool withVsync /*= true*/ ) nuclear@0: { nuclear@0: PredictionValues result; nuclear@0: nuclear@0: result.WithTimewarp = withTimewarp; nuclear@0: result.WithVsync = withVsync; nuclear@0: nuclear@0: // For unclear reasons, most graphics systems add an extra frame of latency nuclear@0: // somewhere along the way. In time we'll debug this and figure it out, but nuclear@0: // for now this gets prediction a little bit better. nuclear@0: const float extraFramesOfBufferingKludge = 1.0f; nuclear@0: nuclear@0: if ( withVsync ) nuclear@0: { nuclear@0: // These are the times from the Present+Flush to when the middle of the scene is "averagely visible" (without timewarp) nuclear@0: // So if you had no timewarp, this, plus the time until the next vsync, is how much to predict by. nuclear@0: result.PresentFlushToRenderedScene = extraFramesOfBufferingKludge * hmdRenderInfo.Shutter.FirstScanlineToLastScanline; nuclear@0: // Predict to the middle of the screen being scanned out. nuclear@0: result.PresentFlushToRenderedScene += hmdRenderInfo.Shutter.VsyncToFirstScanline + 0.5f * hmdRenderInfo.Shutter.FirstScanlineToLastScanline; nuclear@0: // Time for pixels to get half-way to settling. nuclear@0: result.PresentFlushToRenderedScene += hmdRenderInfo.Shutter.PixelSettleTime * 0.5f; nuclear@0: // Predict to half-way through persistence nuclear@0: result.PresentFlushToRenderedScene += hmdRenderInfo.Shutter.PixelPersistence * 0.5f; nuclear@0: nuclear@0: // The time from the Present+Flush to when the first scanline is "averagely visible". nuclear@0: result.PresentFlushToTimewarpStart = extraFramesOfBufferingKludge * hmdRenderInfo.Shutter.FirstScanlineToLastScanline; nuclear@0: // Predict to the first line being scanned out. nuclear@0: result.PresentFlushToTimewarpStart += hmdRenderInfo.Shutter.VsyncToFirstScanline; nuclear@0: // Time for pixels to get half-way to settling. nuclear@0: result.PresentFlushToTimewarpStart += hmdRenderInfo.Shutter.PixelSettleTime * 0.5f; nuclear@0: // Predict to half-way through persistence nuclear@0: result.PresentFlushToTimewarpStart += hmdRenderInfo.Shutter.PixelPersistence * 0.5f; nuclear@0: nuclear@0: // Time to the the last scanline. 
nuclear@0: result.PresentFlushToTimewarpEnd = result.PresentFlushToTimewarpStart + hmdRenderInfo.Shutter.FirstScanlineToLastScanline; nuclear@0: nuclear@0: // Ideal framerate. nuclear@0: result.PresentFlushToPresentFlush = hmdRenderInfo.Shutter.VsyncToNextVsync; nuclear@0: } nuclear@0: else nuclear@0: { nuclear@0: // Timewarp without vsync is a little odd. nuclear@0: // Currently, we assume that without vsync, we have no idea which scanline nuclear@0: // is currently being sent to the display. So we can't do lerping timewarp, nuclear@0: // we can just do a full-screen late-stage fixup. nuclear@0: nuclear@0: // "PresentFlushToRenderedScene" means the time from the Present+Flush to when the middle of the scene is "averagely visible" (without timewarp) nuclear@0: // So if you had no timewarp, this, plus the time until the next flush (which is usually the time to render the frame), is how much to predict by. nuclear@0: // Time for pixels to get half-way to settling. nuclear@0: result.PresentFlushToRenderedScene = hmdRenderInfo.Shutter.PixelSettleTime * 0.5f; nuclear@0: // Predict to half-way through persistence nuclear@0: result.PresentFlushToRenderedScene += hmdRenderInfo.Shutter.PixelPersistence * 0.5f; nuclear@0: nuclear@0: // Without vsync, you don't know timings, and so can't do anything useful with lerped warping. nuclear@0: result.PresentFlushToTimewarpStart = result.PresentFlushToRenderedScene; nuclear@0: result.PresentFlushToTimewarpEnd = result.PresentFlushToRenderedScene; nuclear@0: nuclear@0: // There's no concept of "ideal" when vsync is off. nuclear@0: result.PresentFlushToPresentFlush = 0.0f; nuclear@0: } nuclear@0: nuclear@0: return result; nuclear@0: } nuclear@0: nuclear@0: Matrix4f TimewarpComputePoseDelta ( Matrix4f const &renderedViewFromWorld, Matrix4f const &predictedViewFromWorld, Matrix4f const&hmdToEyeViewOffset ) nuclear@0: { nuclear@0: Matrix4f worldFromPredictedView = (hmdToEyeViewOffset * predictedViewFromWorld).InvertedHomogeneousTransform(); nuclear@0: Matrix4f matRenderFromNowStart = (hmdToEyeViewOffset * renderedViewFromWorld) * worldFromPredictedView; nuclear@0: nuclear@0: // The sensor-predicted orientations have: X=right, Y=up, Z=backwards. nuclear@0: // The vectors inside the mesh are in NDC to keep the shader simple: X=right, Y=down, Z=forwards. nuclear@0: // So we need to perform a similarity transform on this delta matrix. nuclear@0: // The verbose code would look like this: nuclear@0: /* nuclear@0: Matrix4f matBasisChange; nuclear@0: matBasisChange.SetIdentity(); nuclear@0: matBasisChange.M[0][0] = 1.0f; nuclear@0: matBasisChange.M[1][1] = -1.0f; nuclear@0: matBasisChange.M[2][2] = -1.0f; nuclear@0: Matrix4f matBasisChangeInv = matBasisChange.Inverted(); nuclear@0: matRenderFromNow = matBasisChangeInv * matRenderFromNow * matBasisChange; nuclear@0: */ nuclear@0: // ...but of course all the above is a constant transform and much more easily done. 
nuclear@0: // We flip the signs of the Y&Z row, then flip the signs of the Y&Z column, nuclear@0: // and of course most of the flips cancel: nuclear@0: // +++ +-- +-- nuclear@0: // +++ -> flip Y&Z columns -> +-- -> flip Y&Z rows -> -++ nuclear@0: // +++ +-- -++ nuclear@0: matRenderFromNowStart.M[0][1] = -matRenderFromNowStart.M[0][1]; nuclear@0: matRenderFromNowStart.M[0][2] = -matRenderFromNowStart.M[0][2]; nuclear@0: matRenderFromNowStart.M[1][0] = -matRenderFromNowStart.M[1][0]; nuclear@0: matRenderFromNowStart.M[2][0] = -matRenderFromNowStart.M[2][0]; nuclear@0: matRenderFromNowStart.M[1][3] = -matRenderFromNowStart.M[1][3]; nuclear@0: matRenderFromNowStart.M[2][3] = -matRenderFromNowStart.M[2][3]; nuclear@0: nuclear@0: return matRenderFromNowStart; nuclear@0: } nuclear@0: nuclear@0: Matrix4f TimewarpComputePoseDeltaPosition ( Matrix4f const &renderedViewFromWorld, Matrix4f const &predictedViewFromWorld, Matrix4f const&hmdToEyeViewOffset ) nuclear@0: { nuclear@0: Matrix4f worldFromPredictedView = (hmdToEyeViewOffset * predictedViewFromWorld).InvertedHomogeneousTransform(); nuclear@0: Matrix4f matRenderXform = (hmdToEyeViewOffset * renderedViewFromWorld) * worldFromPredictedView; nuclear@0: nuclear@0: return matRenderXform.Inverted(); nuclear@0: } nuclear@0: nuclear@0: TimewarpMachine::TimewarpMachine() nuclear@0: : VsyncEnabled(false), nuclear@0: RenderInfo(), nuclear@0: CurrentPredictionValues(), nuclear@0: DistortionTimeCount(0), nuclear@0: DistortionTimeCurrentStart(0.0), nuclear@0: //DistortionTimes[], nuclear@0: DistortionTimeAverage(0.f), nuclear@0: //EyeRenderPoses[], nuclear@0: LastFramePresentFlushTime(0.0), nuclear@0: PresentFlushToPresentFlushSeconds(0.f), nuclear@0: NextFramePresentFlushTime(0.0) nuclear@0: { nuclear@0: #if defined(OVR_BUILD_DEBUG) nuclear@0: memset(DistortionTimes, 0, sizeof(DistortionTimes)); nuclear@0: #endif nuclear@0: nuclear@0: for ( int i = 0; i < 2; i++ ) nuclear@0: { nuclear@0: EyeRenderPoses[i] = Posef(); nuclear@0: } nuclear@0: } nuclear@0: nuclear@0: void TimewarpMachine::Reset(HmdRenderInfo& renderInfo, bool vsyncEnabled, double timeNow) nuclear@0: { nuclear@0: RenderInfo = renderInfo; nuclear@0: VsyncEnabled = vsyncEnabled; nuclear@0: CurrentPredictionValues = PredictionGetDeviceValues ( renderInfo, true, VsyncEnabled ); nuclear@0: PresentFlushToPresentFlushSeconds = 0.0f; nuclear@0: DistortionTimeCount = 0; nuclear@0: DistortionTimeAverage = 0.0f; nuclear@0: LastFramePresentFlushTime = timeNow; nuclear@0: AfterPresentAndFlush(timeNow); nuclear@0: } nuclear@0: nuclear@0: void TimewarpMachine::AfterPresentAndFlush(double timeNow) nuclear@0: { nuclear@0: AfterPresentWithoutFlush(); nuclear@0: AfterPresentFinishes ( timeNow ); nuclear@0: } nuclear@0: nuclear@0: void TimewarpMachine::AfterPresentWithoutFlush() nuclear@0: { nuclear@0: // We've only issued the Present - it hasn't actually finished (i.e. appeared) nuclear@0: // But we need to estimate when the next Present will appear, so extrapolate from previous data. nuclear@0: NextFramePresentFlushTime = LastFramePresentFlushTime + 2.0 * (double)PresentFlushToPresentFlushSeconds; nuclear@0: } nuclear@0: nuclear@0: void TimewarpMachine::AfterPresentFinishes(double timeNow) nuclear@0: { nuclear@0: // The present has now actually happened. 
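    // (Illustrative numbers only: with vsync on a 60Hz display this measured gap should
    // sit near 16.7ms; the value seeds the extrapolation of NextFramePresentFlushTime,
    // both here and in AfterPresentWithoutFlush() on the next frame.)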
    PresentFlushToPresentFlushSeconds = (float)(timeNow - LastFramePresentFlushTime);
    LastFramePresentFlushTime = timeNow;
    NextFramePresentFlushTime = timeNow + (double)PresentFlushToPresentFlushSeconds;
}


double TimewarpMachine::GetViewRenderPredictionTime()
{
    // Note that PredictionGetDeviceValues() did all the vsync-dependent thinking for us.
    return NextFramePresentFlushTime + CurrentPredictionValues.PresentFlushToRenderedScene;
}

bool TimewarpMachine::GetViewRenderPredictionPose(SensorStateReader* reader, Posef& pose)
{
    return reader->GetPoseAtTime(GetViewRenderPredictionTime(), pose);
}

double TimewarpMachine::GetVisiblePixelTimeStart()
{
    // Note that PredictionGetDeviceValues() did all the vsync-dependent thinking for us.
    return NextFramePresentFlushTime + CurrentPredictionValues.PresentFlushToTimewarpStart;
}

double TimewarpMachine::GetVisiblePixelTimeEnd()
{
    // Note that PredictionGetDeviceValues() did all the vsync-dependent thinking for us.
    return NextFramePresentFlushTime + CurrentPredictionValues.PresentFlushToTimewarpEnd;
}

bool TimewarpMachine::GetPredictedVisiblePixelPoseStart(SensorStateReader* reader, Posef& pose)
{
    return reader->GetPoseAtTime(GetVisiblePixelTimeStart(), pose);
}

bool TimewarpMachine::GetPredictedVisiblePixelPoseEnd(SensorStateReader* reader, Posef& pose)
{
    return reader->GetPoseAtTime(GetVisiblePixelTimeEnd(), pose);
}

bool TimewarpMachine::GetTimewarpDeltaStart(SensorStateReader* reader, Posef const &renderedPose, Matrix4f& transform)
{
    Posef visiblePose;
    if (!GetPredictedVisiblePixelPoseStart(reader, visiblePose))
    {
        return false;
    }

    Matrix4f visibleMatrix(visiblePose);
    Matrix4f renderedMatrix(renderedPose);
    Matrix4f identity; // doesn't matter for orientation-only timewarp
    transform = TimewarpComputePoseDelta ( renderedMatrix, visibleMatrix, identity );

    return true;
}

bool TimewarpMachine::GetTimewarpDeltaEnd(SensorStateReader* reader, Posef const &renderedPose, Matrix4f& transform)
{
    Posef visiblePose;
    if (!GetPredictedVisiblePixelPoseEnd(reader, visiblePose))
    {
        return false;
    }

    Matrix4f visibleMatrix(visiblePose);
    Matrix4f renderedMatrix(renderedPose);
    Matrix4f identity; // doesn't matter for orientation-only timewarp
    transform = TimewarpComputePoseDelta ( renderedMatrix, visibleMatrix, identity );

    return true;
}


// What time should the app wait until before starting distortion?
double TimewarpMachine::JustInTime_GetDistortionWaitUntilTime()
{
    if ( !VsyncEnabled || ( DistortionTimeCount < NumDistortionTimes ) )
    {
        // Don't wait.
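        // Returning a timestamp that is already in the past, so a caller that sleeps or
        // spins until this time will not block at all. This covers both the vsync-off case
        // and the warm-up period while the first NumDistortionTimes samples are collected.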
        return LastFramePresentFlushTime;
    }

    // Note - a 1-2ms fudge factor (for Windows timer granularity, etc.) is NOT added here,
    // because otherwise you end up adding multiple fudge factors!
    // So it's left for the calling app to add just one fudge factor.

    float howLongBeforePresent = DistortionTimeAverage;
    // Subtlety here. Technically, the correct time is NextFramePresentFlushTime - howLongBeforePresent.
    // However, if the app drops a frame, this then perpetuates it:
    // if the display is running at 60fps but the last frame was slow
    // (e.g. because of swapping or whatever), then NextFramePresentFlushTime is
    // 33ms in the future, not 16ms. Since this function supplies the
    // time to wait until, the app would indeed wait until ~32ms from now, so the
    // framerate drops to 30fps and never comes back up!
    // So we base the wait on the *ideal* framerate, not the *actual* framerate.
    return LastFramePresentFlushTime + (float)( CurrentPredictionValues.PresentFlushToPresentFlush - howLongBeforePresent );
}

double TimewarpMachine::JustInTime_AverageDistortionTime()
{
    if ( JustInTime_NeedDistortionTimeMeasurement() )
    {
        return 0.0;
    }
    return DistortionTimeAverage;
}

bool TimewarpMachine::JustInTime_NeedDistortionTimeMeasurement() const
{
    if (!VsyncEnabled)
    {
        return false;
    }
    return ( DistortionTimeCount < NumDistortionTimes );
}

void TimewarpMachine::JustInTime_BeforeDistortionTimeMeasurement(double timeNow)
{
    DistortionTimeCurrentStart = timeNow;
}

void TimewarpMachine::JustInTime_AfterDistortionTimeMeasurement(double timeNow)
{
    float timeDelta = (float)( timeNow - DistortionTimeCurrentStart );
    if ( DistortionTimeCount < NumDistortionTimes )
    {
        DistortionTimes[DistortionTimeCount] = timeDelta;
        DistortionTimeCount++;
        if ( DistortionTimeCount == NumDistortionTimes )
        {
            // Median: repeatedly pull out the largest remaining sample; after
            // NumDistortionTimes/2 passes, the last one pulled is (roughly) the median.
            float distortionTimeMedian = 0.0f;
            for ( int i = 0; i < NumDistortionTimes/2; i++ )
            {
                // Find the maximum time of those remaining.
                float maxTime = DistortionTimes[0];
                int maxIndex = 0;
                for ( int j = 1; j < NumDistortionTimes; j++ )
                {
                    if ( maxTime < DistortionTimes[j] )
                    {
                        maxTime = DistortionTimes[j];
                        maxIndex = j;
                    }
                }
                // Zero that max time, so we'll find the next-highest time.
                DistortionTimes[maxIndex] = 0.0f;
                distortionTimeMedian = maxTime;
            }
            DistortionTimeAverage = distortionTimeMedian;
        }
    }
    else
    {
        OVR_ASSERT ( !"Really didn't need more measurements, thanks" );
    }
}


}}}  // OVR::Util::Render
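
// ------------------------------------------------------------------------------------
// Illustrative only, not part of the SDK: a minimal sketch of one possible per-frame
// call order for TimewarpMachine, based solely on the member functions defined above.
// GetAppTimeSeconds(), RenderScene(), RenderDistortion() and Present() are hypothetical
// app-side helpers, which is why the sketch is kept inside "#if 0" and never compiled.
#if 0
static void ExampleTimewarpFrame ( OVR::Util::Render::TimewarpMachine &twm,
                                   OVR::Tracking::SensorStateReader *reader )
{
    using namespace OVR;
    using namespace OVR::Util::Render;

    // 1. Predict the head pose for when the rendered scene will be visible, and render with it.
    //    (Error handling of the bool return is omitted for brevity.)
    Posef renderPose;
    twm.GetViewRenderPredictionPose ( reader, renderPose );
    RenderScene ( renderPose );                                     // hypothetical app helper

    // 2. Optionally wait so the distortion pass finishes just before the next flush.
    double waitUntil = twm.JustInTime_GetDistortionWaitUntilTime();
    while ( GetAppTimeSeconds() < waitUntil ) { /* spin or sleep */ }

    // 3. Time the distortion pass (only while measurements are still being gathered),
    //    feeding it the timewarp matrices for the start/end of the visible-pixel interval.
    bool needTiming = twm.JustInTime_NeedDistortionTimeMeasurement();
    if ( needTiming )
    {
        twm.JustInTime_BeforeDistortionTimeMeasurement ( GetAppTimeSeconds() );
    }
    Matrix4f startDelta, endDelta;
    twm.GetTimewarpDeltaStart ( reader, renderPose, startDelta );
    twm.GetTimewarpDeltaEnd   ( reader, renderPose, endDelta );
    RenderDistortion ( startDelta, endDelta );                      // hypothetical app helper
    if ( needTiming )
    {
        twm.JustInTime_AfterDistortionTimeMeasurement ( GetAppTimeSeconds() );
    }

    // 4. Present, then tell the machine when the flush actually happened.
    Present();                                                      // hypothetical app helper
    twm.AfterPresentAndFlush ( GetAppTimeSeconds() );
}
#endif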