Index: third_party/WebKit/Source/modules/vr/VRDisplay.cpp |
diff --git a/third_party/WebKit/Source/modules/vr/VRDisplay.cpp b/third_party/WebKit/Source/modules/vr/VRDisplay.cpp |
index c35e7f133671d7e0e39b8124b7fbd0ee82798014..5fe8300e78a359b12437c928a75ae2adfacc3c65 100644 |
--- a/third_party/WebKit/Source/modules/vr/VRDisplay.cpp |
+++ b/third_party/WebKit/Source/modules/vr/VRDisplay.cpp |
@@ -8,8 +8,8 @@ |
#include "core/dom/DOMException.h" |
#include "core/dom/DocumentUserGestureToken.h" |
#include "core/dom/FrameRequestCallback.h" |
-#include "core/dom/Fullscreen.h" |
#include "core/dom/ScriptedAnimationController.h" |
+#include "core/frame/FrameView.h" |
#include "core/frame/UseCounter.h" |
#include "core/inspector/ConsoleMessage.h" |
#include "core/loader/DocumentLoader.h" |
@@ -26,19 +26,16 @@ |
#include "modules/webgl/WebGLRenderingContextBase.h" |
#include "platform/Histogram.h" |
#include "platform/UserGestureIndicator.h" |
+#include "platform/instrumentation/tracing/TraceEvent.h" |
#include "public/platform/Platform.h" |
#include "wtf/AutoReset.h" |
-#include <array> |
+#include <time.h> |
namespace blink { |
namespace { |
-// Magic numbers used to mark valid pose index values encoded in frame |
-// data. Must match the magic numbers used in vr_shell.cc. |
-static constexpr std::array<uint8_t, 2> kWebVrPosePixelMagicNumbers{{42, 142}}; |
- |
VREye stringToVREye(const String& whichEye) { |
if (whichEye == "left") |
return VREyeLeft; |
@@ -58,11 +55,7 @@ class VRDisplayFrameRequestCallback : public FrameRequestCallback { |
if (!doc) |
return; |
- // Need to divide by 1000 here because serviceScriptedAnimations expects |
- // time to be given in seconds. |
- m_vrDisplay->serviceScriptedAnimations( |
- doc->loader()->timing().pseudoWallTimeToMonotonicTime(highResTimeMs / |
- 1000.0)); |
+ m_vrDisplay->frameTick(highResTimeMs); |
} |
DEFINE_INLINE_VIRTUAL_TRACE() { |
@@ -76,6 +69,62 @@ class VRDisplayFrameRequestCallback : public FrameRequestCallback { |
} // namespace |
+StatTracker::StatTracker(unsigned int capacity) : m_capacity(capacity) {} |
+ |
+StatTracker::~StatTracker() = default; |
+ |
+void StatTracker::add(double item) { |
+ if (m_items.size() >= m_capacity) { |
+ m_items.pop_front(); |
+ } |
+ m_items.push_back(item); |
+} |
+ |
+void StatTracker::clear() { |
+ m_items.clear(); |
+} |
+ |
+bool StatTracker::hasPrediction() { |
+ return m_items.size() > 0; |
+} |
+ |
+double StatTracker::getPrediction() { |
+ assert(hasPrediction()); |
+ |
+  // If we have 3 or more items, ignore the min and max outliers and |
+  // average the rest. For 2 or fewer, minmax.first and minmax.second |
+  // will both be m_items.end(), so it's just a plain average. |
+ auto minmax = m_items.size() > 2 ? |
+ std::minmax_element(m_items.begin(), m_items.end()) : |
+ std::minmax_element(m_items.end(), m_items.end()); |
+ |
+ double sum = 0.0; |
+ int count = 0; |
+ //VLOG(2) << __FUNCTION__ << ": stat start"; |
+ for (auto it = m_items.begin(); it != m_items.end(); ++it) { |
+ //VLOG(2) << __FUNCTION__ << ": val=" << *it; |
+ if (it == minmax.first || it == minmax.second) continue; |
+ sum += *it; |
+ ++count; |
+ } |
+ //VLOG(2) << __FUNCTION__ << ": stat return " << sum / count; |
+ return sum / count; |
+} |
+ |
+static double getMonotonicTimestampMs() { |
+#if defined(OS_ANDROID) |
+ // Android surfacetexture timestamp compatible timer? See: |
+ // http://androidxref.com/7.0.0_r1/xref/frameworks/native/libs/gui/Surface.cpp#370 |
+ // http://androidxref.com/7.0.0_r1/xref/frameworks/rs/rsCppUtils.h#162 |
+ struct timespec t; |
+ t.tv_sec = t.tv_nsec = 0; |
+ clock_gettime(CLOCK_MONOTONIC, &t); |
+ return t.tv_sec * 1e3 + t.tv_nsec * 1e-6; |
+#else |
+ return 0.0; |
+#endif |
+} |
+ |
VRDisplay::VRDisplay(NavigatorVR* navigatorVR, |
device::mojom::blink::VRDisplayPtr display, |
device::mojom::blink::VRDisplayClientRequest request) |
@@ -84,26 +133,34 @@ VRDisplay::VRDisplay(NavigatorVR* navigatorVR, |
m_isConnected(false), |
m_isPresenting(false), |
m_isValidDeviceForPresenting(true), |
+ m_framesPending(0), |
m_canUpdateFramePose(true), |
+ m_canSubmitFramePose(false), |
m_capabilities(new VRDisplayCapabilities()), |
m_eyeParametersLeft(new VREyeParameters()), |
m_eyeParametersRight(new VREyeParameters()), |
m_depthNear(0.01), |
m_depthFar(10000.0), |
- m_fullscreenCheckTimer(this, &VRDisplay::onFullscreenCheck), |
m_contextGL(nullptr), |
m_animationCallbackRequested(false), |
m_inAnimationFrame(false), |
+ m_surfaceHandle(0), |
m_display(std::move(display)), |
- m_binding(this, std::move(request)) {} |
+ m_binding(this, std::move(request)), |
+ m_pose_client_binding(this) { |
+ VLOG(1) << __FUNCTION__ << ": CONSTRUCTOR this=" << (void*)this << " m_display=" << m_display << " ***********************************************************************************************************"; |
+} |
-VRDisplay::~VRDisplay() {} |
+VRDisplay::~VRDisplay() { |
+ VLOG(1) << __FUNCTION__ << ": DESTRUCTOR this=" << (void*)this << " m_display=" << m_display << " ***********************************************************************************************************"; |
+} |
VRController* VRDisplay::controller() { |
return m_navigatorVR->controller(); |
} |
void VRDisplay::update(const device::mojom::blink::VRDisplayInfoPtr& display) { |
+ VLOG(1) << __FUNCTION__ << ": displayName=" << display->displayName; |
m_displayId = display->index; |
m_displayName = display->displayName; |
m_isConnected = true; |
@@ -118,6 +175,7 @@ void VRDisplay::update(const device::mojom::blink::VRDisplayInfoPtr& display) { |
// Ignore non presenting delegate |
bool isValid = display->leftEye->renderWidth > 0; |
bool needOnPresentChange = false; |
+ VLOG(1) << __FUNCTION__ << ": m_isPresenting=" << m_isPresenting << " isValid=" << isValid << " m_isValidDeviceForPresenting=" << m_isValidDeviceForPresenting; |
if (m_isPresenting && isValid && !m_isValidDeviceForPresenting) { |
needOnPresentChange = true; |
} |
@@ -136,15 +194,27 @@ void VRDisplay::update(const device::mojom::blink::VRDisplayInfoPtr& display) { |
if (needOnPresentChange) { |
OnPresentChange(); |
} |
+ VLOG(1) << __FUNCTION__ << ": done"; |
} |
void VRDisplay::disconnected() { |
+ VLOG(1) << __FUNCTION__; |
if (m_isConnected) |
m_isConnected = !m_isConnected; |
} |
bool VRDisplay::getFrameData(VRFrameData* frameData) { |
+ TRACE_EVENT1("media", "klausw:getFrameData", "frame", m_framePose ? m_framePose->poseIndex + 1 : -1); |
+ double get_pose_ms = getMonotonicTimestampMs(); |
updatePose(); |
+ if (m_framePose) { |
+ double got_pose_ms = getMonotonicTimestampMs(); |
+ // TODO(klausw): why do these show 0.1ms deltas while updateFrame shows 1ms+? |
+ if (m_framePose->ts_getPose == 0.0) { |
+ m_framePose->ts_getPose = get_pose_ms; |
+ m_framePose->ts_gotPose = got_pose_ms; |
+ } |
+ } |
if (!m_framePose) |
return false; |
@@ -155,35 +225,88 @@ bool VRDisplay::getFrameData(VRFrameData* frameData) { |
if (m_depthNear == m_depthFar) |
return false; |
- return frameData->update(m_framePose, m_eyeParametersLeft, |
- m_eyeParametersRight, m_depthNear, m_depthFar); |
+ bool ret = frameData->update(m_framePose, m_eyeParametersLeft, |
+ m_eyeParametersRight, m_depthNear, m_depthFar); |
+ VLOG_IF(2, m_framePose) << __FUNCTION__ << ": frame " << m_framePose->poseIndex << ", recommended eye renderWidth/Height=" << m_eyeParametersLeft->renderWidth() << "x" << m_eyeParametersLeft->renderHeight(); |
+ |
+ return ret; |
} |
VRPose* VRDisplay::getPose() { |
+ TRACE_EVENT1("media", "klausw:getPose", "next frame ", m_framePose ? m_framePose->poseIndex + 1 : -1); |
+ double get_pose_ms = getMonotonicTimestampMs(); |
updatePose(); |
- |
- if (!m_framePose) |
+ double got_pose_ms = getMonotonicTimestampMs(); |
+ if (!m_framePose) { |
+ VLOG(2) << __FUNCTION__ << ": no pose for next frame "; |
return nullptr; |
+ } |
+ |
+ if (m_framePose->ts_getPose == 0.0) { |
+ m_framePose->ts_getPose = get_pose_ms; |
+ m_framePose->ts_gotPose = got_pose_ms; |
+ } |
VRPose* pose = VRPose::create(); |
pose->setPose(m_framePose); |
+ VLOG(2) << __FUNCTION__ << ": next frame " << m_framePose->poseIndex; |
return pose; |
} |
+void VRDisplay::requestPose() { |
+ VLOG(2) << __FUNCTION__; |
+ m_poseCallbackPending = true; |
+ m_display->GetPose(m_pose_client_binding.CreateInterfacePtrAndBind()); |
+} |
+ |
+void VRDisplay::OnPoseReceived(device::mojom::blink::VRPosePtr pose) { |
+ VLOG(2) << __FUNCTION__; |
+ m_framePose = std::move(pose); |
+ m_poseCallbackPending = false; |
+} |
+ |
void VRDisplay::updatePose() { |
+ VLOG(2) << __FUNCTION__ << ": display=" << m_display << " canUpdateFramePose=" << m_canUpdateFramePose; |
if (m_displayBlurred) { |
// WebVR spec says to return a null pose when the display is blurred. |
m_framePose = nullptr; |
return; |
} |
- if (m_canUpdateFramePose) { |
+  // If a pose was prefetched and is complete, do nothing. |
+  // If the prefetch is still pending, wait for it to complete. |
+  // If no prefetch was started, start it and wait for the result. |
+ if (m_canUpdateFramePose || m_poseCallbackPending) { |
if (!m_display) |
return; |
device::mojom::blink::VRPosePtr pose; |
- m_display->GetPose(&pose); |
- m_framePose = std::move(pose); |
- if (m_isPresenting) |
+ TRACE_EVENT1("media", "klausw:updatePose", "frame", m_framePose ? m_framePose->poseIndex + 1 : -1); |
+ double get_pose_ms = getMonotonicTimestampMs(); |
+ if (!m_poseCallbackPending) { |
+ requestPose(); |
+ } |
+ while (m_poseCallbackPending) { |
+ {VLOG(2) << __FUNCTION__ << ": wait for pose";} |
+ if (!m_pose_client_binding.WaitForIncomingMethodCall()) { |
+ LOG(ERROR) << __FUNCTION__ << ": failed to receive a pose"; |
+ } |
+ } |
+ if (!m_framePose) { |
+ VLOG(2) << __FUNCTION__ << ": did not get a pose"; |
+ return; |
+ } |
+ double got_pose_ms = getMonotonicTimestampMs(); |
+ m_canSubmitFramePose = true; |
+ VLOG(2) << __FUNCTION__ << ": updatePose to " << (m_framePose ? m_framePose->poseIndex : -1) << " took " << got_pose_ms - get_pose_ms << "ms"; |
+      // For this newly fetched frame, zero the timers and let getPose |
+      // or getFrameData update them. This way we ensure that we |
+      // measure the actual JS pause time exactly once, and don't |
+      // overwrite it on redundant calls. If we prefetch the pose, the |
+      // underlying fetch time wouldn't matter. |
+ m_framePose->ts_getPose = 0.0; |
+ m_framePose->ts_gotPose = 0.0; |
+ if (m_isPresenting) { |
m_canUpdateFramePose = false; |
+ } |
} |
} |
@@ -197,8 +320,10 @@ void VRDisplay::resetPose() { |
VREyeParameters* VRDisplay::getEyeParameters(const String& whichEye) { |
switch (stringToVREye(whichEye)) { |
case VREyeLeft: |
+ VLOG(1) << __FUNCTION__ << ": left renderWidth/Height=" << m_eyeParametersLeft->renderWidth() << "x" << m_eyeParametersLeft->renderHeight(); |
return m_eyeParametersLeft; |
case VREyeRight: |
+ VLOG(1) << __FUNCTION__ << ": right renderWidth/Height=" << m_eyeParametersRight->renderWidth() << "x" << m_eyeParametersRight->renderHeight(); |
return m_eyeParametersRight; |
default: |
return nullptr; |
@@ -206,6 +331,7 @@ VREyeParameters* VRDisplay::getEyeParameters(const String& whichEye) { |
} |
int VRDisplay::requestAnimationFrame(FrameRequestCallback* callback) { |
+ TRACE_EVENT1("media", "klausw:rAF", "frame", m_framePose ? m_framePose->poseIndex : -1); |
Document* doc = this->document(); |
if (!doc) |
return 0; |
@@ -226,6 +352,7 @@ void VRDisplay::cancelAnimationFrame(int id) { |
} |
void VRDisplay::OnBlur() { |
+ VLOG(1) << __FUNCTION__; |
m_displayBlurred = true; |
m_navigatorVR->enqueueVREvent(VRDisplayEvent::create( |
@@ -233,6 +360,7 @@ void VRDisplay::OnBlur() { |
} |
void VRDisplay::OnFocus() { |
+ VLOG(1) << __FUNCTION__; |
m_displayBlurred = false; |
// Restart our internal doc requestAnimationFrame callback, if it fired while |
// the display was blurred. |
@@ -249,9 +377,108 @@ void VRDisplay::OnFocus() { |
EventTypeNames::vrdisplayfocus, true, false, this, "")); |
} |
-void VRDisplay::serviceScriptedAnimations(double monotonicAnimationStartTime) { |
+#if 0 |
+static void delayTest() { |
+ VLOG(2) << __FUNCTION__ << ": delay got executed"; |
+} |
+#endif |
+ |
+ |
+ // TODO(klausw): refactor: |
+ // replace serviceScriptedAnimations with new callback chain sequence: |
+ // | tick getPose service_rAF | tick |
+ // | 0ms 4ms 8ms 16.7ms| 0ms |
+ // |
+ // // Save tickStart at VRDisplayFrameRequestCallback.handleEvent? |
+ // timerTick |
+ // tickStart=now() |
+ // postDelayedTask(getPoseForFrame, poseOffset) |
+ // getPoseForFrame |
+ // getPose // TODO: getPoseAsync(gotPoseCallback), then next step in gotPoseCallback |
+ // postDelayedTask(service_rAF_offset - (now() - tickStart)) |
+ |
+ |
+void VRDisplay::frameTick(double highResTimeMs) { |
+ m_tickIsScheduled = false; |
if (!m_scriptedAnimationController) |
return; |
+ TRACE_EVENT1("media", "frameTick", "frame", m_framePose ? m_framePose->poseIndex + 1 : -1); |
+ |
+#if 0 |
+ Platform::current()->currentThread()->getWebTaskRunner()->postDelayedTask( |
+ BLINK_FROM_HERE, WTF::bind(&delayTest), 8.0); |
+#endif |
+ |
+ double timeNow = getMonotonicTimestampMs(); |
+ m_frameTickStartMs = timeNow; |
+ m_highResTimeMsAtLastFrameTick = highResTimeMs; |
+ |
+ VLOG(2) << __FUNCTION__ << ": next frame " << (m_framePose ? m_framePose->poseIndex + 1 : -1) << " pending=" << m_framesPending; |
+ bool skipThisTick = false; |
+ if (m_framesPending > 1) { |
+ LOG(ERROR) << __FUNCTION__ << ": SHOULD NOT HAPPEN: too many frames pending, want <=1, have " << m_framesPending; |
+ skipThisTick = true; |
+ } else if (m_frameWaitingToSubmit) { |
+ VLOG(2) << __FUNCTION__ << ": have frame waiting to be submitted"; |
+    // Don't pump the timer loop; that will happen when the pending frame completes. |
+ return; |
+ } else if (m_framesPending > 0 && |
+ m_historyPreSubmitTimeMs.hasPrediction() && |
+ m_historyPostSubmitTimeMs.hasPrediction()) { |
+#if 0 |
+ double expectedRenderTime = m_historyPostSubmitTimeMs.getPrediction(); |
+ double expectedFinishRender = m_submitExecuteMs + expectedRenderTime - timeNow; |
+ // Simple logic - skip this frame if remaining rendering will take |
+ // more than one frame. |
+ if (expectedFinishRender > frameTime + 5.0) skipThisTick = true; |
+ VLOG(2) << __FUNCTION__ << ": expectedRenderTime=" << |
+ expectedRenderTime << " expectedFinishRender=" << |
+ expectedFinishRender << " frameTime=" << frameTime << " skip=" << |
+ skipThisTick; |
+#endif |
+#if 0 |
+ // Alternate attempt - try to throttle to 30fps if not hitting 60fps. |
+ // Doesn't work great at this point, needs further tuning and testing. |
+ double allowedTime = frameTime + 5.0; |
+ double expectedSubmit = m_historyPreSubmitTimeMs.getPrediction(); |
+ if (expectedFinishRender + expectedSubmit > allowedTime) skipThisTick = true; |
+ VLOG(2) << __FUNCTION__ << ": expectedRenderTime=" << expectedRenderTime << " expectedFinishRender=" << expectedFinishRender << " expectedSubmit=" << expectedSubmit << " frameTime=" << frameTime << " skip=" << skipThisTick; |
+#endif |
+ } |
+ { |
+ double frameTimeMs = 1000.0 / 60; // TODO(klausw): measure instead of assuming 60fps. |
+ if (timeNow - m_submitExecuteMs < frameTimeMs / 2) { |
+ // Last submission was less than half a frame ago. |
+ skipThisTick = true; |
+ VLOG(2) << __FUNCTION__ << ": last submit was " << timeNow - m_submitExecuteMs << "ms ago, skip=" << skipThisTick; |
+ } |
+ } |
+ if (skipThisTick) { |
+ VLOG(2) << __FUNCTION__ << ": SKIP FRAME, pending=" << m_framesPending; |
+ rescheduleAtNextTick(); |
+ return; |
+ } |
+ |
+ serviceScriptedAnimations(); |
+} |
+ |
+void VRDisplay::rescheduleAtNextTick() { |
+ if (m_tickIsScheduled) return; |
+ m_tickIsScheduled = true; |
+ |
+ Document* doc = m_navigatorVR->document(); |
+ if (!doc) { |
+ VLOG(2) << __FUNCTION__ << ": cannot skip frame, no document?!"; |
+ return; |
+ } |
+ doc->requestAnimationFrame(new VRDisplayFrameRequestCallback(this)); |
+} |
+ |
+void VRDisplay::serviceScriptedAnimations() { |
+ if (!m_scriptedAnimationController) |
+ return; |
+ TRACE_EVENT1("media", "serviceScriptedAnimations", "frame", m_framePose ? m_framePose->poseIndex + 1 : -1); |
+ |
AutoReset<bool> animating(&m_inAnimationFrame, true); |
m_animationCallbackRequested = false; |
@@ -261,8 +488,34 @@ void VRDisplay::serviceScriptedAnimations(double monotonicAnimationStartTime) { |
// so we don't fire the user's callback until the display is focused. |
if (m_displayBlurred) |
return; |
+ |
+ m_serviceStartMs = getMonotonicTimestampMs(); |
+ |
+ // Adjust high res timer since some time may have passed since the frame tick. |
+ double highResTimeMs = m_highResTimeMsAtLastFrameTick + |
+ m_serviceStartMs - m_frameTickStartMs; |
+ |
+ if (m_isPresenting && m_canUpdateFramePose) { |
+ // Prefetch pose asynchronously to avoid waiting for it. |
+ // TODO(klausw): align its timing so that it arrives just |
+ // in time depending on when JS usually asks for it. |
+ m_canUpdateFramePose = false; |
+ requestPose(); |
+ } |
+ |
+ Document* doc = m_navigatorVR->document(); |
+ if (!doc || !doc->loader()) { |
+ VLOG(2) << __FUNCTION__ << ": no document"; |
+ return; |
+ } |
+ // Need to divide by 1000 here because serviceScriptedAnimations expects |
+ // time to be given in seconds. |
+ double monotonicAnimationStartTime = |
+ doc->loader()->timing().pseudoWallTimeToMonotonicTime(highResTimeMs / |
+ 1000.0); |
m_scriptedAnimationController->serviceScriptedAnimations( |
monotonicAnimationStartTime); |
+ m_tickIsScheduled = true; |
} |
void ReportPresentationResult(PresentationResult result) { |
@@ -278,6 +531,7 @@ void ReportPresentationResult(PresentationResult result) { |
ScriptPromise VRDisplay::requestPresent(ScriptState* scriptState, |
const HeapVector<VRLayer>& layers) { |
+ VLOG(1) << __FUNCTION__; |
ExecutionContext* executionContext = scriptState->getExecutionContext(); |
UseCounter::count(executionContext, UseCounter::VRRequestPresent); |
if (!executionContext->isSecureContext()) { |
@@ -293,6 +547,7 @@ ScriptPromise VRDisplay::requestPresent(ScriptState* scriptState, |
// If the VRDisplay does not advertise the ability to present reject the |
// request. |
if (!m_capabilities->canPresent()) { |
+ VLOG(1) << __FUNCTION__ << ": REJECT: VRDisplay cannot present"; |
DOMException* exception = |
DOMException::create(InvalidStateError, "VRDisplay cannot present."); |
resolver->reject(exception); |
@@ -306,16 +561,21 @@ ScriptPromise VRDisplay::requestPresent(ScriptState* scriptState, |
// If the VRDisplay is already presenting, however, repeated calls are |
// allowed outside a user gesture so that the presented content may be |
// updated. |
+#define HACK_DISABLE_USER_GESTURE_REQUIREMENT 0 |
+#if !HACK_DISABLE_USER_GESTURE_REQUIREMENT |
if (firstPresent && !UserGestureIndicator::utilizeUserGesture()) { |
+ VLOG(1) << __FUNCTION__ << ": REJECT: API can only be initiated by a user gesture"; |
DOMException* exception = DOMException::create( |
InvalidStateError, "API can only be initiated by a user gesture."); |
resolver->reject(exception); |
ReportPresentationResult(PresentationResult::NotInitiatedByUserGesture); |
return promise; |
} |
+#endif |
// A valid number of layers must be provided in order to present. |
if (layers.size() == 0 || layers.size() > m_capabilities->maxLayers()) { |
+ VLOG(1) << __FUNCTION__ << ": REJECT: Invalid number of layers"; |
forceExitPresent(); |
DOMException* exception = |
DOMException::create(InvalidStateError, "Invalid number of layers."); |
@@ -327,6 +587,7 @@ ScriptPromise VRDisplay::requestPresent(ScriptState* scriptState, |
m_layer = layers[0]; |
if (!m_layer.source()) { |
+ VLOG(1) << __FUNCTION__ << ": REJECT: Invalid layer source"; |
forceExitPresent(); |
DOMException* exception = |
DOMException::create(InvalidStateError, "Invalid layer source."); |
@@ -337,8 +598,13 @@ ScriptPromise VRDisplay::requestPresent(ScriptState* scriptState, |
CanvasRenderingContext* renderingContext = |
m_layer.source()->renderingContext(); |
+ if (firstPresent) { |
+ m_sourceWidth = m_layer.source()->width(); |
+ m_sourceHeight = m_layer.source()->height(); |
+ } |
if (!renderingContext || !renderingContext->is3d()) { |
+ VLOG(1) << __FUNCTION__ << ": REJECT: Layer source must have a WebGLRenderingContext"; |
forceExitPresent(); |
DOMException* exception = DOMException::create( |
InvalidStateError, "Layer source must have a WebGLRenderingContext"); |
@@ -355,6 +621,7 @@ ScriptPromise VRDisplay::requestPresent(ScriptState* scriptState, |
if ((m_layer.leftBounds().size() != 0 && m_layer.leftBounds().size() != 4) || |
(m_layer.rightBounds().size() != 0 && |
m_layer.rightBounds().size() != 4)) { |
+ VLOG(1) << __FUNCTION__ << ": REJECT: Layer bounds must either be an empty array or have 4 values"; |
forceExitPresent(); |
DOMException* exception = DOMException::create( |
InvalidStateError, |
@@ -372,6 +639,7 @@ ScriptPromise VRDisplay::requestPresent(ScriptState* scriptState, |
} else if (firstPresent) { |
bool secureContext = scriptState->getExecutionContext()->isSecureContext(); |
if (!m_display) { |
+ VLOG(1) << __FUNCTION__ << ": REJECT: The service is no longer active"; |
forceExitPresent(); |
DOMException* exception = DOMException::create( |
InvalidStateError, "The service is no longer active."); |
@@ -384,6 +652,7 @@ ScriptPromise VRDisplay::requestPresent(ScriptState* scriptState, |
&VRDisplay::onPresentComplete, |
wrapPersistent(this)))); |
} else { |
+ m_isPresenting = true; |
updateLayerBounds(); |
resolver->resolve(); |
ReportPresentationResult(PresentationResult::SuccessAlreadyPresenting); |
@@ -393,6 +662,7 @@ ScriptPromise VRDisplay::requestPresent(ScriptState* scriptState, |
} |
void VRDisplay::onPresentComplete(bool success) { |
+ VLOG(1) << __FUNCTION__; |
if (success) { |
this->beginPresent(); |
} else { |
@@ -408,6 +678,7 @@ void VRDisplay::onPresentComplete(bool success) { |
} |
ScriptPromise VRDisplay::exitPresent(ScriptState* scriptState) { |
+ VLOG(1) << __FUNCTION__; |
ScriptPromiseResolver* resolver = ScriptPromiseResolver::create(scriptState); |
ScriptPromise promise = resolver->promise(); |
@@ -435,6 +706,7 @@ ScriptPromise VRDisplay::exitPresent(ScriptState* scriptState) { |
} |
void VRDisplay::beginPresent() { |
+ VLOG(1) << __FUNCTION__; |
Document* doc = this->document(); |
std::unique_ptr<UserGestureIndicator> gestureIndicator; |
if (m_capabilities->hasExternalDisplay()) { |
@@ -450,48 +722,6 @@ void VRDisplay::beginPresent() { |
PresentationResult::PresentationNotSupportedByDisplay); |
return; |
} else { |
- // TODO(klausw,crbug.com/655722): Need a proper VR compositor, but |
- // for the moment on mobile we'll just make the canvas fullscreen |
- // so that VrShell can pick it up through the standard (high |
- // latency) compositing path. |
- auto canvas = m_layer.source(); |
- auto inlineStyle = canvas->inlineStyle(); |
- if (inlineStyle) { |
- // THREE.js's VREffect sets explicit style.width/height on its rendering |
- // canvas based on the non-fullscreen window dimensions, and it keeps |
- // those unchanged when presenting. Unfortunately it appears that a |
- // fullscreened canvas just gets centered if it has explicitly set a |
- // size smaller than the fullscreen dimensions. Manually set size to |
- // 100% in this case and restore it when exiting fullscreen. This is a |
- // stopgap measure since THREE.js's usage appears legal according to the |
- // WebVR API spec. This will no longer be necessary once we can get rid |
- // of this fullscreen hack. |
- m_fullscreenOrigWidth = inlineStyle->getPropertyValue(CSSPropertyWidth); |
- if (!m_fullscreenOrigWidth.isNull()) { |
- canvas->setInlineStyleProperty(CSSPropertyWidth, "100%"); |
- } |
- m_fullscreenOrigHeight = inlineStyle->getPropertyValue(CSSPropertyHeight); |
- if (!m_fullscreenOrigHeight.isNull()) { |
- canvas->setInlineStyleProperty(CSSPropertyHeight, "100%"); |
- } |
- } else { |
- m_fullscreenOrigWidth = String(); |
- m_fullscreenOrigHeight = String(); |
- } |
- |
- if (doc) { |
- // Since the callback for requestPresent is asynchronous, we've lost our |
- // UserGestureToken, and need to create a new one to enter fullscreen. |
- gestureIndicator = WTF::wrapUnique( |
- new UserGestureIndicator(DocumentUserGestureToken::create( |
- doc, UserGestureToken::Status::PossiblyExistingGesture))); |
- } |
- Fullscreen::requestFullscreen(*canvas); |
- |
- // Check to see if the canvas is still the current fullscreen |
- // element once every 2 seconds. |
- m_fullscreenCheckTimer.startRepeating(2.0, BLINK_FROM_HERE); |
- m_reenteredFullscreen = false; |
} |
if (doc) { |
@@ -499,6 +729,14 @@ void VRDisplay::beginPresent() { |
WebURL(doc->url())); |
} |
+  // Stop unneeded compositor updates. We do so by hiding the view. We can't |
+  // hide the page, since doing so causes an assertion failure (!m_isHidden) |
+  // in DrawingBuffer::prepareTextureMailboxInternal(). Do this only when |
+  // we're actually presenting (m_isPresenting is true); see the matching |
+  // show() in forceExitPresent(). Otherwise the view could remain hidden if |
+  // the DON flow fails. |
+ m_navigatorVR->document()->view()->hide(); |
+ |
m_isPresenting = true; |
ReportPresentationResult(PresentationResult::Success); |
@@ -512,29 +750,30 @@ void VRDisplay::beginPresent() { |
} |
void VRDisplay::forceExitPresent() { |
+ VLOG(1) << __FUNCTION__; |
+ if (m_surfaceHandle) { |
+ m_renderingContext->setSurfaceHandle(0); |
+ m_surfaceHandle = 0; |
+ } |
+ |
if (m_isPresenting) { |
- if (!m_capabilities->hasExternalDisplay()) { |
- auto canvas = m_layer.source(); |
- Fullscreen::fullyExitFullscreen(canvas->document()); |
- m_fullscreenCheckTimer.stop(); |
- if (!m_fullscreenOrigWidth.isNull()) { |
- canvas->setInlineStyleProperty(CSSPropertyWidth, m_fullscreenOrigWidth); |
- m_fullscreenOrigWidth = String(); |
- } |
- if (!m_fullscreenOrigHeight.isNull()) { |
- canvas->setInlineStyleProperty(CSSPropertyWidth, |
- m_fullscreenOrigHeight); |
- m_fullscreenOrigHeight = String(); |
- } |
- } else { |
- // Can't get into this presentation mode, so nothing to do here. |
- } |
m_isPresenting = false; |
+ m_canUpdateFramePose = true; |
OnPresentChange(); |
+ m_navigatorVR->document()->view()->show(); |
} |
m_renderingContext = nullptr; |
m_contextGL = nullptr; |
+ m_contextProvider = nullptr; |
+ |
+ VLOG(2) << __FUNCTION__ << ": lost surface, reset m_framesPending " << m_framesPending << " => " << 0; |
+ m_framesPending = 0; |
+ if (m_frameWaitingToSubmit) { |
+ m_frameWaitingToSubmit = false; |
+ // Make sure not to lose a rAF call if we ignore a pending frame. |
+ serviceScriptedAnimations(); |
+ } |
} |
void VRDisplay::updateLayerBounds() { |
@@ -547,6 +786,15 @@ void VRDisplay::updateLayerBounds() { |
device::mojom::blink::VRLayerBoundsPtr rightBounds = |
device::mojom::blink::VRLayerBounds::New(); |
+  // For which pose should these new bounds take effect? |
+  // This depends on whether the new layer bounds were applied |
+  // before or after updating the pose. |
+ uint32_t forPoseIndex = m_framePose ? m_framePose->poseIndex : 0; |
+ if (m_canUpdateFramePose) ++forPoseIndex; |
+ |
+ leftBounds->forPoseIndex = forPoseIndex; |
+ rightBounds->forPoseIndex = forPoseIndex; |
+ |
if (m_layer.leftBounds().size() == 4) { |
leftBounds->left = m_layer.leftBounds()[0]; |
leftBounds->top = m_layer.leftBounds()[1]; |
@@ -573,6 +821,25 @@ void VRDisplay::updateLayerBounds() { |
rightBounds->height = 1.0f; |
} |
+#ifdef HACK_PSEUDOSCALING |
+ // TODO(klausw): this assumes that the eyes are arranged left to right with no gaps. |
+ int recWidth = m_eyeParametersLeft->renderWidth() + m_eyeParametersRight->renderWidth(); |
+ int recHeight = m_eyeParametersLeft->renderHeight(); |
+ if (m_sourceWidth != recWidth || m_sourceHeight != recHeight) { |
+ VLOG(1) << __FUNCTION__ << ": resize " << m_sourceWidth << "x" << m_sourceHeight << " to " << recWidth << "x" << recHeight; |
+ double scaleX = (double)m_sourceWidth / recWidth; |
+ double scaleY = (double)m_sourceHeight / recHeight; |
+ leftBounds->left *= scaleX; |
+ leftBounds->top *= scaleY; |
+ leftBounds->width *= scaleX; |
+ leftBounds->height *= scaleY; |
+ rightBounds->left *= scaleX; |
+ rightBounds->top *= scaleY; |
+ rightBounds->width *= scaleX; |
+ rightBounds->height *= scaleY; |
+ } |
+#endif |
+ |
m_display->UpdateLayerBounds(std::move(leftBounds), std::move(rightBounds)); |
} |
@@ -586,20 +853,26 @@ HeapVector<VRLayer> VRDisplay::getLayers() { |
return layers; |
} |
-void VRDisplay::submitFrame() { |
- if (!m_display) |
- return; |
- |
- Document* doc = this->document(); |
- if (!m_isPresenting) { |
- if (doc) { |
- doc->addConsoleMessage(ConsoleMessage::create( |
- RenderingMessageSource, WarningMessageLevel, |
- "submitFrame has no effect when the VRDisplay is not presenting.")); |
- } |
- return; |
+gpu::gles2::GLES2Interface* VRDisplay::getCompositingContext() { |
+ if (!m_contextProvider) { |
+ m_contextProvider = WTF::wrapUnique( |
+ Platform::current()->createSharedOffscreenGraphicsContext3DProvider()); |
} |
+ gpu::gles2::GLES2Interface* sharedContext = nullptr; |
+ if (m_contextProvider) { |
+ sharedContext = m_contextProvider->contextGL(); |
+ |
+ if (!sharedContext) |
+ return nullptr; |
+ } |
+ |
+ return sharedContext; |
+} |
+ |
+ |
+void VRDisplay::submitFrame() { |
+ Document* doc = this->document(); |
if (!m_inAnimationFrame) { |
if (doc) { |
doc->addConsoleMessage( |
@@ -610,44 +883,234 @@ void VRDisplay::submitFrame() { |
return; |
} |
+ if (!m_isPresenting) { |
+ if (doc) { |
+ doc->addConsoleMessage(ConsoleMessage::create( |
+ RenderingMessageSource, WarningMessageLevel, |
+ "submitFrame has no effect when the VRDisplay is not presenting.")); |
+ } |
+ return; |
+ } |
+ |
+ if (!m_canSubmitFramePose) { |
+ if (doc) { |
+ doc->addConsoleMessage(ConsoleMessage::create( |
+ RenderingMessageSource, WarningMessageLevel, |
+ "submitFrame rejected, this pose was already used.")); |
+ m_canUpdateFramePose = true; |
+ } |
+ return; |
+ } |
+ |
+ submitFrameAnyContext(); |
+} |
+ |
+void VRDisplay::submitFrameAnyContext() { |
+ TRACE_EVENT1("media", "klausw:submitFrame", "frame", m_framePose->poseIndex); |
+ |
+ if (!m_display) |
+ return; |
+ |
if (!m_contextGL) { |
// Something got confused, we can't submit frames without a GL context. |
return; |
} |
- // Write the frame number for the pose used into a bottom left pixel block. |
- // It is read by chrome/browser/android/vr_shell/vr_shell.cc to associate |
- // the correct corresponding pose for submission. |
+ if (m_framePose) { |
+ m_framePose->ts_frameStart = m_serviceStartMs; |
+ m_framePose->ts_submit = getMonotonicTimestampMs(); |
+ } |
+ |
+ GLenum error; |
+ (void)error; |
+ |
+#define DRAW_FRAME_COUNTER_PIXEL_FOR_DEBUGGING 0 |
+#if DRAW_FRAME_COUNTER_PIXEL_FOR_DEBUGGING |
auto gl = m_contextGL; |
- |
- // We must ensure that the WebGL app's GL state is preserved. We do this by |
- // calling low-level GL commands directly so that the rendering context's |
- // saved parameters don't get overwritten. |
- |
gl->Enable(GL_SCISSOR_TEST); |
- // Use a few pixels to ensure we get a clean color. The resolution for the |
- // WebGL buffer may not match the final rendered destination size, and |
- // texture filtering could interfere for single pixels. This isn't visible |
- // since the final rendering hides the edges via a vignette effect. |
- gl->Scissor(0, 0, 4, 4); |
+ gl->Scissor(0, 0, 200, 200); |
gl->ColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); |
int idx = m_framePose->poseIndex; |
// Careful with the arithmetic here. Float color 1.f is equivalent to int 255. |
- // Use the low byte of the index as the red component, and store an arbitrary |
- // magic number in green/blue. This number must match the reading code in |
- // vr_shell.cc. Avoid all-black/all-white. |
- gl->ClearColor((idx & 255) / 255.0f, kWebVrPosePixelMagicNumbers[0] / 255.0f, |
- kWebVrPosePixelMagicNumbers[1] / 255.0f, 1.0f); |
+ gl->ClearColor((idx & 255) / 255.0f, ((idx >> 8) & 255) / 255.0f, |
+ ((idx >> 16) & 255) / 255.0f, 1.0f); |
gl->Clear(GL_COLOR_BUFFER_BIT); |
- |
// Set the GL state back to what was set by the WebVR application. |
m_renderingContext->restoreScissorEnabled(); |
m_renderingContext->restoreScissorBox(); |
m_renderingContext->restoreColorMask(); |
m_renderingContext->restoreClearColor(); |
+#endif |
- m_display->SubmitFrame(m_framePose.Clone()); |
+ bool needSurfaceHandle = !m_surfaceHandle; |
+ |
+ int currentWidth = m_layer.source()->width(); |
+ int currentHeight = m_layer.source()->height(); |
+ if (currentWidth != m_sourceWidth || currentHeight != m_sourceHeight) { |
+ VLOG(1) << __FUNCTION__ << ": source size changed from " << |
+ m_sourceWidth << "x" << m_sourceHeight << |
+ " to " << |
+ currentWidth << "x" << currentHeight << |
+ ", m_surfaceHandleRequested=" << m_surfaceHandleRequested << |
+ ", m_surfaceHandle=" << m_surfaceHandle; |
+ if (m_surfaceHandleRequested) { |
+ // We can't currently request a new surface, a request is in flight. |
+ // Just do nothing, try again next iteration. |
+ VLOG(2) << __FUNCTION__ << ": do nothing, request in flight"; |
+ } else { |
+ m_sourceWidth = currentWidth; |
+ m_sourceHeight = currentHeight; |
+ needSurfaceHandle = true; |
+ } |
+ } |
+ |
+ if (needSurfaceHandle) { |
+ if (!m_surfaceHandleRequested) { |
+ auto callback = convertToBaseCallback(WTF::bind( |
+ &VRDisplay::onGetSurfaceHandleComplete, wrapPersistent(this))); |
+ m_surfaceHandleRequested = true; |
+ m_display->GetSurfaceHandle(m_sourceWidth, m_sourceHeight, callback); |
+ VLOG(2) << __FUNCTION__ << ": requesting surface handle for size " << |
+ m_sourceWidth << "x" << m_sourceHeight; |
+ } |
+ VLOG(2) << __FUNCTION__ << ": no surface available yet, discarding frame"; |
+ m_canUpdateFramePose = true; |
+ return; |
+ } |
+ |
+ auto framePose = m_framePose.Clone(); |
+ if (!framePose) { |
+ VLOG(2) << __FUNCTION__ << ": no pose!"; |
+ m_canUpdateFramePose = true; |
+ return; |
+ } |
+ |
+ auto poseIdx = framePose->poseIndex; |
+ |
+ assert(m_surfaceHandle != 0); |
+ assert(poseIdx != 0); |
+ |
+ if (m_framesPending > 0) { |
+ VLOG(1) << __FUNCTION__ << ": m_framesPending=" << m_framesPending << ", deferring frame " << poseIdx; |
+ m_frameWaitingToSubmit = true; |
+ return; |
+ } |
+ |
+ // From this point on we're committed to calling SwapBuffers and submitting |
+ // the frame. Don't do any early return from this point onward. |
+ VLOG(2) << __FUNCTION__ << ": decided to submitFrame, have pose for frame " << poseIdx; |
+ |
+ m_submitExecuteMs = getMonotonicTimestampMs(); |
+ m_historyPreSubmitTimeMs.add(m_submitExecuteMs - m_serviceStartMs); |
+ |
+  // This glFlush() is required: removing it breaks rendering.
+  // TODO(klausw): investigate why it is load-bearing here.
+ m_contextGL->Flush(); |
+ m_contextGL->SwapBuffers(); |
+  // A glFinish() here would avoid excessive wait-for-completion in the
+  // single-threaded CrBrowserMain message handler, but it is currently
+  // disabled. TODO(klausw): re-evaluate, or use a fence instead?
+  //m_contextGL->Finish();
+ |
+ auto callback = convertToBaseCallback(WTF::bind( |
+ &VRDisplay::onSubmitFrameComplete, wrapPersistent(this))); |
+ |
+ // Update pending frames, we have a surface handle. |
+ VLOG(2) << __FUNCTION__ << ": m_framesPending " << m_framesPending << " => " << m_framesPending + 1; |
+ ++m_framesPending; |
+ |
+ m_display->SubmitFrame(m_surfaceHandle, std::move(framePose), callback); |
+ m_canSubmitFramePose = false; // Illegal to reuse the same pose twice. |
m_canUpdateFramePose = true; |
+ VLOG(2) << __FUNCTION__ << ": submit done for frame " << poseIdx; |
+} |
+ |
+// Completion callback for VRDisplay::GetSurfaceHandle (requested from
+// submitFrameAnyContext when no surface exists or the source canvas size
+// changed). Installs the new surface handle on the rendering context,
+// detaching any previously attached surface first so the reconnect picks up
+// the new buffer size, then resets the timing statistics since performance
+// characteristics may differ at the new resolution.
+void VRDisplay::onGetSurfaceHandleComplete(int32_t surfaceHandle) {
+  VLOG(1) << __FUNCTION__ << ": VRDisplay new surface handle=" << surfaceHandle;
+  // The in-flight request has completed; future size changes may request a
+  // new surface again (see submitFrameAnyContext).
+  m_surfaceHandleRequested = false;
+
+  if (m_surfaceHandle) {
+    // We have a surface, disconnect it so that the reconnect
+    // below resizes it.
+    //
+    // TODO(klausw): simplify this, would be nice to do a call
+    // to m_renderingContext->setSurfaceHandle(current_handle)
+    // would do an in-place resize. See
+    // SurfaceTexture.setDefaultBufferSize documentation:
+    //
+    //     For OpenGL ES, the EGLSurface should be destroyed
+    //     (via eglDestroySurface), made not-current (via
+    //     eglMakeCurrent), and then recreated (via
+    //     eglCreateWindowSurface) to ensure that the new
+    //     default size has taken effect.
+    VLOG(2) << __FUNCTION__ << ": zero current surface handle to resize";
+    m_renderingContext->setSurfaceHandle(0);
+    m_surfaceHandle = 0;
+  }
+
+  // TODO(klausw): special-case in-place resize?
+  m_renderingContext->setSurfaceHandle(surfaceHandle);
+  m_surfaceHandle = surfaceHandle;
+
+  // Reset stats counters since a new resolution may have
+  // very different performance.
+  m_historyPreSubmitTimeMs.clear();
+  m_historyPostSubmitTimeMs.clear();
+  updateLayerBounds();
+}
+ |
+// Completion callback for VRDisplay::SubmitFrame. Decrements the pending
+// frame count for the active surface, records post-submit timing, and — if a
+// frame was deferred while a submit was in flight — submits it now and
+// decides whether to run the next animation frame immediately or wait for
+// the next tick (to avoid exceeding ~60fps).
+//
+// |surfaceHandle| identifies which surface the completed submit used;
+// callbacks for a stale or invalid surface are ignored. |renderMs| is only
+// logged here.
+void VRDisplay::onSubmitFrameComplete(int32_t surfaceHandle, uint32_t poseIndex, double renderMs) {
+  TRACE_EVENT1("media", "klausw:onSubmitFrameComplete", "frame", poseIndex);
+  VLOG(2) << __FUNCTION__ << ": surface " << surfaceHandle << ", frame " << poseIndex << ", render time " << renderMs << "ms";
+  if (!surfaceHandle) {
+    VLOG(2) << __FUNCTION__ << ": Ignoring callback for invalid surface 0";
+    return;
+  }
+  if (surfaceHandle != m_surfaceHandle) {
+    // The surface was replaced (e.g. resized) after this frame was
+    // submitted; its bookkeeping no longer applies.
+    VLOG(1) << __FUNCTION__ << ": Ignoring callback, was for surface " << surfaceHandle << " which is not the current surface " << m_surfaceHandle;
+    return;
+  }
+
+  // Update pending frames, the callback matches our active surface handle.
+  VLOG(2) << __FUNCTION__ << ": m_framesPending " << m_framesPending << " => " << m_framesPending - 1;
+  --m_framesPending; // should now be == 0
+
+  m_historyPostSubmitTimeMs.add(getMonotonicTimestampMs() - m_submitExecuteMs);
+
+  if (m_frameWaitingToSubmit) {
+    VLOG(2) << __FUNCTION__ << ": ready to submit deferred frame " << m_framePose->poseIndex;
+    submitFrameAnyContext();
+    // m_framesPending is now == 1
+    // NOTE(review): the flag is cleared only after submitFrameAnyContext()
+    // returns — presumably safe because that call is synchronous; confirm it
+    // cannot re-enter this path.
+    m_frameWaitingToSubmit = false;
+
+    // Run next rAF now, don't wait for next timer tick.
+    // TODO(klausw): try to stay tied to vsync and run at 30fps
+    // if not keeping up?
+    bool waitForNextTick = false;
+    if (m_historyPreSubmitTimeMs.hasPrediction()) {
+      // a . . b . . c . . d . . e . . f . . g
+      // |     |     |     |     |     |
+      double avgSubmitDuration = m_historyPreSubmitTimeMs.getPrediction();
+      double frameTimeMs = 1000.0 / 60; // TODO(klausw): measure instead of assuming 60fps.
+      double nextTickWaitMs = m_frameTickStartMs + frameTimeMs - m_submitExecuteMs;
+      if (avgSubmitDuration < frameTimeMs && nextTickWaitMs > 0 /* && nextTickWaitMs < frameTimeMs / 2 */) {
+        // Last submit took less than a frame, and the next tick is soon. Wait for it to avoid exceeding 60fps.
+        VLOG(2) << __FUNCTION__ << ": avg submit took " << avgSubmitDuration <<
+            "ms, next tick in " << nextTickWaitMs <<
+            "ms. reschedule at next tick after frame " << m_framePose->poseIndex;
+        waitForNextTick = true;
+      } else {
+        VLOG(2) << __FUNCTION__ << ": avg submit took " << avgSubmitDuration <<
+            "ms, next tick in " << nextTickWaitMs <<
+            "ms. reschedule now after frame " << m_framePose->poseIndex;
+      }
+    }
+    if (waitForNextTick) {
+      rescheduleAtNextTick();
+    } else {
+      // We're backlogged. Run new frame immediately.
+      serviceScriptedAnimations();
+    }
+  }
}
Document* VRDisplay::document() { |
@@ -655,6 +1118,7 @@ Document* VRDisplay::document() { |
} |
void VRDisplay::OnPresentChange() { |
+ VLOG(1) << __FUNCTION__; |
if (m_isPresenting && !m_isValidDeviceForPresenting) { |
VLOG(1) << __FUNCTION__ << ": device not valid, not sending event"; |
return; |
@@ -664,70 +1128,40 @@ void VRDisplay::OnPresentChange() { |
} |
+// VRDisplayClient callback: the device's display parameters changed.
+// Forwards the new VRDisplayInfo to update() to refresh cached state.
void VRDisplay::OnChanged(device::mojom::blink::VRDisplayInfoPtr display) {
+  VLOG(1) << __FUNCTION__;
  update(display);
}
+// VRDisplayClient callback: the device ended presentation (e.g. initiated
+// by the browser/UA rather than the page). Tears down presentation state.
void VRDisplay::OnExitPresent() {
+  VLOG(1) << __FUNCTION__;
  forceExitPresent();
}
+// Queues a "vrdisplayconnect" VRDisplayEvent for dispatch to the page when
+// this display becomes connected.
void VRDisplay::onConnected() {
+  VLOG(1) << __FUNCTION__;
  m_navigatorVR->enqueueVREvent(VRDisplayEvent::create(
      EventTypeNames::vrdisplayconnect, true, false, this, "connect"));
}
+// Queues a "vrdisplaydisconnect" VRDisplayEvent for dispatch to the page
+// when this display is disconnected.
void VRDisplay::onDisconnected() {
+  VLOG(1) << __FUNCTION__;
  m_navigatorVR->enqueueVREvent(VRDisplayEvent::create(
      EventTypeNames::vrdisplaydisconnect, true, false, this, "disconnect"));
}
+// VRDisplayClient callback: the display was activated (|reason| gives the
+// cause). Dispatched synchronously as a gesture event — unlike the queued
+// events above — so the page can call requestPresent from its handler.
void VRDisplay::OnActivate(device::mojom::blink::VRDisplayEventReason reason) {
+  VLOG(1) << __FUNCTION__;
  m_navigatorVR->dispatchVRGestureEvent(VRDisplayEvent::create(
      EventTypeNames::vrdisplayactivate, true, false, this, reason));
}
+// VRDisplayClient callback: the display was deactivated (|reason| gives the
+// cause). Queued for normal event dispatch; no user gesture is needed here.
void VRDisplay::OnDeactivate(
    device::mojom::blink::VRDisplayEventReason reason) {
+  VLOG(1) << __FUNCTION__;
  m_navigatorVR->enqueueVREvent(VRDisplayEvent::create(
      EventTypeNames::vrdisplaydeactivate, true, false, this, reason));
}
-void VRDisplay::onFullscreenCheck(TimerBase*) { |
- if (!m_isPresenting) { |
- m_fullscreenCheckTimer.stop(); |
- return; |
- } |
- // TODO: This is a temporary measure to track if fullscreen mode has been |
- // exited by the UA. If so we need to end VR presentation. Soon we won't |
- // depend on the Fullscreen API to fake VR presentation, so this will |
- // become unnessecary. Until that point, though, this seems preferable to |
- // adding a bunch of notification plumbing to Fullscreen. |
- if (!Fullscreen::isFullscreenElement(*m_layer.source())) { |
- // TODO(mthiesse): Due to asynchronous resizing, we might get kicked out of |
- // fullscreen when changing display parameters upon entering WebVR. So one |
- // time only, we reenter fullscreen after having left it; otherwise we exit |
- // presentation. |
- if (m_reenteredFullscreen) { |
- m_isPresenting = false; |
- OnPresentChange(); |
- m_fullscreenCheckTimer.stop(); |
- if (m_display) |
- m_display->ExitPresent(); |
- return; |
- } |
- m_reenteredFullscreen = true; |
- auto canvas = m_layer.source(); |
- Document* doc = this->document(); |
- std::unique_ptr<UserGestureIndicator> gestureIndicator; |
- if (doc) { |
- gestureIndicator = WTF::wrapUnique( |
- new UserGestureIndicator(DocumentUserGestureToken::create( |
- doc, UserGestureToken::Status::PossiblyExistingGesture))); |
- } |
- Fullscreen::requestFullscreen(*canvas); |
- } |
-} |
- |
ScriptedAnimationController& VRDisplay::ensureScriptedAnimationController( |
Document* doc) { |
if (!m_scriptedAnimationController) |