| OLD | NEW |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "modules/vr/VRDisplay.h" | 5 #include "modules/vr/VRDisplay.h" |
| 6 | 6 |
| 7 #include "core/css/StylePropertySet.h" | 7 #include "core/css/StylePropertySet.h" |
| 8 #include "core/dom/DOMException.h" | 8 #include "core/dom/DOMException.h" |
| 9 #include "core/dom/DocumentUserGestureToken.h" | 9 #include "core/dom/DocumentUserGestureToken.h" |
| 10 #include "core/dom/FrameRequestCallback.h" | 10 #include "core/dom/FrameRequestCallback.h" |
| 11 #include "core/dom/Fullscreen.h" | |
| 12 #include "core/dom/ScriptedAnimationController.h" | 11 #include "core/dom/ScriptedAnimationController.h" |
| 12 #include "core/frame/FrameView.h" |
| 13 #include "core/frame/UseCounter.h" | 13 #include "core/frame/UseCounter.h" |
| 14 #include "core/inspector/ConsoleMessage.h" | 14 #include "core/inspector/ConsoleMessage.h" |
| 15 #include "core/loader/DocumentLoader.h" | 15 #include "core/loader/DocumentLoader.h" |
| 16 #include "gpu/command_buffer/client/gles2_interface.h" | 16 #include "gpu/command_buffer/client/gles2_interface.h" |
| 17 #include "modules/EventTargetModules.h" | 17 #include "modules/EventTargetModules.h" |
| 18 #include "modules/vr/NavigatorVR.h" | 18 #include "modules/vr/NavigatorVR.h" |
| 19 #include "modules/vr/VRController.h" | 19 #include "modules/vr/VRController.h" |
| 20 #include "modules/vr/VRDisplayCapabilities.h" | 20 #include "modules/vr/VRDisplayCapabilities.h" |
| 21 #include "modules/vr/VREyeParameters.h" | 21 #include "modules/vr/VREyeParameters.h" |
| 22 #include "modules/vr/VRFrameData.h" | 22 #include "modules/vr/VRFrameData.h" |
| 23 #include "modules/vr/VRLayer.h" | 23 #include "modules/vr/VRLayer.h" |
| 24 #include "modules/vr/VRPose.h" | 24 #include "modules/vr/VRPose.h" |
| 25 #include "modules/vr/VRStageParameters.h" | 25 #include "modules/vr/VRStageParameters.h" |
| 26 #include "modules/webgl/WebGLRenderingContextBase.h" | 26 #include "modules/webgl/WebGLRenderingContextBase.h" |
| 27 #include "platform/Histogram.h" | 27 #include "platform/Histogram.h" |
| 28 #include "platform/UserGestureIndicator.h" | 28 #include "platform/UserGestureIndicator.h" |
| 29 #include "platform/instrumentation/tracing/TraceEvent.h" |
| 29 #include "public/platform/Platform.h" | 30 #include "public/platform/Platform.h" |
| 30 #include "wtf/AutoReset.h" | 31 #include "wtf/AutoReset.h" |
| 31 | 32 |
| 32 #include <array> | 33 #include <time.h> |
| 33 | 34 |
| 34 namespace blink { | 35 namespace blink { |
| 35 | 36 |
| 36 namespace { | 37 namespace { |
| 37 | 38 |
| 38 // Magic numbers used to mark valid pose index values encoded in frame | |
| 39 // data. Must match the magic numbers used in vr_shell.cc. | |
| 40 static constexpr std::array<uint8_t, 2> kWebVrPosePixelMagicNumbers{{42, 142}}; | |
| 41 | |
| 42 VREye stringToVREye(const String& whichEye) { | 39 VREye stringToVREye(const String& whichEye) { |
| 43 if (whichEye == "left") | 40 if (whichEye == "left") |
| 44 return VREyeLeft; | 41 return VREyeLeft; |
| 45 if (whichEye == "right") | 42 if (whichEye == "right") |
| 46 return VREyeRight; | 43 return VREyeRight; |
| 47 return VREyeNone; | 44 return VREyeNone; |
| 48 } | 45 } |
| 49 | 46 |
| 50 class VRDisplayFrameRequestCallback : public FrameRequestCallback { | 47 class VRDisplayFrameRequestCallback : public FrameRequestCallback { |
| 51 public: | 48 public: |
| 52 VRDisplayFrameRequestCallback(VRDisplay* vrDisplay) : m_vrDisplay(vrDisplay) { | 49 VRDisplayFrameRequestCallback(VRDisplay* vrDisplay) : m_vrDisplay(vrDisplay) { |
| 53 m_useLegacyTimeBase = true; | 50 m_useLegacyTimeBase = true; |
| 54 } | 51 } |
| 55 ~VRDisplayFrameRequestCallback() override {} | 52 ~VRDisplayFrameRequestCallback() override {} |
| 56 void handleEvent(double highResTimeMs) override { | 53 void handleEvent(double highResTimeMs) override { |
| 57 Document* doc = m_vrDisplay->document(); | 54 Document* doc = m_vrDisplay->document(); |
| 58 if (!doc) | 55 if (!doc) |
| 59 return; | 56 return; |
| 60 | 57 |
| 61 // Need to divide by 1000 here because serviceScriptedAnimations expects | 58 m_vrDisplay->frameTick(highResTimeMs); |
| 62 // time to be given in seconds. | |
| 63 m_vrDisplay->serviceScriptedAnimations( | |
| 64 doc->loader()->timing().pseudoWallTimeToMonotonicTime(highResTimeMs / | |
| 65 1000.0)); | |
| 66 } | 59 } |
| 67 | 60 |
| 68 DEFINE_INLINE_VIRTUAL_TRACE() { | 61 DEFINE_INLINE_VIRTUAL_TRACE() { |
| 69 visitor->trace(m_vrDisplay); | 62 visitor->trace(m_vrDisplay); |
| 70 | 63 |
| 71 FrameRequestCallback::trace(visitor); | 64 FrameRequestCallback::trace(visitor); |
| 72 } | 65 } |
| 73 | 66 |
| 74 Member<VRDisplay> m_vrDisplay; | 67 Member<VRDisplay> m_vrDisplay; |
| 75 }; | 68 }; |
| 76 | 69 |
| 77 } // namespace | 70 } // namespace |
| 78 | 71 |
| 72 StatTracker::StatTracker(unsigned int capacity) : m_capacity(capacity) {} |
| 73 |
| 74 StatTracker::~StatTracker() = default; |
| 75 |
| 76 void StatTracker::add(double item) { |
| 77 if (m_items.size() >= m_capacity) { |
| 78 m_items.pop_front(); |
| 79 } |
| 80 m_items.push_back(item); |
| 81 } |
| 82 |
| 83 void StatTracker::clear() { |
| 84 m_items.clear(); |
| 85 } |
| 86 |
| 87 bool StatTracker::hasPrediction() { |
| 88 return m_items.size() > 0; |
| 89 } |
| 90 |
| 91 double StatTracker::getPrediction() { |
| 92 assert(hasPrediction()); |
| 93 |
| 94 // If we have 3 or more items, ignore min and max outliers and |
| 95 // average the rest. For 2 or less, minmax.first and minmax.second |
| 96 // will both be m_items.end(), so it's just a plain average. |
| 97 auto minmax = m_items.size() > 2 ? |
| 98 std::minmax_element(m_items.begin(), m_items.end()) : |
| 99 std::minmax_element(m_items.end(), m_items.end()); |
| 100 |
| 101 double sum = 0.0; |
| 102 int count = 0; |
| 103 //VLOG(2) << __FUNCTION__ << ": stat start"; |
| 104 for (auto it = m_items.begin(); it != m_items.end(); ++it) { |
| 105 //VLOG(2) << __FUNCTION__ << ": val=" << *it; |
| 106 if (it == minmax.first || it == minmax.second) continue; |
| 107 sum += *it; |
| 108 ++count; |
| 109 } |
| 110 //VLOG(2) << __FUNCTION__ << ": stat return " << sum / count; |
| 111 return sum / count; |
| 112 } |
| 113 |
| 114 static double getMonotonicTimestampMs() { |
| 115 #if defined(OS_ANDROID) |
| 116 // Android surfacetexture timestamp compatible timer? See: |
| 117 // http://androidxref.com/7.0.0_r1/xref/frameworks/native/libs/gui/Surface.cpp#370 |
| 118 // http://androidxref.com/7.0.0_r1/xref/frameworks/rs/rsCppUtils.h#162 |
| 119 struct timespec t; |
| 120 t.tv_sec = t.tv_nsec = 0; |
| 121 clock_gettime(CLOCK_MONOTONIC, &t); |
| 122 return t.tv_sec * 1e3 + t.tv_nsec * 1e-6; |
| 123 #else |
| 124 return 0.0; |
| 125 #endif |
| 126 } |
| 127 |
| 79 VRDisplay::VRDisplay(NavigatorVR* navigatorVR, | 128 VRDisplay::VRDisplay(NavigatorVR* navigatorVR, |
| 80 device::mojom::blink::VRDisplayPtr display, | 129 device::mojom::blink::VRDisplayPtr display, |
| 81 device::mojom::blink::VRDisplayClientRequest request) | 130 device::mojom::blink::VRDisplayClientRequest request) |
| 82 : ContextLifecycleObserver(navigatorVR->document()), | 131 : ContextLifecycleObserver(navigatorVR->document()), |
| 83 m_navigatorVR(navigatorVR), | 132 m_navigatorVR(navigatorVR), |
| 84 m_isConnected(false), | 133 m_isConnected(false), |
| 85 m_isPresenting(false), | 134 m_isPresenting(false), |
| 86 m_isValidDeviceForPresenting(true), | 135 m_isValidDeviceForPresenting(true), |
| 136 m_framesPending(0), |
| 87 m_canUpdateFramePose(true), | 137 m_canUpdateFramePose(true), |
| 138 m_canSubmitFramePose(false), |
| 88 m_capabilities(new VRDisplayCapabilities()), | 139 m_capabilities(new VRDisplayCapabilities()), |
| 89 m_eyeParametersLeft(new VREyeParameters()), | 140 m_eyeParametersLeft(new VREyeParameters()), |
| 90 m_eyeParametersRight(new VREyeParameters()), | 141 m_eyeParametersRight(new VREyeParameters()), |
| 91 m_depthNear(0.01), | 142 m_depthNear(0.01), |
| 92 m_depthFar(10000.0), | 143 m_depthFar(10000.0), |
| 93 m_fullscreenCheckTimer(this, &VRDisplay::onFullscreenCheck), | |
| 94 m_contextGL(nullptr), | 144 m_contextGL(nullptr), |
| 95 m_animationCallbackRequested(false), | 145 m_animationCallbackRequested(false), |
| 96 m_inAnimationFrame(false), | 146 m_inAnimationFrame(false), |
| 147 m_surfaceHandle(0), |
| 97 m_display(std::move(display)), | 148 m_display(std::move(display)), |
| 98 m_binding(this, std::move(request)) {} | 149 m_binding(this, std::move(request)), |
| 150 m_pose_client_binding(this) { |
| 151 VLOG(1) << __FUNCTION__ << ": CONSTRUCTOR this=" << (void*)this << " m_display=" << m_display << " ***********************************************************************************************************"; |
| 152 } |
| 99 | 153 |
| 100 VRDisplay::~VRDisplay() {} | 154 VRDisplay::~VRDisplay() { |
| 155 VLOG(1) << __FUNCTION__ << ": DESTRUCTOR this=" << (void*)this << " m_display=" << m_display << " ***********************************************************************************************************"; |
| 156 } |
| 101 | 157 |
| 102 VRController* VRDisplay::controller() { | 158 VRController* VRDisplay::controller() { |
| 103 return m_navigatorVR->controller(); | 159 return m_navigatorVR->controller(); |
| 104 } | 160 } |
| 105 | 161 |
| 106 void VRDisplay::update(const device::mojom::blink::VRDisplayInfoPtr& display) { | 162 void VRDisplay::update(const device::mojom::blink::VRDisplayInfoPtr& display) { |
| 163 VLOG(1) << __FUNCTION__ << ": displayName=" << display->displayName; |
| 107 m_displayId = display->index; | 164 m_displayId = display->index; |
| 108 m_displayName = display->displayName; | 165 m_displayName = display->displayName; |
| 109 m_isConnected = true; | 166 m_isConnected = true; |
| 110 | 167 |
| 111 m_capabilities->setHasOrientation(display->capabilities->hasOrientation); | 168 m_capabilities->setHasOrientation(display->capabilities->hasOrientation); |
| 112 m_capabilities->setHasPosition(display->capabilities->hasPosition); | 169 m_capabilities->setHasPosition(display->capabilities->hasPosition); |
| 113 m_capabilities->setHasExternalDisplay( | 170 m_capabilities->setHasExternalDisplay( |
| 114 display->capabilities->hasExternalDisplay); | 171 display->capabilities->hasExternalDisplay); |
| 115 m_capabilities->setCanPresent(display->capabilities->canPresent); | 172 m_capabilities->setCanPresent(display->capabilities->canPresent); |
| 116 m_capabilities->setMaxLayers(display->capabilities->canPresent ? 1 : 0); | 173 m_capabilities->setMaxLayers(display->capabilities->canPresent ? 1 : 0); |
| 117 | 174 |
| 118 // Ignore non presenting delegate | 175 // Ignore non presenting delegate |
| 119 bool isValid = display->leftEye->renderWidth > 0; | 176 bool isValid = display->leftEye->renderWidth > 0; |
| 120 bool needOnPresentChange = false; | 177 bool needOnPresentChange = false; |
| 178 VLOG(1) << __FUNCTION__ << ": m_isPresenting=" << m_isPresenting << " isValid=" << isValid << " m_isValidDeviceForPresenting=" << m_isValidDeviceForPresenting; |
| 121 if (m_isPresenting && isValid && !m_isValidDeviceForPresenting) { | 179 if (m_isPresenting && isValid && !m_isValidDeviceForPresenting) { |
| 122 needOnPresentChange = true; | 180 needOnPresentChange = true; |
| 123 } | 181 } |
| 124 m_isValidDeviceForPresenting = isValid; | 182 m_isValidDeviceForPresenting = isValid; |
| 125 m_eyeParametersLeft->update(display->leftEye); | 183 m_eyeParametersLeft->update(display->leftEye); |
| 126 m_eyeParametersRight->update(display->rightEye); | 184 m_eyeParametersRight->update(display->rightEye); |
| 127 | 185 |
| 128 if (!display->stageParameters.is_null()) { | 186 if (!display->stageParameters.is_null()) { |
| 129 if (!m_stageParameters) | 187 if (!m_stageParameters) |
| 130 m_stageParameters = new VRStageParameters(); | 188 m_stageParameters = new VRStageParameters(); |
| 131 m_stageParameters->update(display->stageParameters); | 189 m_stageParameters->update(display->stageParameters); |
| 132 } else { | 190 } else { |
| 133 m_stageParameters = nullptr; | 191 m_stageParameters = nullptr; |
| 134 } | 192 } |
| 135 | 193 |
| 136 if (needOnPresentChange) { | 194 if (needOnPresentChange) { |
| 137 OnPresentChange(); | 195 OnPresentChange(); |
| 138 } | 196 } |
| 197 VLOG(1) << __FUNCTION__ << ": done"; |
| 139 } | 198 } |
| 140 | 199 |
| 141 void VRDisplay::disconnected() { | 200 void VRDisplay::disconnected() { |
| 201 VLOG(1) << __FUNCTION__; |
| 142 if (m_isConnected) | 202 if (m_isConnected) |
| 143 m_isConnected = !m_isConnected; | 203 m_isConnected = !m_isConnected; |
| 144 } | 204 } |
| 145 | 205 |
| 146 bool VRDisplay::getFrameData(VRFrameData* frameData) { | 206 bool VRDisplay::getFrameData(VRFrameData* frameData) { |
| 207 TRACE_EVENT1("media", "klausw:getFrameData", "frame", m_framePose ? m_framePose->poseIndex + 1 : -1); |
| 208 double get_pose_ms = getMonotonicTimestampMs(); |
| 147 updatePose(); | 209 updatePose(); |
| 210 if (m_framePose) { |
| 211 double got_pose_ms = getMonotonicTimestampMs(); |
| 212 // TODO(klausw): why do these show 0.1ms deltas while updateFrame shows 1ms+? |
| 213 if (m_framePose->ts_getPose == 0.0) { |
| 214 m_framePose->ts_getPose = get_pose_ms; |
| 215 m_framePose->ts_gotPose = got_pose_ms; |
| 216 } |
| 217 } |
| 148 | 218 |
| 149 if (!m_framePose) | 219 if (!m_framePose) |
| 150 return false; | 220 return false; |
| 151 | 221 |
| 152 if (!frameData) | 222 if (!frameData) |
| 153 return false; | 223 return false; |
| 154 | 224 |
| 155 if (m_depthNear == m_depthFar) | 225 if (m_depthNear == m_depthFar) |
| 156 return false; | 226 return false; |
| 157 | 227 |
| 158 return frameData->update(m_framePose, m_eyeParametersLeft, | 228 bool ret = frameData->update(m_framePose, m_eyeParametersLeft, |
| 159 m_eyeParametersRight, m_depthNear, m_depthFar); | 229 m_eyeParametersRight, m_depthNear, m_depthFar); |
| 230 VLOG_IF(2, m_framePose) << __FUNCTION__ << ": frame " << m_framePose->poseIndex << ", recommended eye renderWidth/Height=" << m_eyeParametersLeft->renderWidth() << "x" << m_eyeParametersLeft->renderHeight(); |
| 231 |
| 232 return ret; |
| 160 } | 233 } |
| 161 | 234 |
| 162 VRPose* VRDisplay::getPose() { | 235 VRPose* VRDisplay::getPose() { |
| 236 TRACE_EVENT1("media", "klausw:getPose", "next frame ", m_framePose ? m_framePose->poseIndex + 1 : -1); |
| 237 double get_pose_ms = getMonotonicTimestampMs(); |
| 163 updatePose(); | 238 updatePose(); |
| 239 double got_pose_ms = getMonotonicTimestampMs(); |
| 240 if (!m_framePose) { |
| 241 VLOG(2) << __FUNCTION__ << ": no pose for next frame "; |
| 242 return nullptr; |
| 243 } |
| 164 | 244 |
| 165 if (!m_framePose) | 245 if (m_framePose->ts_getPose == 0.0) { |
| 166 return nullptr; | 246 m_framePose->ts_getPose = get_pose_ms; |
| 247 m_framePose->ts_gotPose = got_pose_ms; |
| 248 } |
| 167 | 249 |
| 168 VRPose* pose = VRPose::create(); | 250 VRPose* pose = VRPose::create(); |
| 169 pose->setPose(m_framePose); | 251 pose->setPose(m_framePose); |
| 252 VLOG(2) << __FUNCTION__ << ": next frame " << m_framePose->poseIndex; |
| 170 return pose; | 253 return pose; |
| 171 } | 254 } |
| 172 | 255 |
| 256 void VRDisplay::requestPose() { |
| 257 VLOG(2) << __FUNCTION__; |
| 258 m_poseCallbackPending = true; |
| 259 m_display->GetPose(m_pose_client_binding.CreateInterfacePtrAndBind()); |
| 260 } |
| 261 |
| 262 void VRDisplay::OnPoseReceived(device::mojom::blink::VRPosePtr pose) { |
| 263 VLOG(2) << __FUNCTION__; |
| 264 m_framePose = std::move(pose); |
| 265 m_poseCallbackPending = false; |
| 266 } |
| 267 |
| 173 void VRDisplay::updatePose() { | 268 void VRDisplay::updatePose() { |
| 269 VLOG(2) << __FUNCTION__ << ": display=" << m_display << " canUpdateFramePose=" << m_canUpdateFramePose; |
| 174 if (m_displayBlurred) { | 270 if (m_displayBlurred) { |
| 175 // WebVR spec says to return a null pose when the display is blurred. | 271 // WebVR spec says to return a null pose when the display is blurred. |
| 176 m_framePose = nullptr; | 272 m_framePose = nullptr; |
| 177 return; | 273 return; |
| 178 } | 274 } |
| 179 if (m_canUpdateFramePose) { | 275 // If a pose was prefetched and is complete, do nothing. |
| 276 // If prefetch is still pending, wait for it to complete. |
| 277 // If no prefetch was started, start it and wait for result. |
| 278 if (m_canUpdateFramePose || m_poseCallbackPending) { |
| 180 if (!m_display) | 279 if (!m_display) |
| 181 return; | 280 return; |
| 182 device::mojom::blink::VRPosePtr pose; | 281 device::mojom::blink::VRPosePtr pose; |
| 183 m_display->GetPose(&pose); | 282 TRACE_EVENT1("media", "klausw:updatePose", "frame", m_framePose ? m_framePose->poseIndex + 1 : -1); |
| 184 m_framePose = std::move(pose); | 283 double get_pose_ms = getMonotonicTimestampMs(); |
| 185 if (m_isPresenting) | 284 if (!m_poseCallbackPending) { |
| 285 requestPose(); |
| 286 } |
| 287 while (m_poseCallbackPending) { |
| 288 {VLOG(2) << __FUNCTION__ << ": wait for pose";} |
| 289 if (!m_pose_client_binding.WaitForIncomingMethodCall()) { |
| 290 LOG(ERROR) << __FUNCTION__ << ": failed to receive a pose"; |
| 291 } |
| 292 } |
| 293 if (!m_framePose) { |
| 294 VLOG(2) << __FUNCTION__ << ": did not get a pose"; |
| 295 return; |
| 296 } |
| 297 double got_pose_ms = getMonotonicTimestampMs(); |
| 298 m_canSubmitFramePose = true; |
| 299 VLOG(2) << __FUNCTION__ << ": updatePose to " << (m_framePose ? m_framePose->poseIndex : -1) << " took " << got_pose_ms - get_pose_ms << "ms"; |
| 300 // For this newly fetched frame, zero the timers and let getPose |
| 301 // or getFrameData update it. This way we can ensure that we |
| 302 // measure the actual JS pause time one time, and don't overwrite |
| 303 // it for redundant calls. If we prefetch the pose, the underlying |
| 304 // fetch time wouldn't matter. |
| 305 m_framePose->ts_getPose = 0.0; |
| 306 m_framePose->ts_gotPose = 0.0; |
| 307 if (m_isPresenting) { |
| 186 m_canUpdateFramePose = false; | 308 m_canUpdateFramePose = false; |
| 309 } |
| 187 } | 310 } |
| 188 } | 311 } |
| 189 | 312 |
| 190 void VRDisplay::resetPose() { | 313 void VRDisplay::resetPose() { |
| 191 if (!m_display) | 314 if (!m_display) |
| 192 return; | 315 return; |
| 193 | 316 |
| 194 m_display->ResetPose(); | 317 m_display->ResetPose(); |
| 195 } | 318 } |
| 196 | 319 |
| 197 VREyeParameters* VRDisplay::getEyeParameters(const String& whichEye) { | 320 VREyeParameters* VRDisplay::getEyeParameters(const String& whichEye) { |
| 198 switch (stringToVREye(whichEye)) { | 321 switch (stringToVREye(whichEye)) { |
| 199 case VREyeLeft: | 322 case VREyeLeft: |
| 323 VLOG(1) << __FUNCTION__ << ": left renderWidth/Height=" << m_eyeParametersLeft->renderWidth() << "x" << m_eyeParametersLeft->renderHeight(); |
| 200 return m_eyeParametersLeft; | 324 return m_eyeParametersLeft; |
| 201 case VREyeRight: | 325 case VREyeRight: |
| 326 VLOG(1) << __FUNCTION__ << ": right renderWidth/Height=" << m_eyeParametersRight->renderWidth() << "x" << m_eyeParametersRight->renderHeight(); |
| 202 return m_eyeParametersRight; | 327 return m_eyeParametersRight; |
| 203 default: | 328 default: |
| 204 return nullptr; | 329 return nullptr; |
| 205 } | 330 } |
| 206 } | 331 } |
| 207 | 332 |
| 208 int VRDisplay::requestAnimationFrame(FrameRequestCallback* callback) { | 333 int VRDisplay::requestAnimationFrame(FrameRequestCallback* callback) { |
| 334 TRACE_EVENT1("media", "klausw:rAF", "frame", m_framePose ? m_framePose->poseIndex : -1); |
| 209 Document* doc = this->document(); | 335 Document* doc = this->document(); |
| 210 if (!doc) | 336 if (!doc) |
| 211 return 0; | 337 return 0; |
| 212 | 338 |
| 213 if (!m_animationCallbackRequested) { | 339 if (!m_animationCallbackRequested) { |
| 214 doc->requestAnimationFrame(new VRDisplayFrameRequestCallback(this)); | 340 doc->requestAnimationFrame(new VRDisplayFrameRequestCallback(this)); |
| 215 m_animationCallbackRequested = true; | 341 m_animationCallbackRequested = true; |
| 216 } | 342 } |
| 217 | 343 |
| 218 callback->m_useLegacyTimeBase = false; | 344 callback->m_useLegacyTimeBase = false; |
| 219 return ensureScriptedAnimationController(doc).registerCallback(callback); | 345 return ensureScriptedAnimationController(doc).registerCallback(callback); |
| 220 } | 346 } |
| 221 | 347 |
| 222 void VRDisplay::cancelAnimationFrame(int id) { | 348 void VRDisplay::cancelAnimationFrame(int id) { |
| 223 if (!m_scriptedAnimationController) | 349 if (!m_scriptedAnimationController) |
| 224 return; | 350 return; |
| 225 m_scriptedAnimationController->cancelCallback(id); | 351 m_scriptedAnimationController->cancelCallback(id); |
| 226 } | 352 } |
| 227 | 353 |
| 228 void VRDisplay::OnBlur() { | 354 void VRDisplay::OnBlur() { |
| 355 VLOG(1) << __FUNCTION__; |
| 229 m_displayBlurred = true; | 356 m_displayBlurred = true; |
| 230 | 357 |
| 231 m_navigatorVR->enqueueVREvent(VRDisplayEvent::create( | 358 m_navigatorVR->enqueueVREvent(VRDisplayEvent::create( |
| 232 EventTypeNames::vrdisplayblur, true, false, this, "")); | 359 EventTypeNames::vrdisplayblur, true, false, this, "")); |
| 233 } | 360 } |
| 234 | 361 |
| 235 void VRDisplay::OnFocus() { | 362 void VRDisplay::OnFocus() { |
| 363 VLOG(1) << __FUNCTION__; |
| 236 m_displayBlurred = false; | 364 m_displayBlurred = false; |
| 237 // Restart our internal doc requestAnimationFrame callback, if it fired while | 365 // Restart our internal doc requestAnimationFrame callback, if it fired while |
| 238 // the display was blurred. | 366 // the display was blurred. |
| 239 // TODO(bajones): Don't use doc->requestAnimationFrame() at all. Animation | 367 // TODO(bajones): Don't use doc->requestAnimationFrame() at all. Animation |
| 240 // frames should be tied to the presenting VR display (e.g. should be serviced | 368 // frames should be tied to the presenting VR display (e.g. should be serviced |
| 241 // by GVR library callbacks on Android), and not the doc frame rate. | 369 // by GVR library callbacks on Android), and not the doc frame rate. |
| 242 if (!m_animationCallbackRequested) { | 370 if (!m_animationCallbackRequested) { |
| 243 Document* doc = this->document(); | 371 Document* doc = this->document(); |
| 244 if (!doc) | 372 if (!doc) |
| 245 return; | 373 return; |
| 246 doc->requestAnimationFrame(new VRDisplayFrameRequestCallback(this)); | 374 doc->requestAnimationFrame(new VRDisplayFrameRequestCallback(this)); |
| 247 } | 375 } |
| 248 m_navigatorVR->enqueueVREvent(VRDisplayEvent::create( | 376 m_navigatorVR->enqueueVREvent(VRDisplayEvent::create( |
| 249 EventTypeNames::vrdisplayfocus, true, false, this, "")); | 377 EventTypeNames::vrdisplayfocus, true, false, this, "")); |
| 250 } | 378 } |
| 251 | 379 |
| 252 void VRDisplay::serviceScriptedAnimations(double monotonicAnimationStartTime) { | 380 #if 0 |
| 381 static void delayTest() { |
| 382 VLOG(2) << __FUNCTION__ << ": delay got executed"; |
| 383 } |
| 384 #endif |
| 385 |
| 386 |
| 387 // TODO(klausw): refactor: |
| 388 // replace serviceScriptedAnimations with new callback chain sequence: |
| 389 // | tick getPose service_rAF | tick |
| 390 // | 0ms 4ms 8ms 16.7ms| 0ms |
| 391 // |
| 392 // // Save tickStart at VRDisplayFrameRequestCallback.handleEvent? |
| 393 // timerTick |
| 394 // tickStart=now() |
| 395 // postDelayedTask(getPoseForFrame, poseOffset) |
| 396 // getPoseForFrame |
| 397 // getPose // TODO: getPoseAsync(gotPoseCallback), then next step in gotPoseCallback |
| 398 // postDelayedTask(service_rAF_offset - (now() - tickStart)) |
| 399 |
| 400 |
| 401 void VRDisplay::frameTick(double highResTimeMs) { |
| 402 m_tickIsScheduled = false; |
| 253 if (!m_scriptedAnimationController) | 403 if (!m_scriptedAnimationController) |
| 254 return; | 404 return; |
| 405 TRACE_EVENT1("media", "frameTick", "frame", m_framePose ? m_framePose->poseIndex + 1 : -1); |
| 406 |
| 407 #if 0 |
| 408 Platform::current()->currentThread()->getWebTaskRunner()->postDelayedTask( |
| 409 BLINK_FROM_HERE, WTF::bind(&delayTest), 8.0); |
| 410 #endif |
| 411 |
| 412 double timeNow = getMonotonicTimestampMs(); |
| 413 m_frameTickStartMs = timeNow; |
| 414 m_highResTimeMsAtLastFrameTick = highResTimeMs; |
| 415 |
| 416 VLOG(2) << __FUNCTION__ << ": next frame " << (m_framePose ? m_framePose->poseIndex + 1 : -1) << " pending=" << m_framesPending; |
| 417 bool skipThisTick = false; |
| 418 if (m_framesPending > 1) { |
| 419 LOG(ERROR) << __FUNCTION__ << ": SHOULD NOT HAPPEN: too many frames pending, want <=1, have " << m_framesPending; |
| 420 skipThisTick = true; |
| 421 } else if (m_frameWaitingToSubmit) { |
| 422 VLOG(2) << __FUNCTION__ << ": have frame waiting to be submitted"; |
| 423 // Don't pump the timer loop, that will happen when the pending frame completes. |
| 424 return; |
| 425 } else if (m_framesPending > 0 && |
| 426 m_historyPreSubmitTimeMs.hasPrediction() && |
| 427 m_historyPostSubmitTimeMs.hasPrediction()) { |
| 428 #if 0 |
| 429 double expectedRenderTime = m_historyPostSubmitTimeMs.getPrediction(); |
| 430 double expectedFinishRender = m_submitExecuteMs + expectedRenderTime - timeNow; |
| 431 // Simple logic - skip this frame if remaining rendering will take |
| 432 // more than one frame. |
| 433 if (expectedFinishRender > frameTime + 5.0) skipThisTick = true; |
| 434 VLOG(2) << __FUNCTION__ << ": expectedRenderTime=" << |
| 435 expectedRenderTime << " expectedFinishRender=" << |
| 436 expectedFinishRender << " frameTime=" << frameTime << " skip=" << |
| 437 skipThisTick; |
| 438 #endif |
| 439 #if 0 |
| 440 // Alternate attempt - try to throttle to 30fps if not hitting 60fps. |
| 441 // Doesn't work great at this point, needs further tuning and testing. |
| 442 double allowedTime = frameTime + 5.0; |
| 443 double expectedSubmit = m_historyPreSubmitTimeMs.getPrediction(); |
| 444 if (expectedFinishRender + expectedSubmit > allowedTime) skipThisTick = true; |
| 445 VLOG(2) << __FUNCTION__ << ": expectedRenderTime=" << expectedRenderTime << " expectedFinishRender=" << expectedFinishRender << " expectedSubmit=" << expectedSubmit << " frameTime=" << frameTime << " skip=" << skipThisTick; |
| 446 #endif |
| 447 } |
| 448 { |
| 449 double frameTimeMs = 1000.0 / 60; // TODO(klausw): measure instead of assuming 60fps. |
| 450 if (timeNow - m_submitExecuteMs < frameTimeMs / 2) { |
| 451 // Last submission was less than half a frame ago. |
| 452 skipThisTick = true; |
| 453 VLOG(2) << __FUNCTION__ << ": last submit was " << timeNow - m_submitExecuteMs << "ms ago, skip=" << skipThisTick; |
| 454 } |
| 455 } |
| 456 if (skipThisTick) { |
| 457 VLOG(2) << __FUNCTION__ << ": SKIP FRAME, pending=" << m_framesPending; |
| 458 rescheduleAtNextTick(); |
| 459 return; |
| 460 } |
| 461 |
| 462 serviceScriptedAnimations(); |
| 463 } |
| 464 |
| 465 void VRDisplay::rescheduleAtNextTick() { |
| 466 if (m_tickIsScheduled) return; |
| 467 m_tickIsScheduled = true; |
| 468 |
| 469 Document* doc = m_navigatorVR->document(); |
| 470 if (!doc) { |
| 471 VLOG(2) << __FUNCTION__ << ": cannot skip frame, no document?!"; |
| 472 return; |
| 473 } |
| 474 doc->requestAnimationFrame(new VRDisplayFrameRequestCallback(this)); |
| 475 } |
| 476 |
| 477 void VRDisplay::serviceScriptedAnimations() { |
| 478 if (!m_scriptedAnimationController) |
| 479 return; |
| 480 TRACE_EVENT1("media", "serviceScriptedAnimations", "frame", m_framePose ? m_framePose->poseIndex + 1 : -1); |
| 481 |
| 255 AutoReset<bool> animating(&m_inAnimationFrame, true); | 482 AutoReset<bool> animating(&m_inAnimationFrame, true); |
| 256 m_animationCallbackRequested = false; | 483 m_animationCallbackRequested = false; |
| 257 | 484 |
| 258 // We use an internal rAF callback to run the animation loop at the display | 485 // We use an internal rAF callback to run the animation loop at the display |
| 259 // speed, and run the user's callback after our internal callback fires. | 486 // speed, and run the user's callback after our internal callback fires. |
| 260 // However, when the display is blurred, we want to pause the animation loop, | 487 // However, when the display is blurred, we want to pause the animation loop, |
| 261 // so we don't fire the user's callback until the display is focused. | 488 // so we don't fire the user's callback until the display is focused. |
| 262 if (m_displayBlurred) | 489 if (m_displayBlurred) |
| 263 return; | 490 return; |
| 491 |
| 492 m_serviceStartMs = getMonotonicTimestampMs(); |
| 493 |
| 494 // Adjust high res timer since some time may have passed since the frame tick. |
| 495 double highResTimeMs = m_highResTimeMsAtLastFrameTick + |
| 496 m_serviceStartMs - m_frameTickStartMs; |
| 497 |
| 498 if (m_isPresenting && m_canUpdateFramePose) { |
| 499 // Prefetch pose asynchronously to avoid waiting for it. |
| 500 // TODO(klausw): align its timing so that it arrives just |
| 501 // in time depending on when JS usually asks for it. |
| 502 m_canUpdateFramePose = false; |
| 503 requestPose(); |
| 504 } |
| 505 |
| 506 Document* doc = m_navigatorVR->document(); |
| 507 if (!doc || !doc->loader()) { |
| 508 VLOG(2) << __FUNCTION__ << ": no document"; |
| 509 return; |
| 510 } |
| 511 // Need to divide by 1000 here because serviceScriptedAnimations expects |
| 512 // time to be given in seconds. |
| 513 double monotonicAnimationStartTime = |
| 514 doc->loader()->timing().pseudoWallTimeToMonotonicTime(highResTimeMs / |
| 515 1000.0); |
| 264 m_scriptedAnimationController->serviceScriptedAnimations( | 516 m_scriptedAnimationController->serviceScriptedAnimations( |
| 265 monotonicAnimationStartTime); | 517 monotonicAnimationStartTime); |
| 518 m_tickIsScheduled = true; |
| 266 } | 519 } |
| 267 | 520 |
| 268 void ReportPresentationResult(PresentationResult result) { | 521 void ReportPresentationResult(PresentationResult result) { |
| 269 // Note that this is called twice for each call to requestPresent - | 522 // Note that this is called twice for each call to requestPresent - |
| 270 // one to declare that requestPresent was called, and one for the | 523 // one to declare that requestPresent was called, and one for the |
| 271 // result. | 524 // result. |
| 272 DEFINE_STATIC_LOCAL( | 525 DEFINE_STATIC_LOCAL( |
| 273 EnumerationHistogram, vrPresentationResultHistogram, | 526 EnumerationHistogram, vrPresentationResultHistogram, |
| 274 ("VRDisplayPresentResult", | 527 ("VRDisplayPresentResult", |
| 275 static_cast<int>(PresentationResult::PresentationResultMax))); | 528 static_cast<int>(PresentationResult::PresentationResultMax))); |
| 276 vrPresentationResultHistogram.count(static_cast<int>(result)); | 529 vrPresentationResultHistogram.count(static_cast<int>(result)); |
| 277 } | 530 } |
| 278 | 531 |
| 279 ScriptPromise VRDisplay::requestPresent(ScriptState* scriptState, | 532 ScriptPromise VRDisplay::requestPresent(ScriptState* scriptState, |
| 280 const HeapVector<VRLayer>& layers) { | 533 const HeapVector<VRLayer>& layers) { |
| 534 VLOG(1) << __FUNCTION__; |
| 281 ExecutionContext* executionContext = scriptState->getExecutionContext(); | 535 ExecutionContext* executionContext = scriptState->getExecutionContext(); |
| 282 UseCounter::count(executionContext, UseCounter::VRRequestPresent); | 536 UseCounter::count(executionContext, UseCounter::VRRequestPresent); |
| 283 if (!executionContext->isSecureContext()) { | 537 if (!executionContext->isSecureContext()) { |
| 284 UseCounter::count(executionContext, | 538 UseCounter::count(executionContext, |
| 285 UseCounter::VRRequestPresentInsecureOrigin); | 539 UseCounter::VRRequestPresentInsecureOrigin); |
| 286 } | 540 } |
| 287 | 541 |
| 288 ReportPresentationResult(PresentationResult::Requested); | 542 ReportPresentationResult(PresentationResult::Requested); |
| 289 | 543 |
| 290 ScriptPromiseResolver* resolver = ScriptPromiseResolver::create(scriptState); | 544 ScriptPromiseResolver* resolver = ScriptPromiseResolver::create(scriptState); |
| 291 ScriptPromise promise = resolver->promise(); | 545 ScriptPromise promise = resolver->promise(); |
| 292 | 546 |
| 293 // If the VRDisplay does not advertise the ability to present reject the | 547 // If the VRDisplay does not advertise the ability to present reject the |
| 294 // request. | 548 // request. |
| 295 if (!m_capabilities->canPresent()) { | 549 if (!m_capabilities->canPresent()) { |
| 550 VLOG(1) << __FUNCTION__ << ": REJECT: VRDisplay cannot present"; |
| 296 DOMException* exception = | 551 DOMException* exception = |
| 297 DOMException::create(InvalidStateError, "VRDisplay cannot present."); | 552 DOMException::create(InvalidStateError, "VRDisplay cannot present."); |
| 298 resolver->reject(exception); | 553 resolver->reject(exception); |
| 299 ReportPresentationResult(PresentationResult::VRDisplayCannotPresent); | 554 ReportPresentationResult(PresentationResult::VRDisplayCannotPresent); |
| 300 return promise; | 555 return promise; |
| 301 } | 556 } |
| 302 | 557 |
| 303 bool firstPresent = !m_isPresenting; | 558 bool firstPresent = !m_isPresenting; |
| 304 | 559 |
| 305 // Initiating VR presentation is only allowed in response to a user gesture. | 560 // Initiating VR presentation is only allowed in response to a user gesture. |
| 306 // If the VRDisplay is already presenting, however, repeated calls are | 561 // If the VRDisplay is already presenting, however, repeated calls are |
| 307 // allowed outside a user gesture so that the presented content may be | 562 // allowed outside a user gesture so that the presented content may be |
| 308 // updated. | 563 // updated. |
| 564 #define HACK_DISABLE_USER_GESTURE_REQUIREMENT 0 |
| 565 #if !HACK_DISABLE_USER_GESTURE_REQUIREMENT |
| 309 if (firstPresent && !UserGestureIndicator::utilizeUserGesture()) { | 566 if (firstPresent && !UserGestureIndicator::utilizeUserGesture()) { |
| 567 VLOG(1) << __FUNCTION__ << ": REJECT: API can only be initiated by a user gesture"; |
| 310 DOMException* exception = DOMException::create( | 568 DOMException* exception = DOMException::create( |
| 311 InvalidStateError, "API can only be initiated by a user gesture."); | 569 InvalidStateError, "API can only be initiated by a user gesture."); |
| 312 resolver->reject(exception); | 570 resolver->reject(exception); |
| 313 ReportPresentationResult(PresentationResult::NotInitiatedByUserGesture); | 571 ReportPresentationResult(PresentationResult::NotInitiatedByUserGesture); |
| 314 return promise; | 572 return promise; |
| 315 } | 573 } |
| 574 #endif |
| 316 | 575 |
| 317 // A valid number of layers must be provided in order to present. | 576 // A valid number of layers must be provided in order to present. |
| 318 if (layers.size() == 0 || layers.size() > m_capabilities->maxLayers()) { | 577 if (layers.size() == 0 || layers.size() > m_capabilities->maxLayers()) { |
| 578 VLOG(1) << __FUNCTION__ << ": REJECT: Invalid number of layers"; |
| 319 forceExitPresent(); | 579 forceExitPresent(); |
| 320 DOMException* exception = | 580 DOMException* exception = |
| 321 DOMException::create(InvalidStateError, "Invalid number of layers."); | 581 DOMException::create(InvalidStateError, "Invalid number of layers."); |
| 322 resolver->reject(exception); | 582 resolver->reject(exception); |
| 323 ReportPresentationResult(PresentationResult::InvalidNumberOfLayers); | 583 ReportPresentationResult(PresentationResult::InvalidNumberOfLayers); |
| 324 return promise; | 584 return promise; |
| 325 } | 585 } |
| 326 | 586 |
| 327 m_layer = layers[0]; | 587 m_layer = layers[0]; |
| 328 | 588 |
| 329 if (!m_layer.source()) { | 589 if (!m_layer.source()) { |
| 590 VLOG(1) << __FUNCTION__ << ": REJECT: Invalid layer source"; |
| 330 forceExitPresent(); | 591 forceExitPresent(); |
| 331 DOMException* exception = | 592 DOMException* exception = |
| 332 DOMException::create(InvalidStateError, "Invalid layer source."); | 593 DOMException::create(InvalidStateError, "Invalid layer source."); |
| 333 resolver->reject(exception); | 594 resolver->reject(exception); |
| 334 ReportPresentationResult(PresentationResult::InvalidLayerSource); | 595 ReportPresentationResult(PresentationResult::InvalidLayerSource); |
| 335 return promise; | 596 return promise; |
| 336 } | 597 } |
| 337 | 598 |
| 338 CanvasRenderingContext* renderingContext = | 599 CanvasRenderingContext* renderingContext = |
| 339 m_layer.source()->renderingContext(); | 600 m_layer.source()->renderingContext(); |
| 601 if (firstPresent) { |
| 602 m_sourceWidth = m_layer.source()->width(); |
| 603 m_sourceHeight = m_layer.source()->height(); |
| 604 } |
| 340 | 605 |
| 341 if (!renderingContext || !renderingContext->is3d()) { | 606 if (!renderingContext || !renderingContext->is3d()) { |
| 607 VLOG(1) << __FUNCTION__ << ": REJECT: Layer source must have a WebGLRenderin
gContext"; |
| 342 forceExitPresent(); | 608 forceExitPresent(); |
| 343 DOMException* exception = DOMException::create( | 609 DOMException* exception = DOMException::create( |
| 344 InvalidStateError, "Layer source must have a WebGLRenderingContext"); | 610 InvalidStateError, "Layer source must have a WebGLRenderingContext"); |
| 345 resolver->reject(exception); | 611 resolver->reject(exception); |
| 346 ReportPresentationResult( | 612 ReportPresentationResult( |
| 347 PresentationResult::LayerSourceMissingWebGLContext); | 613 PresentationResult::LayerSourceMissingWebGLContext); |
| 348 return promise; | 614 return promise; |
| 349 } | 615 } |
| 350 | 616 |
| 351 // Save the WebGL script and underlying GL contexts for use by submitFrame(). | 617 // Save the WebGL script and underlying GL contexts for use by submitFrame(). |
| 352 m_renderingContext = toWebGLRenderingContextBase(renderingContext); | 618 m_renderingContext = toWebGLRenderingContextBase(renderingContext); |
| 353 m_contextGL = m_renderingContext->contextGL(); | 619 m_contextGL = m_renderingContext->contextGL(); |
| 354 | 620 |
| 355 if ((m_layer.leftBounds().size() != 0 && m_layer.leftBounds().size() != 4) || | 621 if ((m_layer.leftBounds().size() != 0 && m_layer.leftBounds().size() != 4) || |
| 356 (m_layer.rightBounds().size() != 0 && | 622 (m_layer.rightBounds().size() != 0 && |
| 357 m_layer.rightBounds().size() != 4)) { | 623 m_layer.rightBounds().size() != 4)) { |
| 624 VLOG(1) << __FUNCTION__ << ": REJECT: Layer bounds must either be an empty array or have 4 values"; |
| 358 forceExitPresent(); | 625 forceExitPresent(); |
| 359 DOMException* exception = DOMException::create( | 626 DOMException* exception = DOMException::create( |
| 360 InvalidStateError, | 627 InvalidStateError, |
| 361 "Layer bounds must either be an empty array or have 4 values"); | 628 "Layer bounds must either be an empty array or have 4 values"); |
| 362 resolver->reject(exception); | 629 resolver->reject(exception); |
| 363 ReportPresentationResult(PresentationResult::InvalidLayerBounds); | 630 ReportPresentationResult(PresentationResult::InvalidLayerBounds); |
| 364 return promise; | 631 return promise; |
| 365 } | 632 } |
| 366 | 633 |
| 367 if (!m_pendingPresentResolvers.isEmpty()) { | 634 if (!m_pendingPresentResolvers.isEmpty()) { |
| 368 // If we are waiting on the results of a previous requestPresent call don't | 635 // If we are waiting on the results of a previous requestPresent call don't |
| 369 // fire a new request, just cache the resolver and resolve it when the | 636 // fire a new request, just cache the resolver and resolve it when the |
| 370 // original request returns. | 637 // original request returns. |
| 371 m_pendingPresentResolvers.append(resolver); | 638 m_pendingPresentResolvers.append(resolver); |
| 372 } else if (firstPresent) { | 639 } else if (firstPresent) { |
| 373 bool secureContext = scriptState->getExecutionContext()->isSecureContext(); | 640 bool secureContext = scriptState->getExecutionContext()->isSecureContext(); |
| 374 if (!m_display) { | 641 if (!m_display) { |
| 642 VLOG(1) << __FUNCTION__ << ": REJECT: The service is no longer active"; |
| 375 forceExitPresent(); | 643 forceExitPresent(); |
| 376 DOMException* exception = DOMException::create( | 644 DOMException* exception = DOMException::create( |
| 377 InvalidStateError, "The service is no longer active."); | 645 InvalidStateError, "The service is no longer active."); |
| 378 resolver->reject(exception); | 646 resolver->reject(exception); |
| 379 return promise; | 647 return promise; |
| 380 } | 648 } |
| 381 | 649 |
| 382 m_pendingPresentResolvers.append(resolver); | 650 m_pendingPresentResolvers.append(resolver); |
| 383 m_display->RequestPresent(secureContext, convertToBaseCallback(WTF::bind( | 651 m_display->RequestPresent(secureContext, convertToBaseCallback(WTF::bind( |
| 384 &VRDisplay::onPresentComplete, | 652 &VRDisplay::onPresentComplete, |
| 385 wrapPersistent(this)))); | 653 wrapPersistent(this)))); |
| 386 } else { | 654 } else { |
| 655 m_isPresenting = true; |
| 387 updateLayerBounds(); | 656 updateLayerBounds(); |
| 388 resolver->resolve(); | 657 resolver->resolve(); |
| 389 ReportPresentationResult(PresentationResult::SuccessAlreadyPresenting); | 658 ReportPresentationResult(PresentationResult::SuccessAlreadyPresenting); |
| 390 } | 659 } |
| 391 | 660 |
| 392 return promise; | 661 return promise; |
| 393 } | 662 } |
| 394 | 663 |
| 395 void VRDisplay::onPresentComplete(bool success) { | 664 void VRDisplay::onPresentComplete(bool success) { |
| 665 VLOG(1) << __FUNCTION__; |
| 396 if (success) { | 666 if (success) { |
| 397 this->beginPresent(); | 667 this->beginPresent(); |
| 398 } else { | 668 } else { |
| 399 this->forceExitPresent(); | 669 this->forceExitPresent(); |
| 400 DOMException* exception = DOMException::create( | 670 DOMException* exception = DOMException::create( |
| 401 NotAllowedError, "Presentation request was denied."); | 671 NotAllowedError, "Presentation request was denied."); |
| 402 | 672 |
| 403 while (!m_pendingPresentResolvers.isEmpty()) { | 673 while (!m_pendingPresentResolvers.isEmpty()) { |
| 404 ScriptPromiseResolver* resolver = m_pendingPresentResolvers.takeFirst(); | 674 ScriptPromiseResolver* resolver = m_pendingPresentResolvers.takeFirst(); |
| 405 resolver->reject(exception); | 675 resolver->reject(exception); |
| 406 } | 676 } |
| 407 } | 677 } |
| 408 } | 678 } |
| 409 | 679 |
| 410 ScriptPromise VRDisplay::exitPresent(ScriptState* scriptState) { | 680 ScriptPromise VRDisplay::exitPresent(ScriptState* scriptState) { |
| 681 VLOG(1) << __FUNCTION__; |
| 411 ScriptPromiseResolver* resolver = ScriptPromiseResolver::create(scriptState); | 682 ScriptPromiseResolver* resolver = ScriptPromiseResolver::create(scriptState); |
| 412 ScriptPromise promise = resolver->promise(); | 683 ScriptPromise promise = resolver->promise(); |
| 413 | 684 |
| 414 if (!m_isPresenting) { | 685 if (!m_isPresenting) { |
| 415 // Can't stop presenting if we're not presenting. | 686 // Can't stop presenting if we're not presenting. |
| 416 DOMException* exception = | 687 DOMException* exception = |
| 417 DOMException::create(InvalidStateError, "VRDisplay is not presenting."); | 688 DOMException::create(InvalidStateError, "VRDisplay is not presenting."); |
| 418 resolver->reject(exception); | 689 resolver->reject(exception); |
| 419 return promise; | 690 return promise; |
| 420 } | 691 } |
| 421 | 692 |
| 422 if (!m_display) { | 693 if (!m_display) { |
| 423 DOMException* exception = | 694 DOMException* exception = |
| 424 DOMException::create(InvalidStateError, "VRService is not available."); | 695 DOMException::create(InvalidStateError, "VRService is not available."); |
| 425 resolver->reject(exception); | 696 resolver->reject(exception); |
| 426 return promise; | 697 return promise; |
| 427 } | 698 } |
| 428 m_display->ExitPresent(); | 699 m_display->ExitPresent(); |
| 429 | 700 |
| 430 resolver->resolve(); | 701 resolver->resolve(); |
| 431 | 702 |
| 432 forceExitPresent(); | 703 forceExitPresent(); |
| 433 | 704 |
| 434 return promise; | 705 return promise; |
| 435 } | 706 } |
| 436 | 707 |
| 437 void VRDisplay::beginPresent() { | 708 void VRDisplay::beginPresent() { |
| 709 VLOG(1) << __FUNCTION__; |
| 438 Document* doc = this->document(); | 710 Document* doc = this->document(); |
| 439 std::unique_ptr<UserGestureIndicator> gestureIndicator; | 711 std::unique_ptr<UserGestureIndicator> gestureIndicator; |
| 440 if (m_capabilities->hasExternalDisplay()) { | 712 if (m_capabilities->hasExternalDisplay()) { |
| 441 forceExitPresent(); | 713 forceExitPresent(); |
| 442 DOMException* exception = DOMException::create( | 714 DOMException* exception = DOMException::create( |
| 443 InvalidStateError, | 715 InvalidStateError, |
| 444 "VR Presentation not implemented for this VRDisplay."); | 716 "VR Presentation not implemented for this VRDisplay."); |
| 445 while (!m_pendingPresentResolvers.isEmpty()) { | 717 while (!m_pendingPresentResolvers.isEmpty()) { |
| 446 ScriptPromiseResolver* resolver = m_pendingPresentResolvers.takeFirst(); | 718 ScriptPromiseResolver* resolver = m_pendingPresentResolvers.takeFirst(); |
| 447 resolver->reject(exception); | 719 resolver->reject(exception); |
| 448 } | 720 } |
| 449 ReportPresentationResult( | 721 ReportPresentationResult( |
| 450 PresentationResult::PresentationNotSupportedByDisplay); | 722 PresentationResult::PresentationNotSupportedByDisplay); |
| 451 return; | 723 return; |
| 452 } else { | 724 } else { |
| 453 // TODO(klausw,crbug.com/655722): Need a proper VR compositor, but | |
| 454 // for the moment on mobile we'll just make the canvas fullscreen | |
| 455 // so that VrShell can pick it up through the standard (high | |
| 456 // latency) compositing path. | |
| 457 auto canvas = m_layer.source(); | |
| 458 auto inlineStyle = canvas->inlineStyle(); | |
| 459 if (inlineStyle) { | |
| 460 // THREE.js's VREffect sets explicit style.width/height on its rendering | |
| 461 // canvas based on the non-fullscreen window dimensions, and it keeps | |
| 462 // those unchanged when presenting. Unfortunately it appears that a | |
| 463 // fullscreened canvas just gets centered if it has explicitly set a | |
| 464 // size smaller than the fullscreen dimensions. Manually set size to | |
| 465 // 100% in this case and restore it when exiting fullscreen. This is a | |
| 466 // stopgap measure since THREE.js's usage appears legal according to the | |
| 467 // WebVR API spec. This will no longer be necessary once we can get rid | |
| 468 // of this fullscreen hack. | |
| 469 m_fullscreenOrigWidth = inlineStyle->getPropertyValue(CSSPropertyWidth); | |
| 470 if (!m_fullscreenOrigWidth.isNull()) { | |
| 471 canvas->setInlineStyleProperty(CSSPropertyWidth, "100%"); | |
| 472 } | |
| 473 m_fullscreenOrigHeight = inlineStyle->getPropertyValue(CSSPropertyHeight); | |
| 474 if (!m_fullscreenOrigHeight.isNull()) { | |
| 475 canvas->setInlineStyleProperty(CSSPropertyHeight, "100%"); | |
| 476 } | |
| 477 } else { | |
| 478 m_fullscreenOrigWidth = String(); | |
| 479 m_fullscreenOrigHeight = String(); | |
| 480 } | |
| 481 | |
| 482 if (doc) { | |
| 483 // Since the callback for requestPresent is asynchronous, we've lost our | |
| 484 // UserGestureToken, and need to create a new one to enter fullscreen. | |
| 485 gestureIndicator = WTF::wrapUnique( | |
| 486 new UserGestureIndicator(DocumentUserGestureToken::create( | |
| 487 doc, UserGestureToken::Status::PossiblyExistingGesture))); | |
| 488 } | |
| 489 Fullscreen::requestFullscreen(*canvas); | |
| 490 | |
| 491 // Check to see if the canvas is still the current fullscreen | |
| 492 // element once every 2 seconds. | |
| 493 m_fullscreenCheckTimer.startRepeating(2.0, BLINK_FROM_HERE); | |
| 494 m_reenteredFullscreen = false; | |
| 495 } | 725 } |
| 496 | 726 |
| 497 if (doc) { | 727 if (doc) { |
| 498 Platform::current()->recordRapporURL("VR.WebVR.PresentSuccess", | 728 Platform::current()->recordRapporURL("VR.WebVR.PresentSuccess", |
| 499 WebURL(doc->url())); | 729 WebURL(doc->url())); |
| 500 } | 730 } |
| 501 | 731 |
| 732 // Stop unneeded compositor updates. We do so by hiding the view. We can't |
| 733 // hide the page, doing so causes an assertion failure (!m_isHidden) in |
| 734 // DrawingBuffer::prepareTextureMailboxInternal(). Do this only when we're |
| 735 // actually presenting (m_isPresenting is true), see corresponding show() |
| 736 // in forceExitPresent(). Otherwise the view may remain hidden for failing |
| 737 // DON flow. |
| 738 m_navigatorVR->document()->view()->hide(); |
| 739 |
| 502 m_isPresenting = true; | 740 m_isPresenting = true; |
| 503 ReportPresentationResult(PresentationResult::Success); | 741 ReportPresentationResult(PresentationResult::Success); |
| 504 | 742 |
| 505 updateLayerBounds(); | 743 updateLayerBounds(); |
| 506 | 744 |
| 507 while (!m_pendingPresentResolvers.isEmpty()) { | 745 while (!m_pendingPresentResolvers.isEmpty()) { |
| 508 ScriptPromiseResolver* resolver = m_pendingPresentResolvers.takeFirst(); | 746 ScriptPromiseResolver* resolver = m_pendingPresentResolvers.takeFirst(); |
| 509 resolver->resolve(); | 747 resolver->resolve(); |
| 510 } | 748 } |
| 511 OnPresentChange(); | 749 OnPresentChange(); |
| 512 } | 750 } |
| 513 | 751 |
| 514 void VRDisplay::forceExitPresent() { | 752 void VRDisplay::forceExitPresent() { |
| 753 VLOG(1) << __FUNCTION__; |
| 754 if (m_surfaceHandle) { |
| 755 m_renderingContext->setSurfaceHandle(0); |
| 756 m_surfaceHandle = 0; |
| 757 } |
| 758 |
| 515 if (m_isPresenting) { | 759 if (m_isPresenting) { |
| 516 if (!m_capabilities->hasExternalDisplay()) { | |
| 517 auto canvas = m_layer.source(); | |
| 518 Fullscreen::fullyExitFullscreen(canvas->document()); | |
| 519 m_fullscreenCheckTimer.stop(); | |
| 520 if (!m_fullscreenOrigWidth.isNull()) { | |
| 521 canvas->setInlineStyleProperty(CSSPropertyWidth, m_fullscreenOrigWidth); | |
| 522 m_fullscreenOrigWidth = String(); | |
| 523 } | |
| 524 if (!m_fullscreenOrigHeight.isNull()) { | |
| 525 canvas->setInlineStyleProperty(CSSPropertyWidth, | |
| 526 m_fullscreenOrigHeight); | |
| 527 m_fullscreenOrigHeight = String(); | |
| 528 } | |
| 529 } else { | |
| 530 // Can't get into this presentation mode, so nothing to do here. | |
| 531 } | |
| 532 m_isPresenting = false; | 760 m_isPresenting = false; |
| 761 m_canUpdateFramePose = true; |
| 533 OnPresentChange(); | 762 OnPresentChange(); |
| 763 m_navigatorVR->document()->view()->show(); |
| 534 } | 764 } |
| 535 | 765 |
| 536 m_renderingContext = nullptr; | 766 m_renderingContext = nullptr; |
| 537 m_contextGL = nullptr; | 767 m_contextGL = nullptr; |
| 768 m_contextProvider = nullptr; |
| 769 |
| 770 VLOG(2) << __FUNCTION__ << ": lost surface, reset m_framesPending " << m_framesPending << " => " << 0; |
| 771 m_framesPending = 0; |
| 772 if (m_frameWaitingToSubmit) { |
| 773 m_frameWaitingToSubmit = false; |
| 774 // Make sure not to lose a rAF call if we ignore a pending frame. |
| 775 serviceScriptedAnimations(); |
| 776 } |
| 538 } | 777 } |
| 539 | 778 |
| 540 void VRDisplay::updateLayerBounds() { | 779 void VRDisplay::updateLayerBounds() { |
| 541 if (!m_display) | 780 if (!m_display) |
| 542 return; | 781 return; |
| 543 | 782 |
| 544 // Set up the texture bounds for the provided layer | 783 // Set up the texture bounds for the provided layer |
| 545 device::mojom::blink::VRLayerBoundsPtr leftBounds = | 784 device::mojom::blink::VRLayerBoundsPtr leftBounds = |
| 546 device::mojom::blink::VRLayerBounds::New(); | 785 device::mojom::blink::VRLayerBounds::New(); |
| 547 device::mojom::blink::VRLayerBoundsPtr rightBounds = | 786 device::mojom::blink::VRLayerBoundsPtr rightBounds = |
| 548 device::mojom::blink::VRLayerBounds::New(); | 787 device::mojom::blink::VRLayerBounds::New(); |
| 549 | 788 |
| 789 // For which pose should these new bounds take effect? |
| 790 // This depends on if the new layer bounds were applied |
| 791 // before or after updating the pose. |
| 792 uint32_t forPoseIndex = m_framePose ? m_framePose->poseIndex : 0; |
| 793 if (m_canUpdateFramePose) ++forPoseIndex; |
| 794 |
| 795 leftBounds->forPoseIndex = forPoseIndex; |
| 796 rightBounds->forPoseIndex = forPoseIndex; |
| 797 |
| 550 if (m_layer.leftBounds().size() == 4) { | 798 if (m_layer.leftBounds().size() == 4) { |
| 551 leftBounds->left = m_layer.leftBounds()[0]; | 799 leftBounds->left = m_layer.leftBounds()[0]; |
| 552 leftBounds->top = m_layer.leftBounds()[1]; | 800 leftBounds->top = m_layer.leftBounds()[1]; |
| 553 leftBounds->width = m_layer.leftBounds()[2]; | 801 leftBounds->width = m_layer.leftBounds()[2]; |
| 554 leftBounds->height = m_layer.leftBounds()[3]; | 802 leftBounds->height = m_layer.leftBounds()[3]; |
| 555 } else { | 803 } else { |
| 556 // Left eye defaults | 804 // Left eye defaults |
| 557 leftBounds->left = 0.0f; | 805 leftBounds->left = 0.0f; |
| 558 leftBounds->top = 0.0f; | 806 leftBounds->top = 0.0f; |
| 559 leftBounds->width = 0.5f; | 807 leftBounds->width = 0.5f; |
| 560 leftBounds->height = 1.0f; | 808 leftBounds->height = 1.0f; |
| 561 } | 809 } |
| 562 | 810 |
| 563 if (m_layer.rightBounds().size() == 4) { | 811 if (m_layer.rightBounds().size() == 4) { |
| 564 rightBounds->left = m_layer.rightBounds()[0]; | 812 rightBounds->left = m_layer.rightBounds()[0]; |
| 565 rightBounds->top = m_layer.rightBounds()[1]; | 813 rightBounds->top = m_layer.rightBounds()[1]; |
| 566 rightBounds->width = m_layer.rightBounds()[2]; | 814 rightBounds->width = m_layer.rightBounds()[2]; |
| 567 rightBounds->height = m_layer.rightBounds()[3]; | 815 rightBounds->height = m_layer.rightBounds()[3]; |
| 568 } else { | 816 } else { |
| 569 // Right eye defaults | 817 // Right eye defaults |
| 570 rightBounds->left = 0.5f; | 818 rightBounds->left = 0.5f; |
| 571 rightBounds->top = 0.0f; | 819 rightBounds->top = 0.0f; |
| 572 rightBounds->width = 0.5f; | 820 rightBounds->width = 0.5f; |
| 573 rightBounds->height = 1.0f; | 821 rightBounds->height = 1.0f; |
| 574 } | 822 } |
| 575 | 823 |
| 824 #ifdef HACK_PSEUDOSCALING |
| 825 // TODO(klausw): this assumes that the eyes are arranged left to right with no gaps. |
| 826 int recWidth = m_eyeParametersLeft->renderWidth() + m_eyeParametersRight->renderWidth(); |
| 827 int recHeight = m_eyeParametersLeft->renderHeight(); |
| 828 if (m_sourceWidth != recWidth || m_sourceHeight != recHeight) { |
| 829 VLOG(1) << __FUNCTION__ << ": resize " << m_sourceWidth << "x" << m_sourceHeight << " to " << recWidth << "x" << recHeight; |
| 830 double scaleX = (double)m_sourceWidth / recWidth; |
| 831 double scaleY = (double)m_sourceHeight / recHeight; |
| 832 leftBounds->left *= scaleX; |
| 833 leftBounds->top *= scaleY; |
| 834 leftBounds->width *= scaleX; |
| 835 leftBounds->height *= scaleY; |
| 836 rightBounds->left *= scaleX; |
| 837 rightBounds->top *= scaleY; |
| 838 rightBounds->width *= scaleX; |
| 839 rightBounds->height *= scaleY; |
| 840 } |
| 841 #endif |
| 842 |
| 576 m_display->UpdateLayerBounds(std::move(leftBounds), std::move(rightBounds)); | 843 m_display->UpdateLayerBounds(std::move(leftBounds), std::move(rightBounds)); |
| 577 } | 844 } |
| 578 | 845 |
| 579 HeapVector<VRLayer> VRDisplay::getLayers() { | 846 HeapVector<VRLayer> VRDisplay::getLayers() { |
| 580 HeapVector<VRLayer> layers; | 847 HeapVector<VRLayer> layers; |
| 581 | 848 |
| 582 if (m_isPresenting) { | 849 if (m_isPresenting) { |
| 583 layers.append(m_layer); | 850 layers.append(m_layer); |
| 584 } | 851 } |
| 585 | 852 |
| 586 return layers; | 853 return layers; |
| 587 } | 854 } |
| 588 | 855 |
| 589 void VRDisplay::submitFrame() { | 856 gpu::gles2::GLES2Interface* VRDisplay::getCompositingContext() { |
| 590 if (!m_display) | 857 if (!m_contextProvider) { |
| 591 return; | 858 m_contextProvider = WTF::wrapUnique( |
| 592 | 859 Platform::current()->createSharedOffscreenGraphicsContext3DProvider()); |
| 593 Document* doc = this->document(); | |
| 594 if (!m_isPresenting) { | |
| 595 if (doc) { | |
| 596 doc->addConsoleMessage(ConsoleMessage::create( | |
| 597 RenderingMessageSource, WarningMessageLevel, | |
| 598 "submitFrame has no effect when the VRDisplay is not presenting.")); | |
| 599 } | |
| 600 return; | |
| 601 } | 860 } |
| 602 | 861 |
| 862 gpu::gles2::GLES2Interface* sharedContext = nullptr; |
| 863 if (m_contextProvider) { |
| 864 sharedContext = m_contextProvider->contextGL(); |
| 865 |
| 866 if (!sharedContext) |
| 867 return nullptr; |
| 868 } |
| 869 |
| 870 return sharedContext; |
| 871 } |
| 872 |
| 873 |
| 874 void VRDisplay::submitFrame() { |
| 875 Document* doc = this->document(); |
| 603 if (!m_inAnimationFrame) { | 876 if (!m_inAnimationFrame) { |
| 604 if (doc) { | 877 if (doc) { |
| 605 doc->addConsoleMessage( | 878 doc->addConsoleMessage( |
| 606 ConsoleMessage::create(RenderingMessageSource, WarningMessageLevel, | 879 ConsoleMessage::create(RenderingMessageSource, WarningMessageLevel, |
| 607 "submitFrame must be called within a " | 880 "submitFrame must be called within a " |
| 608 "VRDisplay.requestAnimationFrame callback.")); | 881 "VRDisplay.requestAnimationFrame callback.")); |
| 609 } | 882 } |
| 610 return; | 883 return; |
| 611 } | 884 } |
| 612 | 885 |
| 886 if (!m_isPresenting) { |
| 887 if (doc) { |
| 888 doc->addConsoleMessage(ConsoleMessage::create( |
| 889 RenderingMessageSource, WarningMessageLevel, |
| 890 "submitFrame has no effect when the VRDisplay is not presenting.")); |
| 891 } |
| 892 return; |
| 893 } |
| 894 |
| 895 if (!m_canSubmitFramePose) { |
| 896 if (doc) { |
| 897 doc->addConsoleMessage(ConsoleMessage::create( |
| 898 RenderingMessageSource, WarningMessageLevel, |
| 899 "submitFrame rejected, this pose was already used.")); |
| 900 m_canUpdateFramePose = true; |
| 901 } |
| 902 return; |
| 903 } |
| 904 |
| 905 submitFrameAnyContext(); |
| 906 } |
| 907 |
| 908 void VRDisplay::submitFrameAnyContext() { |
| 909 TRACE_EVENT1("media", "klausw:submitFrame", "frame", m_framePose->poseIndex); |
| 910 |
| 911 if (!m_display) |
| 912 return; |
| 913 |
| 613 if (!m_contextGL) { | 914 if (!m_contextGL) { |
| 614 // Something got confused, we can't submit frames without a GL context. | 915 // Something got confused, we can't submit frames without a GL context. |
| 615 return; | 916 return; |
| 616 } | 917 } |
| 617 | 918 |
| 618 // Write the frame number for the pose used into a bottom left pixel block. | 919 if (m_framePose) { |
| 619 // It is read by chrome/browser/android/vr_shell/vr_shell.cc to associate | 920 m_framePose->ts_frameStart = m_serviceStartMs; |
| 620 // the correct corresponding pose for submission. | 921 m_framePose->ts_submit = getMonotonicTimestampMs(); |
| 922 } |
| 923 |
| 924 GLenum error; |
| 925 (void)error; |
| 926 |
| 927 #define DRAW_FRAME_COUNTER_PIXEL_FOR_DEBUGGING 0 |
| 928 #if DRAW_FRAME_COUNTER_PIXEL_FOR_DEBUGGING |
| 621 auto gl = m_contextGL; | 929 auto gl = m_contextGL; |
| 622 | |
| 623 // We must ensure that the WebGL app's GL state is preserved. We do this by | |
| 624 // calling low-level GL commands directly so that the rendering context's | |
| 625 // saved parameters don't get overwritten. | |
| 626 | |
| 627 gl->Enable(GL_SCISSOR_TEST); | 930 gl->Enable(GL_SCISSOR_TEST); |
| 628 // Use a few pixels to ensure we get a clean color. The resolution for the | 931 gl->Scissor(0, 0, 200, 200); |
| 629 // WebGL buffer may not match the final rendered destination size, and | |
| 630 // texture filtering could interfere for single pixels. This isn't visible | |
| 631 // since the final rendering hides the edges via a vignette effect. | |
| 632 gl->Scissor(0, 0, 4, 4); | |
| 633 gl->ColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); | 932 gl->ColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); |
| 634 int idx = m_framePose->poseIndex; | 933 int idx = m_framePose->poseIndex; |
| 635 // Careful with the arithmetic here. Float color 1.f is equivalent to int 255. | 934 // Careful with the arithmetic here. Float color 1.f is equivalent to int 255. |
| 636 // Use the low byte of the index as the red component, and store an arbitrary | 935 gl->ClearColor((idx & 255) / 255.0f, ((idx >> 8) & 255) / 255.0f, |
| 637 // magic number in green/blue. This number must match the reading code in | 936 ((idx >> 16) & 255) / 255.0f, 1.0f); |
| 638 // vr_shell.cc. Avoid all-black/all-white. | |
| 639 gl->ClearColor((idx & 255) / 255.0f, kWebVrPosePixelMagicNumbers[0] / 255.0f, | |
| 640 kWebVrPosePixelMagicNumbers[1] / 255.0f, 1.0f); | |
| 641 gl->Clear(GL_COLOR_BUFFER_BIT); | 937 gl->Clear(GL_COLOR_BUFFER_BIT); |
| 642 | |
| 643 // Set the GL state back to what was set by the WebVR application. | 938 // Set the GL state back to what was set by the WebVR application. |
| 644 m_renderingContext->restoreScissorEnabled(); | 939 m_renderingContext->restoreScissorEnabled(); |
| 645 m_renderingContext->restoreScissorBox(); | 940 m_renderingContext->restoreScissorBox(); |
| 646 m_renderingContext->restoreColorMask(); | 941 m_renderingContext->restoreColorMask(); |
| 647 m_renderingContext->restoreClearColor(); | 942 m_renderingContext->restoreClearColor(); |
| 648 | 943 #endif |
| 649 m_display->SubmitFrame(m_framePose.Clone()); | 944 |
| 945 bool needSurfaceHandle = !m_surfaceHandle; |
| 946 |
| 947 int currentWidth = m_layer.source()->width(); |
| 948 int currentHeight = m_layer.source()->height(); |
| 949 if (currentWidth != m_sourceWidth || currentHeight != m_sourceHeight) { |
| 950 VLOG(1) << __FUNCTION__ << ": source size changed from " << |
| 951 m_sourceWidth << "x" << m_sourceHeight << |
| 952 " to " << |
| 953 currentWidth << "x" << currentHeight << |
| 954 ", m_surfaceHandleRequested=" << m_surfaceHandleRequested << |
| 955 ", m_surfaceHandle=" << m_surfaceHandle; |
| 956 if (m_surfaceHandleRequested) { |
| 957 // We can't currently request a new surface, a request is in flight. |
| 958 // Just do nothing, try again next iteration. |
| 959 VLOG(2) << __FUNCTION__ << ": do nothing, request in flight"; |
| 960 } else { |
| 961 m_sourceWidth = currentWidth; |
| 962 m_sourceHeight = currentHeight; |
| 963 needSurfaceHandle = true; |
| 964 } |
| 965 } |
| 966 |
| 967 if (needSurfaceHandle) { |
| 968 if (!m_surfaceHandleRequested) { |
| 969 auto callback = convertToBaseCallback(WTF::bind( |
| 970 &VRDisplay::onGetSurfaceHandleComplete, wrapPersistent(this))); |
| 971 m_surfaceHandleRequested = true; |
| 972 m_display->GetSurfaceHandle(m_sourceWidth, m_sourceHeight, callback); |
| 973 VLOG(2) << __FUNCTION__ << ": requesting surface handle for size " << |
| 974 m_sourceWidth << "x" << m_sourceHeight; |
| 975 } |
| 976 VLOG(2) << __FUNCTION__ << ": no surface available yet, discarding frame"; |
| 977 m_canUpdateFramePose = true; |
| 978 return; |
| 979 } |
| 980 |
| 981 auto framePose = m_framePose.Clone(); |
| 982 if (!framePose) { |
| 983 VLOG(2) << __FUNCTION__ << ": no pose!"; |
| 984 m_canUpdateFramePose = true; |
| 985 return; |
| 986 } |
| 987 |
| 988 auto poseIdx = framePose->poseIndex; |
| 989 |
| 990 assert(m_surfaceHandle != 0); |
| 991 assert(poseIdx != 0); |
| 992 |
| 993 if (m_framesPending > 0) { |
| 994 VLOG(1) << __FUNCTION__ << ": m_framesPending=" << m_framesPending << ", deferring frame " << poseIdx; |
| 995 m_frameWaitingToSubmit = true; |
| 996 return; |
| 997 } |
| 998 |
| 999 // From this point on we're committed to calling SwapBuffers and submitting |
| 1000 // the frame. Don't do any early return from this point onward. |
| 1001 VLOG(2) << __FUNCTION__ << ": decided to submitFrame, have pose for frame " << poseIdx; |
| 1002 |
| 1003 m_submitExecuteMs = getMonotonicTimestampMs(); |
| 1004 m_historyPreSubmitTimeMs.add(m_submitExecuteMs - m_serviceStartMs); |
| 1005 |
| 1006 // This is a load-bearing glFlush(). Removing it breaks rendering. WTF. |
| 1007 m_contextGL->Flush(); |
| 1008 m_contextGL->SwapBuffers(); |
| 1009 // Use glFinish here to avoid excessive wait-for-completion in |
| 1010 // the single-threaded CrBrowserMain message handler. TODO(klausw): |
| 1011 // use a fence instead? |
| 1012 //m_contextGL->Finish(); |
| 1013 |
| 1014 auto callback = convertToBaseCallback(WTF::bind( |
| 1015 &VRDisplay::onSubmitFrameComplete, wrapPersistent(this))); |
| 1016 |
| 1017 // Update pending frames, we have a surface handle. |
| 1018 VLOG(2) << __FUNCTION__ << ": m_framesPending " << m_framesPending << " => " << m_framesPending + 1; |
| 1019 ++m_framesPending; |
| 1020 |
| 1021 m_display->SubmitFrame(m_surfaceHandle, std::move(framePose), callback); |
| 1022 m_canSubmitFramePose = false; // Illegal to reuse the same pose twice. |
| 650 m_canUpdateFramePose = true; | 1023 m_canUpdateFramePose = true; |
| 1024 VLOG(2) << __FUNCTION__ << ": submit done for frame " << poseIdx; |
| 1025 } |
| 1026 |
| 1027 void VRDisplay::onGetSurfaceHandleComplete(int32_t surfaceHandle) { |
| 1028 VLOG(1) << __FUNCTION__ << ": VRDisplay new surface handle=" << surfaceHandle; |
| 1029 m_surfaceHandleRequested = false; |
| 1030 |
| 1031 if (m_surfaceHandle) { |
| 1032 // We have a surface, disconnect it so that the reconnect |
| 1033 // below resizes it. |
| 1034 // |
| 1035 // TODO(klausw): simplify this, would be nice to do a call |
| 1036 // to m_renderingContext->setSurfaceHandle(current_handle) |
| 1037 // would do an in-place resize. See |
| 1038 // SurfaceTexture.setDefaultBufferSize documentation: |
| 1039 // |
| 1040 // For OpenGL ES, the EGLSurface should be destroyed |
| 1041 // (via eglDestroySurface), made not-current (via |
| 1042 // eglMakeCurrent), and then recreated (via |
| 1043 // eglCreateWindowSurface) to ensure that the new |
| 1044 // default size has taken effect. |
| 1045 VLOG(2) << __FUNCTION__ << ": zero current surface handle to resize"; |
| 1046 m_renderingContext->setSurfaceHandle(0); |
| 1047 m_surfaceHandle = 0; |
| 1048 } |
| 1049 |
| 1050 // TODO(klausw): special-case in-place resize? |
| 1051 m_renderingContext->setSurfaceHandle(surfaceHandle); |
| 1052 m_surfaceHandle = surfaceHandle; |
| 1053 |
| 1054 // Reset stats counters since a new resolution may have |
| 1055 // very different performance. |
| 1056 m_historyPreSubmitTimeMs.clear(); |
| 1057 m_historyPostSubmitTimeMs.clear(); |
| 1058 updateLayerBounds(); |
| 1059 } |
| 1060 |
| 1061 void VRDisplay::onSubmitFrameComplete(int32_t surfaceHandle, uint32_t poseIndex, double renderMs) { |
| 1062 TRACE_EVENT1("media", "klausw:onSubmitFrameComplete", "frame", poseIndex); |
| 1063 VLOG(2) << __FUNCTION__ << ": surface " << surfaceHandle << ", frame " << poseIndex << ", render time " << renderMs << "ms"; |
| 1064 if (!surfaceHandle) { |
| 1065 VLOG(2) << __FUNCTION__ << ": Ignoring callback for invalid surface 0"; |
| 1066 return; |
| 1067 } |
| 1068 if (surfaceHandle != m_surfaceHandle) { |
| 1069 VLOG(1) << __FUNCTION__ << ": Ignoring callback, was for surface " << surfaceHandle << " which is not the current surface " << m_surfaceHandle; |
| 1070 return; |
| 1071 } |
| 1072 |
| 1073 // Update pending frames, the callback matches our active surface handle. |
| 1074 VLOG(2) << __FUNCTION__ << ": m_framesPending " << m_framesPending << " => " << m_framesPending - 1; |
| 1075 --m_framesPending; // should now be == 0 |
| 1076 |
| 1077 m_historyPostSubmitTimeMs.add(getMonotonicTimestampMs() - m_submitExecuteMs); |
| 1078 |
| 1079 if (m_frameWaitingToSubmit) { |
| 1080 VLOG(2) << __FUNCTION__ << ": ready to submit deferred frame " << m_framePose->poseIndex; |
| 1081 submitFrameAnyContext(); |
| 1082 // m_framesPending is now == 1 |
| 1083 m_frameWaitingToSubmit = false; |
| 1084 |
| 1085 // Run next rAF now, don't wait for next timer tick. |
| 1086 // TODO(klausw): try to stay tied to vsync and run at 30fps |
| 1087 // if not keeping up? |
| 1088 bool waitForNextTick = false; |
| 1089 if (m_historyPreSubmitTimeMs.hasPrediction()) { |
| 1090 // a . . b . . c . . d . . e . . f . . g |
| 1091 // | | | | | | |
| 1092 double avgSubmitDuration = m_historyPreSubmitTimeMs.getPrediction(); |
| 1093 double frameTimeMs = 1000.0 / 60; // TODO(klausw): measure instead of assuming 60fps. |
| 1094 double nextTickWaitMs = m_frameTickStartMs + frameTimeMs - m_submitExecuteMs; |
| 1095 if (avgSubmitDuration < frameTimeMs && nextTickWaitMs > 0 /* && nextTickWaitMs < frameTimeMs / 2 */) { |
| 1096 // Last submit took less than a frame, and the next tick is soon. Wait for it to avoid exceeding 60fps. |
| 1097 VLOG(2) << __FUNCTION__ << ": avg submit took " << avgSubmitDuration << |
| 1098 "ms, next tick in " << nextTickWaitMs << |
| 1099 "ms. reschedule at next tick after frame " << m_framePose->poseIndex
; |
| 1100 waitForNextTick = true; |
| 1101 } else { |
| 1102 VLOG(2) << __FUNCTION__ << ": avg submit took " << avgSubmitDuration << |
| 1103 "ms, next tick in " << nextTickWaitMs << |
| 1104 "ms. reschedule now after frame " << m_framePose->poseIndex; |
| 1105 } |
| 1106 } |
| 1107 if (waitForNextTick) { |
| 1108 rescheduleAtNextTick(); |
| 1109 } else { |
| 1110 // We're backlogged. Run new frame immediately. |
| 1111 serviceScriptedAnimations(); |
| 1112 } |
| 1113 } |
| 651 } | 1114 } |
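
To illustrate the rescheduling heuristic in onSubmitFrameComplete: assuming a fixed 60 fps tick (about 16.67 ms per frame, as the TODO notes), the display waits for the next tick only when the average submit time is shorter than a frame and the next tick has not yet passed; otherwise it runs the next rAF immediately. A self-contained sketch with made-up timestamps, not part of the patch:

  #include <cstdio>

  // Sketch of the wait-vs-run-now decision above (times in milliseconds).
  static bool shouldWaitForNextTick(double avgSubmitMs, double frameTickStartMs,
                                    double submitExecuteMs) {
    const double frameTimeMs = 1000.0 / 60;  // ~16.67 ms, assumed 60 fps
    const double nextTickWaitMs = frameTickStartMs + frameTimeMs - submitExecuteMs;
    return avgSubmitMs < frameTimeMs && nextTickWaitMs > 0;
  }

  int main() {
    // Submits average 10 ms and the next tick is ~6.67 ms away: wait for it.
    std::printf("%d\n", shouldWaitForNextTick(10.0, 1000.0, 1010.0));  // 1
    // Submits average 20 ms (slower than a frame): run the next rAF immediately.
    std::printf("%d\n", shouldWaitForNextTick(20.0, 1000.0, 1020.0));  // 0
    return 0;
  }
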
| 652 | 1115 |
| 653 Document* VRDisplay::document() { | 1116 Document* VRDisplay::document() { |
| 654 return m_navigatorVR->document(); | 1117 return m_navigatorVR->document(); |
| 655 } | 1118 } |
| 656 | 1119 |
| 657 void VRDisplay::OnPresentChange() { | 1120 void VRDisplay::OnPresentChange() { |
| 1121 VLOG(1) << __FUNCTION__; |
| 658 if (m_isPresenting && !m_isValidDeviceForPresenting) { | 1122 if (m_isPresenting && !m_isValidDeviceForPresenting) { |
| 659 VLOG(1) << __FUNCTION__ << ": device not valid, not sending event"; | 1123 VLOG(1) << __FUNCTION__ << ": device not valid, not sending event"; |
| 660 return; | 1124 return; |
| 661 } | 1125 } |
| 662 m_navigatorVR->enqueueVREvent(VRDisplayEvent::create( | 1126 m_navigatorVR->enqueueVREvent(VRDisplayEvent::create( |
| 663 EventTypeNames::vrdisplaypresentchange, true, false, this, "")); | 1127 EventTypeNames::vrdisplaypresentchange, true, false, this, "")); |
| 664 } | 1128 } |
| 665 | 1129 |
| 666 void VRDisplay::OnChanged(device::mojom::blink::VRDisplayInfoPtr display) { | 1130 void VRDisplay::OnChanged(device::mojom::blink::VRDisplayInfoPtr display) { |
| 1131 VLOG(1) << __FUNCTION__; |
| 667 update(display); | 1132 update(display); |
| 668 } | 1133 } |
| 669 | 1134 |
| 670 void VRDisplay::OnExitPresent() { | 1135 void VRDisplay::OnExitPresent() { |
| 1136 VLOG(1) << __FUNCTION__; |
| 671 forceExitPresent(); | 1137 forceExitPresent(); |
| 672 } | 1138 } |
| 673 | 1139 |
| 674 void VRDisplay::onConnected() { | 1140 void VRDisplay::onConnected() { |
| 1141 VLOG(1) << __FUNCTION__; |
| 675 m_navigatorVR->enqueueVREvent(VRDisplayEvent::create( | 1142 m_navigatorVR->enqueueVREvent(VRDisplayEvent::create( |
| 676 EventTypeNames::vrdisplayconnect, true, false, this, "connect")); | 1143 EventTypeNames::vrdisplayconnect, true, false, this, "connect")); |
| 677 } | 1144 } |
| 678 | 1145 |
| 679 void VRDisplay::onDisconnected() { | 1146 void VRDisplay::onDisconnected() { |
| 1147 VLOG(1) << __FUNCTION__; |
| 680 m_navigatorVR->enqueueVREvent(VRDisplayEvent::create( | 1148 m_navigatorVR->enqueueVREvent(VRDisplayEvent::create( |
| 681 EventTypeNames::vrdisplaydisconnect, true, false, this, "disconnect")); | 1149 EventTypeNames::vrdisplaydisconnect, true, false, this, "disconnect")); |
| 682 } | 1150 } |
| 683 | 1151 |
| 684 void VRDisplay::OnActivate(device::mojom::blink::VRDisplayEventReason reason) { | 1152 void VRDisplay::OnActivate(device::mojom::blink::VRDisplayEventReason reason) { |
| 1153 VLOG(1) << __FUNCTION__; |
| 685 m_navigatorVR->dispatchVRGestureEvent(VRDisplayEvent::create( | 1154 m_navigatorVR->dispatchVRGestureEvent(VRDisplayEvent::create( |
| 686 EventTypeNames::vrdisplayactivate, true, false, this, reason)); | 1155 EventTypeNames::vrdisplayactivate, true, false, this, reason)); |
| 687 } | 1156 } |
| 688 | 1157 |
| 689 void VRDisplay::OnDeactivate( | 1158 void VRDisplay::OnDeactivate( |
| 690 device::mojom::blink::VRDisplayEventReason reason) { | 1159 device::mojom::blink::VRDisplayEventReason reason) { |
| 1160 VLOG(1) << __FUNCTION__; |
| 691 m_navigatorVR->enqueueVREvent(VRDisplayEvent::create( | 1161 m_navigatorVR->enqueueVREvent(VRDisplayEvent::create( |
| 692 EventTypeNames::vrdisplaydeactivate, true, false, this, reason)); | 1162 EventTypeNames::vrdisplaydeactivate, true, false, this, reason)); |
| 693 } | 1163 } |
| 694 | 1164 |
| 695 void VRDisplay::onFullscreenCheck(TimerBase*) { | |
| 696 if (!m_isPresenting) { | |
| 697 m_fullscreenCheckTimer.stop(); | |
| 698 return; | |
| 699 } | |
| 700 // TODO: This is a temporary measure to track if fullscreen mode has been | |
| 701 // exited by the UA. If so we need to end VR presentation. Soon we won't | |
| 702 // depend on the Fullscreen API to fake VR presentation, so this will | |
| 703 // become unnessecary. Until that point, though, this seems preferable to | |
| 704 // adding a bunch of notification plumbing to Fullscreen. | |
| 705 if (!Fullscreen::isFullscreenElement(*m_layer.source())) { | |
| 706 // TODO(mthiesse): Due to asynchronous resizing, we might get kicked out of | |
| 707 // fullscreen when changing display parameters upon entering WebVR. So one | |
| 708 // time only, we reenter fullscreen after having left it; otherwise we exit | |
| 709 // presentation. | |
| 710 if (m_reenteredFullscreen) { | |
| 711 m_isPresenting = false; | |
| 712 OnPresentChange(); | |
| 713 m_fullscreenCheckTimer.stop(); | |
| 714 if (m_display) | |
| 715 m_display->ExitPresent(); | |
| 716 return; | |
| 717 } | |
| 718 m_reenteredFullscreen = true; | |
| 719 auto canvas = m_layer.source(); | |
| 720 Document* doc = this->document(); | |
| 721 std::unique_ptr<UserGestureIndicator> gestureIndicator; | |
| 722 if (doc) { | |
| 723 gestureIndicator = WTF::wrapUnique( | |
| 724 new UserGestureIndicator(DocumentUserGestureToken::create( | |
| 725 doc, UserGestureToken::Status::PossiblyExistingGesture))); | |
| 726 } | |
| 727 Fullscreen::requestFullscreen(*canvas); | |
| 728 } | |
| 729 } | |
| 730 | |
| 731 ScriptedAnimationController& VRDisplay::ensureScriptedAnimationController( | 1165 ScriptedAnimationController& VRDisplay::ensureScriptedAnimationController( |
| 732 Document* doc) { | 1166 Document* doc) { |
| 733 if (!m_scriptedAnimationController) | 1167 if (!m_scriptedAnimationController) |
| 734 m_scriptedAnimationController = ScriptedAnimationController::create(doc); | 1168 m_scriptedAnimationController = ScriptedAnimationController::create(doc); |
| 735 | 1169 |
| 736 return *m_scriptedAnimationController; | 1170 return *m_scriptedAnimationController; |
| 737 } | 1171 } |
| 738 | 1172 |
| 739 void VRDisplay::dispose() { | 1173 void VRDisplay::dispose() { |
| 740 m_binding.Close(); | 1174 m_binding.Close(); |
| (...skipping 25 matching lines...) |
| 766 visitor->trace(m_stageParameters); | 1200 visitor->trace(m_stageParameters); |
| 767 visitor->trace(m_eyeParametersLeft); | 1201 visitor->trace(m_eyeParametersLeft); |
| 768 visitor->trace(m_eyeParametersRight); | 1202 visitor->trace(m_eyeParametersRight); |
| 769 visitor->trace(m_layer); | 1203 visitor->trace(m_layer); |
| 770 visitor->trace(m_renderingContext); | 1204 visitor->trace(m_renderingContext); |
| 771 visitor->trace(m_scriptedAnimationController); | 1205 visitor->trace(m_scriptedAnimationController); |
| 772 visitor->trace(m_pendingPresentResolvers); | 1206 visitor->trace(m_pendingPresentResolvers); |
| 773 } | 1207 } |
| 774 | 1208 |
| 775 } // namespace blink | 1209 } // namespace blink |