Chromium Code Reviews
| Index: chrome/browser/android/vr_shell/vr_shell_gl.cc |
| diff --git a/chrome/browser/android/vr_shell/vr_shell_gl.cc b/chrome/browser/android/vr_shell/vr_shell_gl.cc |
| index 55a1c7ee9d4ff363953955bc80793b53791daf34..73291b9c5e3fd393cd210ce3e98f8a5284134cba 100644 |
| --- a/chrome/browser/android/vr_shell/vr_shell_gl.cc |
| +++ b/chrome/browser/android/vr_shell/vr_shell_gl.cc |
| @@ -4,6 +4,12 @@ |
| #include "chrome/browser/android/vr_shell/vr_shell_gl.h" |
| +#include <android/native_window.h> |
| +#include <android/native_window_jni.h> |
| + |
| +#include <iomanip> |
| + |
| +#include "base/android/jni_android.h" |
| #include "base/memory/ptr_util.h" |
| #include "base/metrics/histogram_macros.h" |
| #include "base/threading/thread_task_runner_handle.h" |
| @@ -23,6 +24,8 @@ |
| #include "ui/gl/gl_context.h" |
| #include "ui/gl/gl_surface.h" |
| #include "ui/gl/init/gl_factory.h" |
| +#include "gpu/ipc/common/gpu_surface_tracker.h" |
| +#include "gpu/ipc/common/surface_handle.h" |
| namespace vr_shell { |
| @@ -81,10 +89,6 @@ static constexpr gvr::Rectf kHeadlockedBufferFov = {20.f, 20.f, 20.f, 20.f}; |
| static constexpr int kViewportListPrimaryOffset = 0; |
| static constexpr int kViewportListHeadlockedOffset = 2; |
| -// Magic numbers used to mark valid pose index values encoded in frame |
| -// data. Must match the magic numbers used in blink's VRDisplay.cpp. |
| -static constexpr std::array<uint8_t, 2> kWebVrPosePixelMagicNumbers{{42, 142}}; |
| - |
| float Distance(const gvr::Vec3f& vec1, const gvr::Vec3f& vec2) { |
| float xdiff = (vec1.x - vec2.x); |
| float ydiff = (vec1.y - vec2.y); |
| @@ -141,39 +145,26 @@ enum class ViewerType { |
| VIEWER_TYPE_MAX, |
| }; |
| -int GetPixelEncodedPoseIndexByte() { |
| - TRACE_EVENT0("gpu", "VrShellGl::GetPixelEncodedPoseIndex"); |
| - // Read the pose index encoded in a bottom left pixel as color values. |
| - // See also third_party/WebKit/Source/modules/vr/VRDisplay.cpp which |
| - // encodes the pose index, and device/vr/android/gvr/gvr_device.cc |
| - // which tracks poses. Returns the low byte (0..255) if valid, or -1 |
| - // if not valid due to bad magic number. |
| - uint8_t pixels[4]; |
| - // Assume we're reading from the framebuffer we just wrote to. |
| - // That's true currently, we may need to use glReadBuffer(GL_BACK) |
| - // or equivalent if the rendering setup changes in the future. |
| - glReadPixels(0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, pixels); |
| - |
| - // Check for the magic number written by VRDevice.cpp on submit. |
| - // This helps avoid glitches from garbage data in the render |
| - // buffer that can appear during initialization or resizing. These |
| - // often appear as flashes of all-black or all-white pixels. |
| - if (pixels[1] == kWebVrPosePixelMagicNumbers[0] && |
| - pixels[2] == kWebVrPosePixelMagicNumbers[1]) { |
| - // Pose is good. |
| - return pixels[0]; |
| - } |
| - VLOG(1) << "WebVR: reject decoded pose index " << (int)pixels[0] << |
| - ", bad magic number " << (int)pixels[1] << ", " << (int)pixels[2]; |
| - return -1; |
| -} |
| +} // namespace |
| int64_t TimeInMicroseconds() { |
| return std::chrono::duration_cast<std::chrono::microseconds>( |
| std::chrono::steady_clock::now().time_since_epoch()).count(); |
| } |
| -} // namespace |
| +uint32_t GetPixelEncodedPoseIndex() { |
| + TRACE_EVENT0("gpu", "VrShell::GetPixelEncodedPoseIndex"); |
| + // Read the pose index encoded in a bottom left pixel as color values. |
| + // See also third_party/WebKit/Source/modules/vr/VRDisplay.cpp which |
| + // encodes the pose index, and device/vr/android/gvr/gvr_device.cc |
| + // which tracks poses. |
| + uint8_t pixels[4]; |
| + // Assume we're reading from the framebuffer we just wrote to. |
| + // That's true currently, we may need to use glReadBuffer(GL_BACK) |
| + // or equivalent if the rendering setup changes in the future. |
| + glReadPixels(0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, pixels); |
| + return pixels[0] | (pixels[1] << 8) | (pixels[2] << 16); |
| +} |
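The decoder above reads a 24-bit pose index back from the RGB channels of the bottom-left pixel, low byte first. The blink-side encoder is not shown in this diff; a minimal sketch of the matching inverse packing, assuming a plain RGBA8 pixel buffer (helper name hypothetical):

    #include <cstdint>

    // Hypothetical encoder mirroring the decode above: pack a pose index
    // into the RGB bytes of one RGBA pixel, low byte first. Alpha is not
    // read back by the decoder, so it is simply set opaque.
    void EncodePoseIndexToPixel(uint32_t pose_index, uint8_t pixels[4]) {
      pixels[0] = pose_index & 0xFF;
      pixels[1] = (pose_index >> 8) & 0xFF;
      pixels[2] = (pose_index >> 16) & 0xFF;
      pixels[3] = 0xFF;
    }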
| VrShellGl::VrShellGl( |
| const base::WeakPtr<VrShell>& weak_vr_shell, |
| @@ -202,13 +193,19 @@ bool VrShellGl::Initialize() { |
| gvr::Mat4f identity; |
| SetIdentityM(identity); |
| webvr_head_pose_.resize(kPoseRingBufferSize, identity); |
| - webvr_head_pose_valid_.resize(kPoseRingBufferSize, false); |
| + webvr_time_frame_start_.resize(kPoseRingBufferSize, 0.0); |
| + webvr_time_get_pose_.resize(kPoseRingBufferSize, 0.0); |
| + webvr_time_got_pose_.resize(kPoseRingBufferSize, 0.0); |
| + webvr_time_submit_.resize(kPoseRingBufferSize, 0.0); |
| + webvr_time_surfaced_.resize(kPoseRingBufferSize, 0.0); |
| + webvr_time_acquired_.resize(kPoseRingBufferSize, 0.0); |
| - draw_task_.Reset(base::Bind(&VrShellGl::DrawFrame, base::Unretained(this))); |
| + draw_task_.Reset(base::Bind(&VrShellGl::DrawFrame, base::Unretained(this), 0)); |
| scene_.reset(new UiScene); |
| InitializeRenderer(); |
| + VLOG(1) << __FUNCTION__ << ": renderer initialized"; |
| ScheduleNextDrawFrame(); |
| return true; |
| @@ -250,24 +247,36 @@ bool VrShellGl::InitializeGl() { |
| LOG(ERROR) << "No VSync Provider"; |
| } |
| - unsigned int textures[2]; |
| - glGenTextures(2, textures); |
| + unsigned int textures[3]; |
| + glGenTextures(3, textures); |
| ui_texture_id_ = textures[0]; |
| content_texture_id_ = textures[1]; |
| + webvr_texture_id_ = textures[2]; |
| + |
| ui_surface_texture_ = gl::SurfaceTexture::Create(ui_texture_id_); |
| content_surface_texture_ = gl::SurfaceTexture::Create(content_texture_id_); |
| + webvr_surface_texture_ = gl::SurfaceTexture::Create(webvr_texture_id_); |
| + |
| ui_surface_.reset(new gl::ScopedJavaSurface(ui_surface_texture_.get())); |
| content_surface_.reset(new gl::ScopedJavaSurface( |
| content_surface_texture_.get())); |
| + webvr_surface_.reset(new gl::ScopedJavaSurface( |
| + webvr_surface_texture_.get())); |
| + |
| ui_surface_texture_->SetFrameAvailableCallback(base::Bind( |
| &VrShellGl::OnUIFrameAvailable, weak_ptr_factory_.GetWeakPtr())); |
| content_surface_texture_->SetFrameAvailableCallback(base::Bind( |
| &VrShellGl::OnContentFrameAvailable, weak_ptr_factory_.GetWeakPtr())); |
| + webvr_surface_texture_->SetFrameAvailableCallback(base::Bind( |
| + &VrShellGl::OnWebVrFrameAvailable, weak_ptr_factory_.GetWeakPtr())); |
| content_surface_texture_->SetDefaultBufferSize( |
| content_tex_physical_size_.width, content_tex_physical_size_.height); |
| ui_surface_texture_->SetDefaultBufferSize(ui_tex_physical_size_.width, |
| ui_tex_physical_size_.height); |
| + // Set a small default surface size for WebVR since we may not need it. |
| + // Will be resized as needed. |
| + webvr_surface_texture_->SetDefaultBufferSize(1, 1); |
| main_thread_task_runner_->PostTask(FROM_HERE, base::Bind( |
| &VrShell::SurfacesChanged, weak_vr_shell_, |
| @@ -284,6 +293,28 @@ void VrShellGl::OnContentFrameAvailable() { |
| content_surface_texture_->UpdateTexImage(); |
| } |
| +void VrShellGl::OnWebVrFrameAvailable() { |
| + if (webvr_pending_poses_.empty()) { |
| + VLOG(2) << __FUNCTION__ << ": No pending pose, saving this frame for later"; |
| + ++webvr_already_available_frames_; |
| + return; |
| + } |
| + uint32_t pose_index = webvr_pending_poses_.front(); |
| + webvr_pending_poses_.pop_front(); |
| + VLOG(2) << __FUNCTION__ << ": receiving WebVR frame " << pose_index; |
| + webvr_surface_texture_->UpdateTexImage(); |
| + auto remaining_poses = webvr_pending_poses_.size(); |
| + if (remaining_poses > 0) { |
| + VLOG(2) << __FUNCTION__ << ": still have " << remaining_poses << " pose(s) queued up."; |
| + } |
| + // Kept for reference from the Java-side implementation; this timestamp |
| + // validity check has not been ported yet: |
| + // long frameTimestamp = mWebVrSurfaceTexture.getTimestamp(); |
| + // if (frameTimestamp == 0) { |
| + //   Log.w(TAG, "Invalid timestamp for frame on WebVR waiting list. This should not happen."); |
| + //   return; |
| + // } |
| + DrawFrame(pose_index); |
| +} |
| + |
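OnWebVrFrameAvailable() above and SubmitWebVRFrame() later in this file form a rendezvous between two events that can fire in either order: the SurfaceTexture frame arriving and the pose index being submitted. A minimal standalone model of that handshake (type and method names hypothetical):

    #include <cstdint>
    #include <deque>

    // Whichever side arrives first records itself; the second side
    // triggers the draw. Mirrors webvr_pending_poses_ and
    // webvr_already_available_frames_ above.
    struct FramePoseRendezvous {
      std::deque<uint32_t> pending_poses;
      int already_available_frames = 0;

      // Frame arrived. Returns the pose index to draw with, or -1 if the
      // frame must be saved until its pose is submitted.
      int64_t OnFrameAvailable() {
        if (pending_poses.empty()) {
          ++already_available_frames;
          return -1;
        }
        int64_t pose = pending_poses.front();
        pending_poses.pop_front();
        return pose;
      }

      // Pose submitted. Returns true if a saved frame should be consumed
      // immediately (i.e. OnFrameAvailable already fired for it).
      bool OnPoseSubmitted(uint32_t pose_index) {
        pending_poses.push_back(pose_index);
        if (already_available_frames > 0) {
          --already_available_frames;
          return true;
        }
        return false;
      }
    };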
| void VrShellGl::GvrInit(gvr_context* gvr_api) { |
| gvr_api_ = gvr::GvrApi::WrapNonOwned(gvr_api); |
| controller_.reset(new VrController(gvr_api)); |
| @@ -306,21 +337,12 @@ void VrShellGl::GvrInit(gvr_context* gvr_api) { |
| } |
| void VrShellGl::InitializeRenderer() { |
| - // While WebVR is going through the compositor path, it shares |
| - // the same texture ID. This will change once it gets its own |
| - // surface, but store it separately to avoid future confusion. |
| - // TODO(klausw,crbug.com/655722): remove this. |
| - webvr_texture_id_ = content_texture_id_; |
| - // Out of paranoia, explicitly reset the "pose valid" flags to false |
| - // from the GL thread. The constructor ran in the UI thread. |
| - // TODO(klausw,crbug.com/655722): remove this. |
| - webvr_head_pose_valid_.assign(kPoseRingBufferSize, false); |
| - |
| gvr_api_->InitializeGl(); |
| std::vector<gvr::BufferSpec> specs; |
| // For kFramePrimaryBuffer (primary VrShell and WebVR content) |
| specs.push_back(gvr_api_->CreateBufferSpec()); |
| render_size_primary_ = specs[kFramePrimaryBuffer].GetSize(); |
| + render_size_primary_vrshell_ = render_size_primary_; |
| // For kFrameHeadlockedBuffer (for WebVR insecure content warning). |
| // Set this up at fixed resolution, the (smaller) FOV gets set below. |
| @@ -400,6 +422,7 @@ void VrShellGl::UpdateController(const gvr::Vec3f& forward_vector) { |
| // Also send tap events for controller "touchpad click" events. |
| if (touch_pending_ || controller_->ButtonUpHappened( |
| gvr::ControllerButton::GVR_CONTROLLER_BUTTON_CLICK)) { |
| + VLOG(1) << __FUNCTION__ << ": WebVR emulating Cardboard button"; |
| touch_pending_ = false; |
| std::unique_ptr<WebGestureEvent> gesture(new WebGestureEvent()); |
| gesture->sourceDevice = blink::WebGestureDeviceTouchpad; |
| @@ -565,6 +588,21 @@ void VrShellGl::SendEventsToTarget(InputTarget input_target, |
| } |
| } |
| +static double getMonotonicTimestampMs() { |
| + // TODO(klausw): replace with TimeInMicroseconds()? |
| +#if defined(OS_ANDROID) |
| + // Android surfacetexture timestamp compatible timer? See: |
| + // http://androidxref.com/7.0.0_r1/xref/frameworks/native/libs/gui/Surface.cpp#370 |
| + // http://androidxref.com/7.0.0_r1/xref/frameworks/rs/rsCppUtils.h#162 |
| + struct timespec t; |
| + t.tv_sec = t.tv_nsec = 0; |
| + clock_gettime(CLOCK_MONOTONIC, &t); |
| + return t.tv_sec * 1e3 + t.tv_nsec * 1e-6; |
| +#else |
| + return 0.0; |
| +#endif |
| +} |
| + |
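If the TODO above is taken up, a sketch of a base/ equivalent; this assumes base::TimeTicks is backed by CLOCK_MONOTONIC on Android (so values stay comparable to SurfaceTexture timestamps), which is worth verifying before swapping it in:

    #include "base/time/time.h"

    // Sketch only: same millisecond scale as getMonotonicTimestampMs(),
    // expressed via base::TimeTicks instead of a raw clock_gettime() call.
    static double GetMonotonicTimestampMsFromTimeTicks() {
      return (base::TimeTicks::Now() - base::TimeTicks()).InMillisecondsF();
    }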
| void VrShellGl::SendGesture(InputTarget input_target, |
| std::unique_ptr<blink::WebInputEvent> event) { |
| DCHECK(input_target != InputTarget::NONE); |
| @@ -577,45 +615,74 @@ void VrShellGl::SendGesture(InputTarget input_target, |
| base::Passed(std::move(event)))); |
| } |
| -void VrShellGl::SetGvrPoseForWebVr(const gvr::Mat4f& pose, uint32_t pose_num) { |
| - webvr_head_pose_[pose_num % kPoseRingBufferSize] = pose; |
| - webvr_head_pose_valid_[pose_num % kPoseRingBufferSize] = true; |
| +void VrShellGl::SetWebVRGvrPose( |
| + const gvr::Mat4f& pose, uint32_t pose_index, int64_t pose_time_nanos) { |
| + webvr_head_pose_[pose_index % kPoseRingBufferSize] = pose; |
| + webvr_time_submit_[pose_index % kPoseRingBufferSize] = 0.0; |
| } |
| -bool VrShellGl::WebVrPoseByteIsValid(int pose_index_byte) { |
| - if (pose_index_byte < 0) { |
| - return false; |
| - } |
| - if (!webvr_head_pose_valid_[pose_index_byte % kPoseRingBufferSize]) { |
| - VLOG(1) << "WebVR: reject decoded pose index " << pose_index_byte << |
| - ", not a valid pose"; |
| - return false; |
| - } |
| - return true; |
| -} |
| - |
| -void VrShellGl::DrawFrame() { |
| - TRACE_EVENT0("gpu", "VrShellGl::DrawFrame"); |
| +void VrShellGl::DrawFrame(uint32_t pose_index) { |
| + TRACE_EVENT1("gpu", "VrShellGl::DrawFrame", "frame", pose_index); |
| // Reset the viewport list to just the pair of viewports for the |
| // primary buffer each frame. Head-locked viewports get added by |
| // DrawVrShell if needed. |
| buffer_viewport_list_->SetToRecommendedBufferViewports(); |
| - gvr::Frame frame = swap_chain_->AcquireFrame(); |
| - gvr::ClockTimePoint target_time = gvr::GvrApi::GetTimePointNow(); |
| - target_time.monotonic_system_time_nanos += kPredictionTimeWithoutVsyncNanos; |
| + if (web_vr_mode_) { |
| + if (!pose_index) { |
| + LOG(INFO) << "klausw:DrawFrame: no pose index, not drawing WebVR."; |
| + return; |
| + } |
| - gvr::Mat4f head_pose = |
| - gvr_api_->GetHeadSpaceFromStartSpaceRotation(target_time); |
| + webvr_time_surfaced_[pose_index % kPoseRingBufferSize] = getMonotonicTimestampMs(); |
| - gvr::Vec3f position = GetTranslation(head_pose); |
| - if (position.x == 0.0f && position.y == 0.0f && position.z == 0.0f) { |
| - // This appears to be a 3DOF pose without a neck model. Add one. |
| - // The head pose has redundant data. Assume we're only using the |
| - // object_from_reference_matrix, we're not updating position_external. |
| - // TODO: Not sure what object_from_reference_matrix is. The new api removed |
| - // it. For now, removing it seems working fine. |
| - gvr_api_->ApplyNeckModel(head_pose, 1.0f); |
| + // If needed, resize the primary buffer for use with WebVR. |
| + if (render_size_primary_ != render_size_primary_webvr_) { |
| + if (!render_size_primary_webvr_.width) { |
| + VLOG(2) << "WebVR rendering size not known yet, dropping frame"; |
| + return; |
| + } |
| + VLOG(1) << "WebVR set size " << render_size_primary_webvr_.width << "x" << render_size_primary_webvr_.height; |
| + render_size_primary_ = render_size_primary_webvr_; |
| + swap_chain_->ResizeBuffer(kFramePrimaryBuffer, render_size_primary_); |
| + } |
| + } else { |
| + if (render_size_primary_ != render_size_primary_vrshell_) { |
| + VLOG(1) << "WebVR restore size " << render_size_primary_vrshell_.width << "x" << render_size_primary_vrshell_.height; |
| + render_size_primary_ = render_size_primary_vrshell_; |
| + swap_chain_->ResizeBuffer(kFramePrimaryBuffer, render_size_primary_); |
| + } |
| + } |
| + |
| + gvr::Mat4f head_pose; |
| + gvr::Frame frame = gvr::Frame(nullptr); |
| + { |
| + TRACE_EVENT0("gpu", "AcquireFrame"); |
| + frame = swap_chain_->AcquireFrame(); |
| + } |
| + |
| + if (web_vr_mode_) { |
| + int idx = pose_index % kPoseRingBufferSize; |
| + webvr_time_acquired_[idx] = getMonotonicTimestampMs(); |
| + // TODO(klausw): report acquire time back to JS? If it blocks, |
| + // completion time == VSYNC time or a close approximation which |
| + // may be useful. But not helpful if it doesn't block. |
| + head_pose = webvr_head_pose_[idx]; |
| + } else { |
| + gvr::ClockTimePoint target_time = gvr::GvrApi::GetTimePointNow(); |
| + target_time.monotonic_system_time_nanos += kPredictionTimeWithoutVsyncNanos; |
| + |
| + head_pose = gvr_api_->GetHeadSpaceFromStartSpaceRotation(target_time); |
| + |
| + gvr::Vec3f position = GetTranslation(head_pose); |
| + if (position.x == 0.0f && position.y == 0.0f && position.z == 0.0f) { |
| + // This appears to be a 3DOF pose without a neck model. Add one. |
| + // The head pose has redundant data. Assume we're only using the |
| + // object_from_reference_matrix, we're not updating position_external. |
| + // TODO: Not sure what object_from_reference_matrix is. The new API |
| + // removed it. For now, removing it seems to work fine. |
| + gvr_api_->ApplyNeckModel(head_pose, 1.0f); |
| + } |
| } |
| frame.BindBuffer(kFramePrimaryBuffer); |
| @@ -627,45 +694,86 @@ void VrShellGl::DrawFrame() { |
| UpdateController(GetForwardVector(head_pose)); |
| if (web_vr_mode_) { |
| - DrawWebVr(); |
| - |
| - // When using async reprojection, we need to know which pose was used in |
| - // the WebVR app for drawing this frame. Due to unknown amounts of |
| - // buffering in the compositor and SurfaceTexture, we read the pose number |
| - // from a corner pixel. There's no point in doing this for legacy |
| - // distortion rendering since that doesn't need a pose, and reading back |
| - // pixels is an expensive operation. TODO(klausw,crbug.com/655722): stop |
| - // doing this once we have working no-compositor rendering for WebVR. |
| - if (gvr_api_->GetAsyncReprojectionEnabled()) { |
| - int pose_index_byte = GetPixelEncodedPoseIndexByte(); |
| - if (WebVrPoseByteIsValid(pose_index_byte)) { |
| - // We have a valid pose, use it for reprojection. |
| - webvr_left_viewport_->SetReprojection(GVR_REPROJECTION_FULL); |
| - webvr_right_viewport_->SetReprojection(GVR_REPROJECTION_FULL); |
| - head_pose = webvr_head_pose_[pose_index_byte % kPoseRingBufferSize]; |
| - // We can't mark the used pose as invalid since unfortunately |
| - // we have to reuse them. The compositor will re-submit stale |
| - // frames on vsync, and we can't tell that this has happened |
| - // until we've read the pose index from it, and at that point |
| - // it's too late to skip rendering. |
| - } else { |
| - // If we don't get a valid frame ID back we shouldn't attempt |
| - // to reproject by an invalid matrix, so turn off reprojection |
| - // instead. Invalid poses can permanently break reprojection |
| - // for this GVR instance: http://crbug.com/667327 |
| - webvr_left_viewport_->SetReprojection(GVR_REPROJECTION_NONE); |
| - webvr_right_viewport_->SetReprojection(GVR_REPROJECTION_NONE); |
| - } |
| + DrawWebVr(pose_index); |
| +#define CHECK_FRAME_COUNTER_PIXEL_FOR_DEBUGGING 0 |
| +#if CHECK_FRAME_COUNTER_PIXEL_FOR_DEBUGGING |
| + uint32_t pixel_pose = GetPixelEncodedPoseIndex(); |
| + if (pixel_pose != pose_index) { |
| + LOG(ERROR) << __FUNCTION__ << ": poses got out of sync, pixel=" |
| + << pixel_pose << " != pose_index=" << pose_index; |
| } |
| +#endif |
| } |
| DrawVrShell(head_pose, frame); |
| frame.Unbind(); |
| + glFlush(); |
| frame.Submit(*buffer_viewport_list_, head_pose); |
| // No need to SwapBuffers for an offscreen surface. |
| - ScheduleNextDrawFrame(); |
| + // TODO(klausw): is this true? Test with async reprojection off. |
| +#if 0 |
| + if (web_vr_mode_ && !gvr_api_->GetAsyncReprojectionEnabled()) { |
| + // WebVR uses RENDERMODE_WHEN_DIRTY for the Java glSurfaceView, |
| + // and never actually marks frames as dirty. We need to manually |
| + // swap buffers if not using reprojection since the GvrLayout |
| + // won't do it for us. |
| + eglSwapBuffers(eglGetDisplay(EGL_DEFAULT_DISPLAY), |
| + eglGetCurrentSurface(EGL_DRAW)); |
| + } |
| +#endif |
| + |
| + if (web_vr_mode_) { |
| + double submit_time = webvr_time_submit_[pose_index % kPoseRingBufferSize]; |
| + |
| + double prev_submit_time = 0.0; |
| + for (int i = 1; i < kPoseRingBufferSize; ++i) { |
| + int offset = kPoseRingBufferSize - i; |
| + prev_submit_time = webvr_time_submit_[ |
| + (pose_index + offset) % kPoseRingBufferSize]; |
| + if (prev_submit_time != 0.0) |
| + break; |
| + } |
| + |
| + int frameI = 1; |
| + if (prev_submit_time != 0.0) { |
| + // Rounded integer "frames taken" assuming 60Hz base rate. |
| + frameI = ((submit_time - prev_submit_time) * 60 / 1000 + 0.5); |
| + if (!frameI) frameI = 1; |
| + } |
| + |
| + int idx = pose_index % kPoseRingBufferSize; |
| + double frame_start_time = webvr_time_frame_start_[idx]; |
| + double get_pose_time = webvr_time_get_pose_[idx]; |
| + double got_pose_time = webvr_time_got_pose_[idx]; |
| + double surfaced_time = webvr_time_surfaced_[idx]; |
| + double acquired_time = webvr_time_acquired_[idx]; |
| + double drawn_time = getMonotonicTimestampMs(); |
| + |
| + LOG(INFO) << "timing for frame " << pose_index << |
| + ", frameI " << frameI << |
| + ", rAF " << std::fixed << std::setprecision(1) << |
| + (get_pose_time - frame_start_time) << " getPose " << |
| + (got_pose_time - get_pose_time) << " gotPose " << |
| + (submit_time - got_pose_time) << " submit " << |
| + (surfaced_time - submit_time) << " surfaced " << |
| + (acquired_time - surfaced_time) << " acquired " << |
| + (drawn_time - acquired_time) << " drawn "; |
| + |
| + // TODO(klausw): can completion reporting be moved earlier? I |
| + // tried doing so right after AcquireFrame, but then framerate was |
| + // very wobbly. Try moving the callback after DrawWebVr + |
| + // glFlush() (not finish) to see if that helps? |
| + main_thread_task_runner_->PostTask(FROM_HERE, base::Bind( |
| + &VrShell::OnWebVRFrameSubmitted, weak_vr_shell_, |
| + webvr_surface_handle_, |
| + pose_index, |
| + acquired_time - submit_time)); |
| + } else { |
| + // Only request a new scheduled frame in non-WebVR mode. In WebVR mode, |
| + // the next frame will be drawn in response to SubmitFrame. |
| + ScheduleNextDrawFrame(); |
| + } |
| } |
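For reference, the frameI rounding above converts a submit-to-submit delta into whole 60 Hz frame periods with a floor of one frame; a standalone restatement with illustrative values (helper name illustrative):

    // Mirrors the frameI computation in DrawFrame above.
    int FramesTakenAt60Hz(double delta_ms) {
      int frames = static_cast<int>(delta_ms * 60 / 1000 + 0.5);
      return frames > 0 ? frames : 1;
    }
    // 16.7 ms -> 1 frame, 33.3 ms -> 2 frames, 50.0 ms -> 3 frames.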
| void VrShellGl::DrawVrShell(const gvr::Mat4f& head_pose, |
| @@ -727,11 +835,152 @@ void VrShellGl::DrawVrShell(const gvr::Mat4f& head_pose, |
| } |
| } |
| -gvr::Sizei VrShellGl::GetWebVRCompositorSurfaceSize() { |
| - // This is a stopgap while we're using the WebVR compositor rendering path. |
| - // TODO(klausw,crbug.com/655722): Remove this method and member once we're |
| - // using a separate WebVR render surface. |
| - return content_tex_physical_size_; |
| +void VrShellGl::GetWebVRSurfaceHandle( |
| + int32_t width, int32_t height, |
| + const device::mojom::VRDisplay::GetSurfaceHandleCallback& callback) { |
| + VLOG(2) << __FUNCTION__ << ": size=" << width << "x" << height; |
| + |
| + if (!webvr_surface_texture_.get()) { |
| + // We can't set up a surface due to not having a SurfaceTexture. |
| + VLOG(1) << __FUNCTION__ << ": Failed, don't have a SurfaceTexture"; |
| + callback.Run(0); |
artem.bolgar 2017/02/14 05:04:24:
You can't do this here. The callback may be executed ...
| + return; |
| + } |
| + |
| + if (webvr_surface_handle_) { |
| + // We have a surface, resize if needed. |
| + if (render_size_primary_webvr_.width == width && |
| + render_size_primary_webvr_.height == height) { |
| + VLOG(1) << __FUNCTION__ << ": Ignoring redundant call, this matches the current size."; |
| + } else { |
| + render_size_primary_webvr_.width = width; |
| + render_size_primary_webvr_.height = height; |
| + // The size is a bit tricky to change after the fact, see |
| + // SurfaceTexture.setDefaultBufferSize documentation: |
| + // |
| + // For OpenGL ES, the EGLSurface should be destroyed |
| + // (via eglDestroySurface), made not-current (via |
| + // eglMakeCurrent), and then recreated (via |
| + // eglCreateWindowSurface) to ensure that the new |
| + // default size has taken effect. |
| + webvr_surface_texture_->SetDefaultBufferSize(width, height); |
| + } |
| + } else { |
| + // Create a new surface. |
| + render_size_primary_webvr_.width = width; |
| + render_size_primary_webvr_.height = height; |
| + webvr_surface_texture_->SetDefaultBufferSize(width, height); |
| + // Assume we've already created the Surface. |
| + //Java_VrShellImpl_createWebVrRenderSurface(env, j_vr_shell_.obj(), width, height); |
| + SetWebVrSurface(); |
| + } |
| + callback.Run(webvr_surface_handle_); |
artem.bolgar 2017/02/14 05:04:24:
You can't do this here. The callback may be executed ...
| +} |
| + |
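The SurfaceTexture.setDefaultBufferSize caveat quoted above corresponds to a standard EGL resize sequence. A generic sketch, not part of this patch, assuming the display, config, context, and ANativeWindow are already at hand:

    #include <EGL/egl.h>
    #include <android/native_window.h>

    // Destroy-and-recreate so a new default buffer size takes effect.
    EGLSurface RecreateWindowSurface(EGLDisplay display, EGLConfig config,
                                     EGLContext context, EGLSurface old_surface,
                                     ANativeWindow* window) {
      eglMakeCurrent(display, EGL_NO_SURFACE, EGL_NO_SURFACE, EGL_NO_CONTEXT);
      eglDestroySurface(display, old_surface);
      EGLSurface new_surface =
          eglCreateWindowSurface(display, config, window, nullptr);
      eglMakeCurrent(display, new_surface, new_surface, context);
      return new_surface;
    }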
| +void VrShellGl::SetWebVrSurface() { |
| + VLOG(1) << __FUNCTION__ << ": size=" << render_size_primary_webvr_.width << "x" << render_size_primary_webvr_.height << " webvr_texture_id_=" << webvr_texture_id_ << " webvr_surface_handle_=" << webvr_surface_handle_; |
| + |
| + if (webvr_surface_handle_) { |
| + VLOG(1) << __FUNCTION__ << ": ignoring redundant call, already have webvr_surface_handle_=" << webvr_surface_handle_; |
| + return; |
| + } |
| + |
| + // Note: This ensures that any local references used by |
| + // ANativeWindow_fromSurface are released immediately. This is needed as a |
| + // workaround for https://code.google.com/p/android/issues/detail?id=68174 |
| + JNIEnv* env = base::android::AttachCurrentThread(); |
| + base::android::ScopedJavaLocalFrame scoped_local_reference_frame(env); |
| + ANativeWindow* window = ANativeWindow_fromSurface( |
| + env, webvr_surface_->j_surface().obj()); |
| + |
| + // This variant doesn't seem to be working - lookup fails?! |
| + //ANativeWindow* window = webvr_surface_texture_->CreateSurface(); |
| + |
| + gpu::GpuSurfaceTracker* tracker = gpu::GpuSurfaceTracker::Get(); |
| + ANativeWindow_acquire(window); |
| + |
| + // TODO(klausw): is setBuffersGeometry necessary? |
| + ANativeWindow_setBuffersGeometry(window, render_size_primary_webvr_.width, |
| + render_size_primary_webvr_.height, WINDOW_FORMAT_RGBA_8888); |
| + |
| + auto handle = tracker->AddSurfaceForNativeWidget(window); |
| + |
| + tracker->RegisterViewSurface(handle, webvr_surface_->j_surface().obj()); |
| + |
| + webvr_surface_handle_ = handle; |
| + |
| + // Now we're ready for child_process_service_impl.cc to fetch it via |
| + // AIDL/Binder by ID. |
| + |
| + ANativeWindow_release(window); |
| + |
| + VLOG(1) << __FUNCTION__ << ": size=" << render_size_primary_webvr_.width << "x" << render_size_primary_webvr_.height << ", webvr_surface_handle_=" << handle; |
| + // TODO(klausw): add cleanup to avoid leaking surfaces: |
| + // |
| + // The caller must release the underlying reference when done with the handle |
| + // by calling ANativeWindow_release(). |
| +} |
| + |
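A sketch of the cleanup the TODO above asks for, assuming gpu::GpuSurfaceTracker's UnregisterViewSurface()/RemoveSurface() pair undoes the registration done in SetWebVrSurface(); the helper itself is hypothetical and not part of this patch:

    // Hypothetical teardown counterpart to SetWebVrSurface().
    void VrShellGl::ReleaseWebVrSurface() {
      if (!webvr_surface_handle_)
        return;
      gpu::GpuSurfaceTracker* tracker = gpu::GpuSurfaceTracker::Get();
      tracker->UnregisterViewSurface(webvr_surface_handle_);
      tracker->RemoveSurface(webvr_surface_handle_);
      webvr_surface_handle_ = 0;
      // The ANativeWindow reference acquired in SetWebVrSurface() would
      // also need a matching ANativeWindow_release() here if the window
      // pointer were retained.
    }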
| +void VrShellGl::SubmitWebVRFrame(int32_t surface_handle, |
| + const device::mojom::VRPosePtr& pose) { |
| + // Earlier parameter list, kept for reference: |
| + // uint32_t pose_index, double frameStart, double serviceStart, |
| + // double getPose, double gotPose, double submit) { |
| + uint32_t pose_index = pose->poseIndex; |
| + |
| + if (surface_handle != webvr_surface_handle_) { |
| + VLOG(2) << __FUNCTION__ << ": ignoring submitted frame for surface " << surface_handle << ", ours is " << webvr_surface_handle_; |
| + } |
| + |
| + TRACE_EVENT1("media", "klausw:VrShell SubmitWebVRFrame", "frame", pose_index); |
| + VLOG(2) << __FUNCTION__ << ": frame " << pose_index; |
| + |
| + webvr_last_submitted_ = pose_index; |
| + |
| + int idx = pose_index % kPoseRingBufferSize; |
| + double submit_time = getMonotonicTimestampMs(); |
| + webvr_time_submit_[idx] = submit_time; |
| + |
| +#if 0 |
| + // Align clocks. TODO(klausw): this assumes the submit times are equal |
| + // and ignores RPC lag. |
| + double submitTs = pose->ts_submit; |
| + auto fromJS = [=](double t) { return t - submitTs + submit_time; }; |
| + webvr_time_frame_start_[idx] = fromJS(pose->ts_frameStart); |
| + webvr_time_get_pose_[idx] = fromJS(pose->ts_getPose); |
| + webvr_time_got_pose_[idx] = fromJS(pose->ts_gotPose); |
| +#else |
| + webvr_time_frame_start_[idx] = pose->ts_frameStart; |
| + webvr_time_get_pose_[idx] = pose->ts_getPose; |
| + webvr_time_got_pose_[idx] = pose->ts_gotPose; |
| +#endif |
| + |
| + //Java_VrShellImpl_expectWebVrFrame(env, j_vr_shell_.obj(), static_cast<jlong>(pose_index)); |
| + VLOG(1) << __FUNCTION__ << ": expecting WebVR frame " << pose_index; |
| + webvr_pending_poses_.push_back(pose_index); |
| + if (webvr_already_available_frames_ > 0) { |
| + // If the "frame available" was already triggered, draw now. |
| + VLOG(2) << __FUNCTION__ << ": Drawing saved frame now"; |
| + --webvr_already_available_frames_; |
| + OnWebVrFrameAvailable(); |
| + } |
| + |
| + { |
| + TRACE_EVENT1("gpu", "glFinish", "before frame", pose_index); |
| + // This is a load-bearing glFinish. I'm not entirely sure what's |
| + // going on since we haven't actually emitted any GL commands on |
| + // this context since the glFlush at the end of the previous |
| + // frame, but this measurably reduces stalls in AcquireFrame and |
| + // steadies the framerate, at the cost of reducing throughput. It |
| + // effectively aligns rAF calls to be in sync with frame |
| + // completion. |
| + // |
| + // Without the glFinish here, rAF calls stay aligned to vsync, |
| + // with dropped frames to catch up as needed. This looks jankier. |
| + // |
| + // Putting the glFinish at the end of vr_shell's DrawFrame causes |
| + // a larger latency gap than doing it here. |
| + // |
| + // TODO(klausw): try adjusting rAF timing offsets to keep timing |
| + // steady? Or is it possible to tweak pose prediction to handle |
| + // this better? May need cooperation from the JS app to handle |
| + // uneven timing. |
| + // |
| + //if (pose_index % 20 >= 10) |
| + glFinish(); |
| + } |
| } |
| void VrShellGl::DrawUiView(const gvr::Mat4f* head_pose, |
| @@ -863,8 +1112,8 @@ void VrShellGl::DrawCursor(const gvr::Mat4f& render_matrix) { |
| } |
| } |
| -void VrShellGl::DrawWebVr() { |
| - TRACE_EVENT0("gpu", "VrShellGl::DrawWebVr"); |
| +void VrShellGl::DrawWebVr(uint32_t pose_index) { |
| + TRACE_EVENT1("gpu", "VrShellGl::DrawWebVr", "frame", pose_index); |
| // Don't need face culling, depth testing, blending, etc. Turn it all off. |
| glDisable(GL_CULL_FACE); |
| glDepthMask(GL_FALSE); |
| @@ -876,6 +1125,21 @@ void VrShellGl::DrawWebVr() { |
| glViewport(0, 0, render_size_primary_.width, render_size_primary_.height); |
| vr_shell_renderer_->GetWebVrRenderer()->Draw(webvr_texture_id_); |
| + if (!webvr_texture_bounds_need_update_at_.empty()) { |
| + uint32_t next_at = webvr_texture_bounds_need_update_at_.front(); |
| + VLOG(2) << __FUNCTION__ << ": bounds update at " << next_at; |
| + if (next_at <= pose_index && pose_index - next_at < 0x40000000) { |
| + auto left_bounds = webvr_texture_bounds_left_.front(); |
| + auto right_bounds = webvr_texture_bounds_right_.front(); |
| + webvr_texture_bounds_need_update_at_.pop_front(); |
| + webvr_texture_bounds_left_.pop_front(); |
| + webvr_texture_bounds_right_.pop_front(); |
| + VLOG(2) << __FUNCTION__ << ": Update texture bounds, left l=" << left_bounds.left << ",r=" << left_bounds.right << ",t=" << left_bounds.top << ",b=" << left_bounds.bottom; |
| + webvr_left_viewport_->SetSourceUv(left_bounds); |
| + webvr_right_viewport_->SetSourceUv(right_bounds); |
| + } |
| + } |
| + |
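The guard above applies a queued bounds update once the current pose index has caught up to it; the unsigned subtraction bounds how stale an entry may be. A standalone restatement with illustrative values (helper name illustrative):

    #include <cstdint>

    // Mirrors the condition in DrawWebVr: |next_at| is due once
    // |pose_index| has reached it, within a 2^30 window.
    bool BoundsUpdateDue(uint32_t next_at, uint32_t pose_index) {
      return next_at <= pose_index && pose_index - next_at < 0x40000000u;
    }
    // BoundsUpdateDue(5, 7) -> true; BoundsUpdateDue(7, 5) -> false.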
| buffer_viewport_list_->SetBufferViewport(GVR_LEFT_EYE, |
| *webvr_left_viewport_); |
| buffer_viewport_list_->SetBufferViewport(GVR_RIGHT_EYE, |
| @@ -897,18 +1161,22 @@ void VrShellGl::OnResume() { |
| gvr_api_->RefreshViewerProfile(); |
| gvr_api_->ResumeTracking(); |
| controller_->OnResume(); |
| - draw_task_.Reset(base::Bind(&VrShellGl::DrawFrame, base::Unretained(this))); |
| + draw_task_.Reset(base::Bind(&VrShellGl::DrawFrame, base::Unretained(this), 0)); |
| ScheduleNextDrawFrame(); |
| } |
| void VrShellGl::SetWebVrMode(bool enabled) { |
| + VLOG(1) << __FUNCTION__ << ": enabled=" << enabled; |
| web_vr_mode_ = enabled; |
| } |
| -void VrShellGl::UpdateWebVRTextureBounds(const gvr::Rectf& left_bounds, |
| +void VrShellGl::UpdateWebVRTextureBounds(uint32_t for_pose_index, |
| + const gvr::Rectf& left_bounds, |
| const gvr::Rectf& right_bounds) { |
| - webvr_left_viewport_->SetSourceUv(left_bounds); |
| - webvr_right_viewport_->SetSourceUv(right_bounds); |
| + VLOG(2) << __FUNCTION__ << ": for_pose_index=" << for_pose_index << " left_bounds l=" << left_bounds.left << ",r=" << left_bounds.right << ",t=" << left_bounds.top << ",b=" << left_bounds.bottom; |
| + webvr_texture_bounds_need_update_at_.push_back(for_pose_index); |
| + webvr_texture_bounds_left_.push_back(left_bounds); |
| + webvr_texture_bounds_right_.push_back(right_bounds); |
| } |
| gvr::GvrApi* VrShellGl::gvr_api() { |