OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 * | 9 * |
10 */ | 10 */ |
(...skipping 103 matching lines...)
114 RTC_CHECK_GE(width, 0); | 114 RTC_CHECK_GE(width, 0); |
115 RTC_CHECK_GE(height, 0); | 115 RTC_CHECK_GE(height, 0); |
116 int ret = av_image_check_size(static_cast<unsigned int>(width), | 116 int ret = av_image_check_size(static_cast<unsigned int>(width), |
117 static_cast<unsigned int>(height), 0, nullptr); | 117 static_cast<unsigned int>(height), 0, nullptr); |
118 if (ret < 0) { | 118 if (ret < 0) { |
119 LOG(LS_ERROR) << "Invalid picture size " << width << "x" << height; | 119 LOG(LS_ERROR) << "Invalid picture size " << width << "x" << height; |
120 decoder->ReportError(); | 120 decoder->ReportError(); |
121 return ret; | 121 return ret; |
122 } | 122 } |
123 | 123 |
124 // The video frame is stored in |video_frame|. |av_frame| is FFmpeg's version | 124 // The video frame is stored in |frame_buffer|. |av_frame| is FFmpeg's version |
125 // of a video frame and will be set up to reference |video_frame|'s buffers. | 125 // of a video frame and will be set up to reference |frame_buffer|'s data. |
126 | |
127 // TODO(nisse): The VideoFrame's timestamp and rotation info is not used. | |
128 // Refactor to not use a VideoFrame object at all. |
129 | 126 |
130 // FFmpeg expects the initial allocation to be zero-initialized according to | 127 // FFmpeg expects the initial allocation to be zero-initialized according to |
131 // http://crbug.com/390941. Our pool is set up to zero-initialize new buffers. | 128 // http://crbug.com/390941. Our pool is set up to zero-initialize new buffers. |
132 VideoFrame* video_frame = new VideoFrame( | 129 // TODO(nisse): Delete that feature from the video pool, instead add |
133 decoder->pool_.CreateBuffer(width, height), | 130 // an explicit call to InitializeData here. |
134 0 /* timestamp */, 0 /* render_time_ms */, kVideoRotation_0); | 131 rtc::scoped_refptr<I420Buffer> frame_buffer = |
| 132 decoder->pool_.CreateBuffer(width, height); |
135 | 133 |
| 134 int y_size = width * height; |
| 135 int uv_size = ((width + 1) / 2) * ((height + 1) / 2); |
136 // DCHECK that we have a continuous buffer as is required. | 136 // DCHECK that we have a continuous buffer as is required. |
137 RTC_DCHECK_EQ(video_frame->video_frame_buffer()->DataU(), | 137 RTC_DCHECK_EQ(frame_buffer->DataU(), frame_buffer->DataY() + y_size); |
138 video_frame->video_frame_buffer()->DataY() + | 138 RTC_DCHECK_EQ(frame_buffer->DataV(), frame_buffer->DataU() + uv_size); |
139 video_frame->allocated_size(kYPlane)); | 139 int total_size = y_size + 2 * uv_size; |
140 RTC_DCHECK_EQ(video_frame->video_frame_buffer()->DataV(), | |
141 video_frame->video_frame_buffer()->DataU() + | |
142 video_frame->allocated_size(kUPlane)); | |
143 int total_size = video_frame->allocated_size(kYPlane) + | |
144 video_frame->allocated_size(kUPlane) + | |
145 video_frame->allocated_size(kVPlane); | |
146 | 140 |
147 av_frame->format = context->pix_fmt; | 141 av_frame->format = context->pix_fmt; |
148 av_frame->reordered_opaque = context->reordered_opaque; | 142 av_frame->reordered_opaque = context->reordered_opaque; |
149 | 143 |
150 // Set |av_frame| members as required by FFmpeg. | 144 // Set |av_frame| members as required by FFmpeg. |
151 av_frame->data[kYPlaneIndex] = | 145 av_frame->data[kYPlaneIndex] = frame_buffer->MutableDataY(); |
152 video_frame->video_frame_buffer()->MutableDataY(); | 146 av_frame->linesize[kYPlaneIndex] = frame_buffer->StrideY(); |
153 av_frame->linesize[kYPlaneIndex] = | 147 av_frame->data[kUPlaneIndex] = frame_buffer->MutableDataU(); |
154 video_frame->video_frame_buffer()->StrideY(); | 148 av_frame->linesize[kUPlaneIndex] = frame_buffer->StrideU(); |
155 av_frame->data[kUPlaneIndex] = | 149 av_frame->data[kVPlaneIndex] = frame_buffer->MutableDataV(); |
156 video_frame->video_frame_buffer()->MutableDataU(); | 150 av_frame->linesize[kVPlaneIndex] = frame_buffer->StrideV(); |
157 av_frame->linesize[kUPlaneIndex] = | |
158 video_frame->video_frame_buffer()->StrideU(); | |
159 av_frame->data[kVPlaneIndex] = | |
160 video_frame->video_frame_buffer()->MutableDataV(); | |
161 av_frame->linesize[kVPlaneIndex] = | |
162 video_frame->video_frame_buffer()->StrideV(); | |
163 RTC_DCHECK_EQ(av_frame->extended_data, av_frame->data); | 151 RTC_DCHECK_EQ(av_frame->extended_data, av_frame->data); |
164 | 152 |
165 av_frame->buf[0] = av_buffer_create(av_frame->data[kYPlaneIndex], | 153 // Create a VideoFrame object, to keep a reference to the buffer. |
166 total_size, | 154 // TODO(nisse): The VideoFrame's timestamp and rotation info is not used. |
167 AVFreeBuffer2, | 155 // Refactor to not use a VideoFrame object at all. |
168 static_cast<void*>(video_frame), | 156 av_frame->buf[0] = av_buffer_create( |
169 0); | 157 av_frame->data[kYPlaneIndex], |
| 158 total_size, |
| 159 AVFreeBuffer2, |
| 160 static_cast<void*>(new VideoFrame(frame_buffer, |
| 161 0 /* timestamp */, |
| 162 0 /* render_time_ms */, |
| 163 kVideoRotation_0)), |
| 164 0); |
170 RTC_CHECK(av_frame->buf[0]); | 165 RTC_CHECK(av_frame->buf[0]); |
171 return 0; | 166 return 0; |
172 } | 167 } |
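For context on the function above: the y_size/uv_size arithmetic and the contiguity DCHECKs rely on the I420 buffer holding its Y, U and V planes in one contiguous allocation, with the chroma planes subsampled by two in each dimension (odd widths and heights round up). A minimal standalone sketch of that layout, using hypothetical names rather than the real I420Buffer API:

#include <cstddef>
#include <cstdint>
#include <vector>

// Contiguous I420 allocation sketch: Y plane first, then U, then V.
struct I420LayoutSketch {
  I420LayoutSketch(int width, int height)
      : y_size(static_cast<size_t>(width) * height),
        uv_size(static_cast<size_t>((width + 1) / 2) * ((height + 1) / 2)),
        data(y_size + 2 * uv_size) {}

  uint8_t* DataY() { return data.data(); }
  uint8_t* DataU() { return data.data() + y_size; }
  uint8_t* DataV() { return data.data() + y_size + uv_size; }

  size_t y_size;
  size_t uv_size;
  std::vector<uint8_t> data;  // One allocation covering all three planes.
};

// With this layout, DataU() == DataY() + y_size and DataV() == DataU() + uv_size,
// and y_size + 2 * uv_size is the total byte count handed to FFmpeg, mirroring
// the checks in the code above.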
173 | 168 |
174 void H264DecoderImpl::AVFreeBuffer2(void* opaque, uint8_t* data) { | 169 void H264DecoderImpl::AVFreeBuffer2(void* opaque, uint8_t* data) { |
175 // The buffer pool recycles the buffer used by |video_frame| when there are no | 170 // The buffer pool recycles the buffer used by |video_frame| when there are no |
176 // more references to it. |video_frame| is a thin buffer holder and is not | 171 // more references to it. |video_frame| is a thin buffer holder and is not |
177 // recycled. | 172 // recycled. |
178 VideoFrame* video_frame = static_cast<VideoFrame*>(opaque); | 173 VideoFrame* video_frame = static_cast<VideoFrame*>(opaque); |
179 delete video_frame; | 174 delete video_frame; |
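The av_buffer_create() call above and AVFreeBuffer2() form a pair: FFmpeg is handed plane memory it does not own, together with an opaque pointer and a callback that libavutil invokes once the last AVBufferRef over that memory is released. A self-contained sketch of the same ownership pattern, assuming only libavutil (the owner type and helper names are illustrative, not the CL's; the int size parameter matches libavutil versions of this era, newer FFmpeg takes size_t):

extern "C" {
#include <libavutil/buffer.h>
}

#include <cstdint>
#include <vector>

namespace {

// Illustrative stand-in for the VideoFrame used above: it keeps the
// underlying storage alive for as long as FFmpeg holds a reference.
struct BufferOwner {
  std::vector<uint8_t> storage;
};

// Matches FFmpeg's free-callback signature; called once the last AVBufferRef
// created over this memory is unreferenced.
void FreeBufferOwner(void* opaque, uint8_t* /*data*/) {
  delete static_cast<BufferOwner*>(opaque);
}

// Wraps caller-owned memory in an AVBufferRef without copying it.
AVBufferRef* WrapOwnedBuffer(int size) {
  BufferOwner* owner = new BufferOwner{std::vector<uint8_t>(size)};
  AVBufferRef* buf = av_buffer_create(owner->storage.data(), size,
                                      &FreeBufferOwner, owner, 0);
  if (!buf)
    delete owner;  // On failure the callback never runs, so clean up here.
  return buf;
}

}  // namespace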
(...skipping 235 matching lines...)
415 void H264DecoderImpl::ReportError() { | 410 void H264DecoderImpl::ReportError() { |
416 if (has_reported_error_) | 411 if (has_reported_error_) |
417 return; | 412 return; |
418 RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264DecoderImpl.Event", | 413 RTC_HISTOGRAM_ENUMERATION("WebRTC.Video.H264DecoderImpl.Event", |
419 kH264DecoderEventError, | 414 kH264DecoderEventError, |
420 kH264DecoderEventMax); | 415 kH264DecoderEventMax); |
421 has_reported_error_ = true; | 416 has_reported_error_ = true; |
422 } | 417 } |
423 | 418 |
424 } // namespace webrtc | 419 } // namespace webrtc |