Chromium Code Reviews

Side by Side Diff: base/profiler/stack_sampling_profiler.cc

Issue 2554123002: Support parallel captures from the StackSamplingProfiler. (Closed)
Patch Set: rebased Created 3 years, 8 months ago
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/profiler/stack_sampling_profiler.h" 5 #include "base/profiler/stack_sampling_profiler.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <map>
8 #include <utility> 9 #include <utility>
9 10
11 #include "base/atomic_sequence_num.h"
12 #include "base/atomicops.h"
10 #include "base/bind.h" 13 #include "base/bind.h"
11 #include "base/bind_helpers.h" 14 #include "base/bind_helpers.h"
12 #include "base/callback.h" 15 #include "base/callback.h"
13 #include "base/lazy_instance.h" 16 #include "base/lazy_instance.h"
14 #include "base/location.h" 17 #include "base/location.h"
15 #include "base/macros.h" 18 #include "base/macros.h"
19 #include "base/memory/ptr_util.h"
20 #include "base/memory/singleton.h"
16 #include "base/profiler/native_stack_sampler.h" 21 #include "base/profiler/native_stack_sampler.h"
17 #include "base/synchronization/lock.h" 22 #include "base/synchronization/lock.h"
23 #include "base/threading/thread.h"
24 #include "base/threading/thread_restrictions.h"
18 #include "base/threading/thread_task_runner_handle.h" 25 #include "base/threading/thread_task_runner_handle.h"
19 #include "base/timer/elapsed_timer.h" 26 #include "base/timer/elapsed_timer.h"
20 27
21 namespace base { 28 namespace base {
22 29
23 namespace { 30 namespace {
24 31
25 // Used to ensure only one profiler is running at a time. 32 // This value is used when there is no collection in progress and thus no ID
26 LazyInstance<Lock>::Leaky concurrent_profiling_lock = LAZY_INSTANCE_INITIALIZER; 33 // with which to refer to a collection in the SamplingThread.
27 34 const int NULL_COLLECTION_ID = -1;
28 // AsyncRunner ----------------------------------------------------------------
29
30 // Helper class to allow a profiler to be run completely asynchronously from the
31 // initiator, without being concerned with the profiler's lifetime.
32 class AsyncRunner {
33 public:
34 // Sets up a profiler and arranges for it to be deleted on its completed
35 // callback.
36 static void Run(PlatformThreadId thread_id,
37 const StackSamplingProfiler::SamplingParams& params,
38 const StackSamplingProfiler::CompletedCallback& callback);
39
40 private:
41 AsyncRunner();
42
43 // Runs the callback and deletes the AsyncRunner instance. |profiles| is not
44 // const& because it must be passed with std::move.
45 static void RunCallbackAndDeleteInstance(
46 std::unique_ptr<AsyncRunner> object_to_be_deleted,
47 const StackSamplingProfiler::CompletedCallback& callback,
48 scoped_refptr<SingleThreadTaskRunner> task_runner,
49 StackSamplingProfiler::CallStackProfiles profiles);
50
51 std::unique_ptr<StackSamplingProfiler> profiler_;
52
53 DISALLOW_COPY_AND_ASSIGN(AsyncRunner);
54 };
55
56 // static
57 void AsyncRunner::Run(
58 PlatformThreadId thread_id,
59 const StackSamplingProfiler::SamplingParams& params,
60 const StackSamplingProfiler::CompletedCallback &callback) {
61 std::unique_ptr<AsyncRunner> runner(new AsyncRunner);
62 AsyncRunner* temp_ptr = runner.get();
63 temp_ptr->profiler_.reset(
64 new StackSamplingProfiler(thread_id, params,
65 Bind(&AsyncRunner::RunCallbackAndDeleteInstance,
66 Passed(&runner), callback,
67 ThreadTaskRunnerHandle::Get())));
68 // The callback won't be called until after Start(), so temp_ptr will still
69 // be valid here.
70 temp_ptr->profiler_->Start();
71 }
72
73 AsyncRunner::AsyncRunner() {}
74
75 void AsyncRunner::RunCallbackAndDeleteInstance(
76 std::unique_ptr<AsyncRunner> object_to_be_deleted,
77 const StackSamplingProfiler::CompletedCallback& callback,
78 scoped_refptr<SingleThreadTaskRunner> task_runner,
79 StackSamplingProfiler::CallStackProfiles profiles) {
80 callback.Run(std::move(profiles));
81 // Delete the instance on the original calling thread.
82 task_runner->DeleteSoon(FROM_HERE, object_to_be_deleted.release());
83 }
84 35
85 void ChangeAtomicFlags(subtle::Atomic32* flags, 36 void ChangeAtomicFlags(subtle::Atomic32* flags,
86 subtle::Atomic32 set, 37 subtle::Atomic32 set,
87 subtle::Atomic32 clear) { 38 subtle::Atomic32 clear) {
88 DCHECK(set != 0 || clear != 0); 39 DCHECK(set != 0 || clear != 0);
89 DCHECK_EQ(0, set & clear); 40 DCHECK_EQ(0, set & clear);
90 41
91 subtle::Atomic32 bits = subtle::NoBarrier_Load(flags); 42 subtle::Atomic32 bits = subtle::NoBarrier_Load(flags);
92 while (true) { 43 while (true) {
93 subtle::Atomic32 existing = 44 subtle::Atomic32 existing =
(...skipping 59 matching lines...)
153 StackSamplingProfiler::CallStackProfile 104 StackSamplingProfiler::CallStackProfile
154 StackSamplingProfiler::CallStackProfile::CopyForTesting() const { 105 StackSamplingProfiler::CallStackProfile::CopyForTesting() const {
155 return CallStackProfile(*this); 106 return CallStackProfile(*this);
156 } 107 }
157 108
158 StackSamplingProfiler::CallStackProfile::CallStackProfile( 109 StackSamplingProfiler::CallStackProfile::CallStackProfile(
159 const CallStackProfile& other) = default; 110 const CallStackProfile& other) = default;
160 111
161 // StackSamplingProfiler::SamplingThread -------------------------------------- 112 // StackSamplingProfiler::SamplingThread --------------------------------------
162 113
163 StackSamplingProfiler::SamplingThread::SamplingThread( 114 class StackSamplingProfiler::SamplingThread : public Thread {
164 std::unique_ptr<NativeStackSampler> native_sampler, 115 public:
116 class TestAPI {
117 public:
118 // Resets the existing sampler. This will unfortunately create the object
119 // unnecessarily if it doesn't already exist, but there's no way around that.
120 static void Reset();
121
122 // Disables inherent idle-shutdown behavior.
123 static void DisableIdleShutdown();
124
125 // Begins an idle shutdown as if the idle-timer had expired and waits for
126 // it to execute. Since the timer would have only been started at a time
127 // when the sampling thread actually was idle, this must be called only
128 // when it is known that there are no active sampling threads. If
129 // |simulate_intervening_add| is true then, when executed, the shutdown
130 // task will believe that a new collection has been added since it was
131 // posted.
132 static void ShutdownAssumingIdle(bool simulate_intervening_add);
133
134 private:
135 // Calls the sampling thread's ShutdownTask and then signals an event.
136 static void ShutdownTaskAndSignalEvent(SamplingThread* sampler,
137 int add_events,
138 WaitableEvent* event);
139 };
140
141 struct CollectionContext {
142 CollectionContext(PlatformThreadId target,
143 const SamplingParams& params,
144 const CompletedCallback& callback,
145 WaitableEvent* finished,
146 std::unique_ptr<NativeStackSampler> sampler)
147 : collection_id(next_collection_id_.GetNext()),
148 target(target),
149 params(params),
150 callback(callback),
151 finished(finished),
152 native_sampler(std::move(sampler)) {}
153 ~CollectionContext() {}
154
155 // An identifier for this collection, used to uniquely identify it to
156 // outside interests.
157 const int collection_id;
158
159 const PlatformThreadId target; // ID of the thread being sampled.
160 const SamplingParams params; // Information about how to sample.
161 const CompletedCallback callback; // Callback made when sampling complete.
162 WaitableEvent* const finished; // Signaled when all sampling complete.
163
164 // Platform-specific module that does the actual sampling.
165 std::unique_ptr<NativeStackSampler> native_sampler;
166
167 // The absolute time for the next sample.
168 Time next_sample_time;
169
170 // The time that a profile was started, for calculating the total duration.
171 Time profile_start_time;
172
173 // Counters that indicate the current position along the acquisition.
174 int burst = 0;
175 int sample = 0;
176
177 // The collected stack samples. The active profile is always at the back().
178 CallStackProfiles profiles;
179
180 private:
181 static StaticAtomicSequenceNumber next_collection_id_;
182 };
183
184 // Gets the single instance of this class.
185 static SamplingThread* GetInstance();
186
187 // Adds a new CollectionContext to the thread. This can be called externally
188 // from any thread. This returns an ID that can later be used to stop
189 // the sampling.
190 int Add(std::unique_ptr<CollectionContext> collection);
191
192 // Removes an active collection based on its ID, forcing it to run its
193 // callback if any data has been collected. This can be called externally
194 // from any thread.
195 void Remove(int id);
196
197 private:
198 friend class TestAPI;
199 friend struct DefaultSingletonTraits<SamplingThread>;
200
201 // The different states in which the sampling-thread can be.
202 enum ThreadExecutionState {
203 // The thread is not running because it has never been started. It will be
204 // started when a sampling request is received.
205 NOT_STARTED,
206
207 // The thread is running and processing tasks. This is the state when any
208 // sampling requests are active and during the "idle" period afterward
209 // before the thread is stopped.
210 RUNNING,
211
212 // Once all sampling requests have finished and the "idle" period has
213 // expired, the thread will be set to this state and its shutdown
214 // initiated. A call to Stop() must be made to ensure the previous thread
215 // has completely exited before calling Start() and moving back to the
216 // RUNNING state.
217 EXITING,
218 };
219
220 SamplingThread();
221 ~SamplingThread() override;
222
223 // Gets a task runner that is usable from the outside.
224 scoped_refptr<SingleThreadTaskRunner> GetOrCreateTaskRunnerForAdd();
225 scoped_refptr<SingleThreadTaskRunner> GetTaskRunner(
226 ThreadExecutionState* out_state);
227
228 // Gets the task runner that is usable from the sampling thread itself.
229 scoped_refptr<SingleThreadTaskRunner> GetTaskRunnerOnSamplingThread();
230
231 // Finishes a collection and reports collected data via callback.
232 void FinishCollection(CollectionContext* collection);
233
234 // Records a single sample of a collection.
235 void RecordSample(CollectionContext* collection);
236
237 // Check if the sampling thread is idle and begin a shutdown if it is.
238 void ScheduleShutdownIfIdle();
239
240 // These methods are tasks that get posted to the internal message queue.
241 void AddCollectionTask(std::unique_ptr<CollectionContext> collection);
242 void RemoveCollectionTask(int id);
243 void PerformCollectionTask(int id);
244 void ShutdownTask(int add_events);
245
246 // Updates |next_sample_time| based on the configured parameters.
247 // Returns true if there is a next sample or false if sampling is complete.
248 bool UpdateNextSampleTime(CollectionContext* collection);
249
250 // Thread:
251 void CleanUp() override;
252
253 // A map of IDs to collection contexts. Because this class is a singleton
254 // that is never destroyed, context objects will never be destructed except
255 // by explicit action. Thus, it's acceptable to pass unretained pointers
256 // to these objects when posting tasks.
257 std::map<int, std::unique_ptr<CollectionContext>> active_collections_;
258
259 // State maintained about the current execution (or non-execution) of
260 // the thread. This state must always be accessed while holding the
261 // lock. A copy of the task-runner is maintained here for use by any
262 // calling thread; this is necessary because Thread's accessor for it is
263 // not itself thread-safe. The lock is also used to order calls to the
264 // Thread API (Start, Stop, StopSoon, & DetachFromSequence) so that
265 // multiple threads may make those calls.
266 Lock thread_execution_state_lock_; // Protects all thread_execution_state_*
267 ThreadExecutionState thread_execution_state_ = NOT_STARTED;
268 scoped_refptr<SingleThreadTaskRunner> thread_execution_state_task_runner_;
269 bool thread_execution_state_disable_idle_shutdown_for_testing_ = false;
270
271 // A counter that records additions of new collection requests. It is
272 // incremented when changes occur so that delayed shutdown tasks can detect
273 // if something new has happened while they were waiting. Like all
274 // "execution_state" vars, this must be accessed while holding |thread_execution_state_lock_|.
275 int thread_execution_state_add_events_ = 0;
276
277 DISALLOW_COPY_AND_ASSIGN(SamplingThread);
278 };
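For orientation, here is a minimal sketch, not part of the patch, of how this interface is driven; it mirrors what StackSamplingProfiler::Start() and Stop() do later in this file. The parameter values are hypothetical, and |thread_id| and |callback| are assumed to be in scope:

  StackSamplingProfiler::SamplingParams params;
  params.sampling_interval = TimeDelta::FromMilliseconds(100);
  params.samples_per_burst = 10;

  WaitableEvent finished(WaitableEvent::ResetPolicy::MANUAL,
                         WaitableEvent::InitialState::NOT_SIGNALED);

  int id = SamplingThread::GetInstance()->Add(
      MakeUnique<SamplingThread::CollectionContext>(
          thread_id, params, callback, &finished,
          NativeStackSampler::Create(thread_id, &RecordAnnotations,
                                     nullptr)));

  // Either let the collection run to completion or force it to end early.
  SamplingThread::GetInstance()->Remove(id);
  finished.Wait();  // Signaled by FinishCollection() when the data is out.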
279
280 // static
281 void StackSamplingProfiler::SamplingThread::TestAPI::Reset() {
282 SamplingThread* sampler = SamplingThread::GetInstance();
283
284 ThreadExecutionState state;
285 {
286 AutoLock lock(sampler->thread_execution_state_lock_);
287 state = sampler->thread_execution_state_;
288 DCHECK(sampler->active_collections_.empty());
289 }
290
291 // Stop the thread and wait for it to exit. This has to be done through
292 // the thread itself because it has taken ownership of its own lifetime.
293 if (state == RUNNING) {
294 ShutdownAssumingIdle(false);
295 state = EXITING;
296 }
297 // Make sure the thread is cleaned up since state will be reset to NOT_STARTED.
298 if (state == EXITING)
299 sampler->Stop();
300
301 // Reset internal variables to the just-initialized state.
302 {
303 AutoLock lock(sampler->thread_execution_state_lock_);
304 sampler->thread_execution_state_ = NOT_STARTED;
305 sampler->thread_execution_state_task_runner_ = nullptr;
306 sampler->thread_execution_state_disable_idle_shutdown_for_testing_ = false;
307 sampler->thread_execution_state_add_events_ = 0;
308 }
309 }
310
311 // static
312 void StackSamplingProfiler::SamplingThread::TestAPI::DisableIdleShutdown() {
313 SamplingThread* sampler = SamplingThread::GetInstance();
314
315 {
316 AutoLock lock(sampler->thread_execution_state_lock_);
317 sampler->thread_execution_state_disable_idle_shutdown_for_testing_ = true;
318 }
319 }
320
321 // static
322 void StackSamplingProfiler::SamplingThread::TestAPI::ShutdownAssumingIdle(
323 bool simulate_intervening_add) {
324 SamplingThread* sampler = SamplingThread::GetInstance();
325
326 ThreadExecutionState state;
327 scoped_refptr<SingleThreadTaskRunner> task_runner =
328 sampler->GetTaskRunner(&state);
329 DCHECK_EQ(RUNNING, state);
330 DCHECK(task_runner);
331
332 int add_events;
333 {
334 AutoLock lock(sampler->thread_execution_state_lock_);
335 add_events = sampler->thread_execution_state_add_events_;
336 if (simulate_intervening_add)
337 ++sampler->thread_execution_state_add_events_;
338 }
339
340 WaitableEvent executed(WaitableEvent::ResetPolicy::MANUAL,
341 WaitableEvent::InitialState::NOT_SIGNALED);
342 // PostTaskAndReply won't work because the thread and its associated
343 // message loop may be shut down before the reply can run.
344 task_runner->PostTask(FROM_HERE,
345 Bind(&ShutdownTaskAndSignalEvent, Unretained(sampler),
346 add_events, Unretained(&executed)));
347 executed.Wait();
348 }
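The post-and-signal idiom above is the usual fallback when PostTaskAndReply cannot be used. A generic sketch of the same idiom, using only base APIs already included by this file (the helper names are invented for illustration):

  namespace {

  // Runs |work| on the target thread, then signals |done|.
  void RunAndSignal(const Closure& work, WaitableEvent* done) {
    work.Run();
    done->Signal();
  }

  // Posts |work| to |task_runner| and blocks until it has run. Because the
  // event is signaled from within the task rather than by a reply task,
  // this works even if the target thread shuts down immediately afterward.
  void PostAndWait(scoped_refptr<SingleThreadTaskRunner> task_runner,
                   const Closure& work) {
    WaitableEvent done(WaitableEvent::ResetPolicy::MANUAL,
                       WaitableEvent::InitialState::NOT_SIGNALED);
    task_runner->PostTask(FROM_HERE,
                          Bind(&RunAndSignal, work, Unretained(&done)));
    done.Wait();
  }

  }  // namespace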
349
350 // static
351 void StackSamplingProfiler::SamplingThread::TestAPI::ShutdownTaskAndSignalEvent(
352 SamplingThread* sampler,
353 int add_events,
354 WaitableEvent* event) {
355 sampler->ShutdownTask(add_events);
356 event->Signal();
357 }
358
359 StaticAtomicSequenceNumber StackSamplingProfiler::SamplingThread::
360 CollectionContext::next_collection_id_;
361
362 StackSamplingProfiler::SamplingThread::SamplingThread()
363 : Thread("StackSamplingProfiler") {}
364
365 StackSamplingProfiler::SamplingThread::~SamplingThread() = default;
366
367 StackSamplingProfiler::SamplingThread*
368 StackSamplingProfiler::SamplingThread::GetInstance() {
369 return Singleton<SamplingThread, LeakySingletonTraits<SamplingThread>>::get();
370 }
371
372 int StackSamplingProfiler::SamplingThread::Add(
373 std::unique_ptr<CollectionContext> collection) {
374 // This is not to be run on the sampling thread.
375
376 int id = collection->collection_id;
377 scoped_refptr<SingleThreadTaskRunner> task_runner =
378 GetOrCreateTaskRunnerForAdd();
379
380 task_runner->PostTask(FROM_HERE, Bind(&SamplingThread::AddCollectionTask,
381 Unretained(this), Passed(&collection)));
382
383 return id;
384 }
385
386 void StackSamplingProfiler::SamplingThread::Remove(int id) {
387 // This is not to be run on the sampling thread.
388
389 ThreadExecutionState state;
390 scoped_refptr<SingleThreadTaskRunner> task_runner = GetTaskRunner(&state);
391 if (state != RUNNING)
392 return;
393 DCHECK(task_runner);
394
395 // This can fail if the thread were to exit between acquisition of the task
396 // runner above and the call below. In that case, however, everything has
397 // stopped so there's no need to try to stop it.
398 task_runner->PostTask(FROM_HERE, Bind(&SamplingThread::RemoveCollectionTask,
399 Unretained(this), id));
400 }
401
402 scoped_refptr<SingleThreadTaskRunner>
403 StackSamplingProfiler::SamplingThread::GetOrCreateTaskRunnerForAdd() {
404 AutoLock lock(thread_execution_state_lock_);
405
406 // This method must be called only from Add() because it increments the
407 // "add events" count.
408 ++thread_execution_state_add_events_;
409
410 if (thread_execution_state_ == RUNNING) {
411 DCHECK(thread_execution_state_task_runner_);
412 // This shouldn't be called from the sampling thread as it's inefficient.
413 // Use GetTaskRunnerOnSamplingThread() instead.
414 DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
415 return thread_execution_state_task_runner_;
416 }
417
418 if (thread_execution_state_ == EXITING) {
419 // The previous instance has only been partially cleaned up. It is necessary
420 // to call Stop() before Start().
421 Stop();
422 }
423
424 // The thread is not running. Start it and get its task runner. The task
425 // runner has to be saved for future use because, although it can be used
426 // from any thread, for thread-safety reasons it can be acquired via
427 // task_runner() only on the created thread or the thread that created it
428 // (i.e. this thread). SamplingThread relaxes that restriction by gating
429 // access to the saved runner with |thread_execution_state_lock_|.
430 Start();
431 thread_execution_state_ = RUNNING;
432 thread_execution_state_task_runner_ = Thread::task_runner();
433
434 // Detach the sampling thread from the "sequence" (i.e. thread) that
435 // started it so that it can be self-managed or stopped by another thread.
436 DetachFromSequence();
437
438 return thread_execution_state_task_runner_;
439 }
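Stripped of the profiler specifics, this method is a lock-gated lazy-start of a base::Thread whose task runner is cached for cross-thread use. A simplified sketch of just that pattern, with a hypothetical class:

  class LazyWorker {
   public:
    scoped_refptr<SingleThreadTaskRunner> GetRunner() {
      AutoLock lock(lock_);
      if (!task_runner_) {
        thread_.Start();
        // Cache the runner while on the creating thread, where calling
        // task_runner() is permitted; the cached copy is usable anywhere.
        task_runner_ = thread_.task_runner();
        // Allow a different thread to call Stop() on |thread_| later.
        thread_.DetachFromSequence();
      }
      return task_runner_;
    }

   private:
    Lock lock_;
    Thread thread_{"LazyWorker"};
    scoped_refptr<SingleThreadTaskRunner> task_runner_;
  };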
440
441 scoped_refptr<SingleThreadTaskRunner>
442 StackSamplingProfiler::SamplingThread::GetTaskRunner(
443 ThreadExecutionState* out_state) {
444 AutoLock lock(thread_execution_state_lock_);
445 if (out_state)
446 *out_state = thread_execution_state_;
447 if (thread_execution_state_ == RUNNING) {
448 // This shouldn't be called from the sampling thread as it's inefficient.
449 // Use GetTaskRunnerOnSamplingThread() instead.
450 DCHECK_NE(GetThreadId(), PlatformThread::CurrentId());
451 DCHECK(thread_execution_state_task_runner_);
452 } else {
453 DCHECK(!thread_execution_state_task_runner_);
454 }
455
456 return thread_execution_state_task_runner_;
457 }
458
459 scoped_refptr<SingleThreadTaskRunner>
460 StackSamplingProfiler::SamplingThread::GetTaskRunnerOnSamplingThread() {
461 // This should be called only from the sampling thread because
462 // Thread::task_runner() has limited accessibility.
463 DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
464
465 return Thread::task_runner();
466 }
467
468 void StackSamplingProfiler::SamplingThread::FinishCollection(
469 CollectionContext* collection) {
470 DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
471
472 // If there is no duration for the final profile (because it was stopped),
473 // calculate it now.
474 if (!collection->profiles.empty() &&
475 collection->profiles.back().profile_duration == TimeDelta()) {
476 collection->profiles.back().profile_duration =
477 Time::Now() - collection->profile_start_time;
478 }
479
480 // Extract some information so the callback and event-signalling can still
481 // be done after the collection has been removed from the list of "active"
482 // ones. This allows the controlling object (and tests using it) to be
483 // confident that the collection is fully finished when those things occur.
484 const CompletedCallback callback = collection->callback;
485 CallStackProfiles profiles = std::move(collection->profiles);
486 WaitableEvent* finished = collection->finished;
487
488 // Remove this collection from the map of known ones. The |collection|
489 // parameter is invalid after this point.
490 size_t count = active_collections_.erase(collection->collection_id);
491 DCHECK_EQ(1U, count);
492
493 // Run the associated callback, passing the collected profiles.
494 callback.Run(std::move(profiles));
495
496 // Signal that this collection is finished.
497 finished->Signal();
498 }
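Note the ordering above: the callback, profiles, and event pointer are all extracted before the erase. A hypothetical inversion shows the hazard it avoids:

  // WRONG (hypothetical): erasing first destroys the CollectionContext
  // that |collection| points at, making every later access a use-after-free.
  //   active_collections_.erase(collection->collection_id);
  //   collection->callback.Run(std::move(collection->profiles));  // dangles
  //   collection->finished->Signal();                             // dangles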
499
500 void StackSamplingProfiler::SamplingThread::RecordSample(
501 CollectionContext* collection) {
502 DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
503 DCHECK(collection->native_sampler);
504
505 // If this is the first sample of a burst, a new Profile needs to be created
506 // and filled.
507 if (collection->sample == 0) {
508 collection->profiles.push_back(CallStackProfile());
509 CallStackProfile& profile = collection->profiles.back();
510 profile.sampling_period = collection->params.sampling_interval;
511 collection->profile_start_time = Time::Now();
512 collection->native_sampler->ProfileRecordingStarting(&profile.modules);
513 }
514
515 // The currently active profile being captured.
516 CallStackProfile& profile = collection->profiles.back();
517
518 // Record a single sample.
519 profile.samples.push_back(Sample());
520 collection->native_sampler->RecordStackSample(&profile.samples.back());
521
522 // If this is the last sample of a burst, record the total time.
523 if (collection->sample == collection->params.samples_per_burst - 1) {
524 profile.profile_duration = Time::Now() - collection->profile_start_time;
525 collection->native_sampler->ProfileRecordingStopped();
526 }
527 }
528
529 void StackSamplingProfiler::SamplingThread::ScheduleShutdownIfIdle() {
530 DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
531
532 if (!active_collections_.empty())
533 return;
534
535 int add_events;
536 {
537 AutoLock lock(thread_execution_state_lock_);
538 if (thread_execution_state_disable_idle_shutdown_for_testing_)
539 return;
540 add_events = thread_execution_state_add_events_;
541 }
542
543 GetTaskRunnerOnSamplingThread()->PostDelayedTask(
544 FROM_HERE,
545 Bind(&SamplingThread::ShutdownTask, Unretained(this), add_events),
546 TimeDelta::FromSeconds(60));
547 }
548
549 void StackSamplingProfiler::SamplingThread::AddCollectionTask(
550 std::unique_ptr<CollectionContext> collection) {
551 DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
552
553 const int collection_id = collection->collection_id;
554 const TimeDelta initial_delay = collection->params.initial_delay;
555
556 active_collections_.insert(
557 std::make_pair(collection_id, std::move(collection)));
558
559 GetTaskRunnerOnSamplingThread()->PostDelayedTask(
560 FROM_HERE,
561 Bind(&SamplingThread::PerformCollectionTask, Unretained(this),
562 collection_id),
563 initial_delay);
564
565 // Another increment of "add events" serves to invalidate any pending
566 // shutdown tasks that may have been initiated between the Add() call and
567 // this task running.
568 {
569 AutoLock lock(thread_execution_state_lock_);
570 ++thread_execution_state_add_events_;
571 }
572 }
573
574 void StackSamplingProfiler::SamplingThread::RemoveCollectionTask(int id) {
575 DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
576
577 auto found = active_collections_.find(id);
578 if (found == active_collections_.end())
579 return;
580
581 FinishCollection(found->second.get());
582 ScheduleShutdownIfIdle();
583 }
584
585 void StackSamplingProfiler::SamplingThread::PerformCollectionTask(int id) {
586 DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
587
588 auto found = active_collections_.find(id);
589
590 // The collection won't be found if it has already been stopped.
591 if (found == active_collections_.end())
592 return;
593
594 CollectionContext* collection = found->second.get();
595
596 // Handle first-run with no "next time".
597 if (collection->next_sample_time == Time())
598 collection->next_sample_time = Time::Now();
599
600 // Do the collection of a single sample.
601 RecordSample(collection);
602
603 // Update the time of the next sample recording.
604 if (UpdateNextSampleTime(collection)) {
605 bool success = GetTaskRunnerOnSamplingThread()->PostDelayedTask(
606 FROM_HERE,
607 Bind(&SamplingThread::PerformCollectionTask, Unretained(this), id),
608 std::max(collection->next_sample_time - Time::Now(), TimeDelta()));
609 DCHECK(success);
610 } else {
611 // All capturing has completed so finish the collection. By not re-adding
612 // it to the task queue, the collection will "expire" (i.e. no further work
613 // will be done). The |collection| variable will be invalid after this call.
614 FinishCollection(collection);
615 ScheduleShutdownIfIdle();
616 }
617 }
618
619 void StackSamplingProfiler::SamplingThread::ShutdownTask(int add_events) {
620 DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
621
622 // Holding this lock ensures that any attempt to start another job will
623 // get postponed until |thread_execution_state_| is updated, thus eliminating
624 // the race in starting a new thread while the previous one is exiting.
625 AutoLock lock(thread_execution_state_lock_);
626
627 // If the current count of creation requests doesn't match the passed count
628 // then other tasks have been created since this was posted. Abort shutdown.
629 if (thread_execution_state_add_events_ != add_events)
630 return;
631
632 // There can be no new AddCollectionTasks at this point because creating
633 // those always increments "add events". There may be other requests, like
634 // Remove, but it's okay to schedule the thread to stop once they've been
635 // executed (i.e. "soon").
636 DCHECK(active_collections_.empty());
637 StopSoon();
638
639 // StopSoon will have set the owning sequence (again) so it must be detached
640 // (again) in order for Stop/Start to be called (again) should more work
641 // come in. Holding the |thread_execution_state_lock_| ensures the necessary
642 // happens-after with regard to this detach and future Thread API calls.
643 DetachFromSequence();
644
645 // Set |thread_execution_state_| so the thread will be restarted when new
646 // work comes in. Remove the |thread_execution_state_task_runner_| to avoid
647 // confusion.
648 thread_execution_state_ = EXITING;
649 thread_execution_state_task_runner_ = nullptr;
650 }
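Taken together, ScheduleShutdownIfIdle() and ShutdownTask() implement a generation-counter handshake on |thread_execution_state_add_events_|. A condensed timeline of the case the check defends against (times are hypothetical):

  // t0: the last collection finishes; ScheduleShutdownIfIdle() snapshots
  //     add_events == N and posts ShutdownTask(N) with a 60-second delay.
  // t1: Add() runs, bumping the counter to N+1 and posting new work.
  // t2: ShutdownTask(N) runs, sees N != N+1, and returns without stopping
  //     the thread, which keeps servicing the new collection. Had nothing
  //     been added, the counter would still be N and the shutdown proceeds.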
651
652 bool StackSamplingProfiler::SamplingThread::UpdateNextSampleTime(
653 CollectionContext* collection) {
654 // This will keep a consistent average interval between samples but will
655 // result in a constant series of acquisitions, thus nearly locking out the
656 // target thread, if the interval is smaller than the time it takes to
657 // actually acquire a sample. Anything sampling that quickly is going to be
658 // a problem anyway, so don't worry about it.
659 if (++collection->sample < collection->params.samples_per_burst) {
660 collection->next_sample_time += collection->params.sampling_interval;
661 return true;
662 }
663
664 if (++collection->burst < collection->params.bursts) {
665 collection->sample = 0;
666 collection->next_sample_time += collection->params.burst_interval;
667 return true;
668 }
669
670 return false;
671 }
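As a worked example of this arithmetic, assume hypothetical parameters of samples_per_burst = 3, bursts = 2, sampling_interval = 100 ms, and burst_interval = 1 s, with the first sample at time t0:

  // sample 1: t0            (first-run case in PerformCollectionTask)
  // sample 2: t0 + 100 ms
  // sample 3: t0 + 200 ms
  // sample 4: t0 + 1200 ms  (burst_interval is added to the time of the
  // sample 5: t0 + 1300 ms   previous burst's *last* sample, not its first)
  // sample 6: t0 + 1400 ms
  // UpdateNextSampleTime() then returns false and the collection finishes.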
672
673 void StackSamplingProfiler::SamplingThread::CleanUp() {
674 DCHECK_EQ(GetThreadId(), PlatformThread::CurrentId());
675
676 // There should be no collections remaining when the thread stops.
677 DCHECK(active_collections_.empty());
678
679 // Let the parent clean up.
680 Thread::CleanUp();
681 }
682
683 // StackSamplingProfiler ------------------------------------------------------
684
685 // static
686 void StackSamplingProfiler::TestAPI::Reset() {
687 SamplingThread::TestAPI::Reset();
688 ResetAnnotations();
689 }
690
691 // static
692 void StackSamplingProfiler::TestAPI::ResetAnnotations() {
693 subtle::NoBarrier_Store(&process_milestones_, 0u);
694 }
695
696 // static
697 bool StackSamplingProfiler::TestAPI::IsSamplingThreadRunning() {
698 return SamplingThread::GetInstance()->IsRunning();
699 }
700
701 // static
702 void StackSamplingProfiler::TestAPI::DisableIdleShutdown() {
703 SamplingThread::TestAPI::DisableIdleShutdown();
704 }
705
706 // static
707 void StackSamplingProfiler::TestAPI::PerformSamplingThreadIdleShutdown(
708 bool simulate_intervening_start) {
709 SamplingThread::TestAPI::ShutdownAssumingIdle(simulate_intervening_start);
710 }
711
712 subtle::Atomic32 StackSamplingProfiler::process_milestones_ = 0;
713
714 StackSamplingProfiler::StackSamplingProfiler(
165 const SamplingParams& params, 715 const SamplingParams& params,
166 const CompletedCallback& completed_callback) 716 const CompletedCallback& callback,
167 : native_sampler_(std::move(native_sampler)), 717 NativeStackSamplerTestDelegate* test_delegate)
168 params_(params), 718 : StackSamplingProfiler(base::PlatformThread::CurrentId(),
169 stop_event_(WaitableEvent::ResetPolicy::AUTOMATIC, 719 params,
170 WaitableEvent::InitialState::NOT_SIGNALED), 720 callback,
171 completed_callback_(completed_callback) {} 721 test_delegate) {}
172
173 StackSamplingProfiler::SamplingThread::~SamplingThread() {}
174
175 void StackSamplingProfiler::SamplingThread::ThreadMain() {
176 PlatformThread::SetName("Chrome_SamplingProfilerThread");
177
178 // For now, just ignore any requests to profile while another profiler is
179 // working.
180 if (!concurrent_profiling_lock.Get().Try())
181 return;
182
183 CallStackProfiles profiles;
184 CollectProfiles(&profiles);
185 concurrent_profiling_lock.Get().Release();
186 completed_callback_.Run(std::move(profiles));
187 }
188
189 // Depending on how long the sampling takes and the length of the sampling
190 // interval, a burst of samples could take arbitrarily longer than
191 // samples_per_burst * sampling_interval. In this case, we (somewhat
192 // arbitrarily) honor the number of samples requested rather than strictly
193 // adhering to the sampling intervals. Once we have established users for the
194 // StackSamplingProfiler and the collected data to judge, we may go the other
195 // way or make this behavior configurable.
196 void StackSamplingProfiler::SamplingThread::CollectProfile(
197 CallStackProfile* profile,
198 TimeDelta* elapsed_time,
199 bool* was_stopped) {
200 ElapsedTimer profile_timer;
201 native_sampler_->ProfileRecordingStarting(&profile->modules);
202 profile->sampling_period = params_.sampling_interval;
203 *was_stopped = false;
204 TimeDelta previous_elapsed_sample_time;
205 for (int i = 0; i < params_.samples_per_burst; ++i) {
206 if (i != 0) {
207 // Always wait, even if for 0 seconds, so we can observe a signal on
208 // stop_event_.
209 if (stop_event_.TimedWait(
210 std::max(params_.sampling_interval - previous_elapsed_sample_time,
211 TimeDelta()))) {
212 *was_stopped = true;
213 break;
214 }
215 }
216 ElapsedTimer sample_timer;
217 profile->samples.push_back(Sample());
218 native_sampler_->RecordStackSample(&profile->samples.back());
219 previous_elapsed_sample_time = sample_timer.Elapsed();
220 }
221
222 *elapsed_time = profile_timer.Elapsed();
223 profile->profile_duration = *elapsed_time;
224 native_sampler_->ProfileRecordingStopped();
225 }
226
227 // In an analogous manner to CollectProfile() and samples exceeding the expected
228 // total sampling time, bursts may also exceed the burst_interval. We adopt the
229 // same wait-and-see approach here.
230 void StackSamplingProfiler::SamplingThread::CollectProfiles(
231 CallStackProfiles* profiles) {
232 if (stop_event_.TimedWait(params_.initial_delay))
233 return;
234
235 TimeDelta previous_elapsed_profile_time;
236 for (int i = 0; i < params_.bursts; ++i) {
237 if (i != 0) {
238 // Always wait, even if for 0 seconds, so we can observe a signal on
239 // stop_event_.
240 if (stop_event_.TimedWait(
241 std::max(params_.burst_interval - previous_elapsed_profile_time,
242 TimeDelta())))
243 return;
244 }
245
246 CallStackProfile profile;
247 bool was_stopped = false;
248 CollectProfile(&profile, &previous_elapsed_profile_time, &was_stopped);
249 if (!profile.samples.empty())
250 profiles->push_back(std::move(profile));
251
252 if (was_stopped)
253 return;
254 }
255 }
256
257 void StackSamplingProfiler::SamplingThread::Stop() {
258 stop_event_.Signal();
259 }
260
261 // StackSamplingProfiler ------------------------------------------------------
262
263 subtle::Atomic32 StackSamplingProfiler::process_milestones_ = 0;
264
265 StackSamplingProfiler::StackSamplingProfiler(
266 PlatformThreadId thread_id,
267 const SamplingParams& params,
268 const CompletedCallback& callback)
269 : StackSamplingProfiler(thread_id, params, callback, nullptr) {}
270 722
271 StackSamplingProfiler::StackSamplingProfiler( 723 StackSamplingProfiler::StackSamplingProfiler(
272 PlatformThreadId thread_id, 724 PlatformThreadId thread_id,
273 const SamplingParams& params, 725 const SamplingParams& params,
274 const CompletedCallback& callback, 726 const CompletedCallback& callback,
275 NativeStackSamplerTestDelegate* test_delegate) 727 NativeStackSamplerTestDelegate* test_delegate)
276 : thread_id_(thread_id), params_(params), completed_callback_(callback), 728 : thread_id_(thread_id),
277 test_delegate_(test_delegate) { 729 params_(params),
278 } 730 completed_callback_(callback),
731 // The event starts "signaled" so code knows it's safe to start the
732 // thread, and "manual" so that it can be waited on in multiple places.
733 profiling_inactive_(WaitableEvent::ResetPolicy::MANUAL,
734 WaitableEvent::InitialState::SIGNALED),
735 collection_id_(NULL_COLLECTION_ID),
736 test_delegate_(test_delegate) {}
279 737
280 StackSamplingProfiler::~StackSamplingProfiler() { 738 StackSamplingProfiler::~StackSamplingProfiler() {
739 // Stop returns immediately but the shutdown runs asynchronously. There is a
740 // non-zero probability that one more sample will be taken after this call
741 // returns.
281 Stop(); 742 Stop();
282 if (!sampling_thread_handle_.is_null()) 743
283 PlatformThread::Join(sampling_thread_handle_); 744 // The behavior of sampling a thread that has exited is undefined and could
284 } 745 // cause Bad Things(tm) to occur. The safety model provided by this class is
285 746 // that an instance of this object is expected to live at least as long as
286 // static 747 // the thread it is sampling. However, because the sampling is performed
287 void StackSamplingProfiler::StartAndRunAsync( 748 // asynchronously by the SamplingThread, there is no way to guarantee this
288 PlatformThreadId thread_id, 749 // is true without waiting for it to signal that it has finished.
289 const SamplingParams& params, 750 //
290 const CompletedCallback& callback) { 751 // The wait time should, at most, be only as long as it takes to collect one
291 CHECK(ThreadTaskRunnerHandle::Get()); 752 // sample (~200us) or none at all if sampling has already completed.
292 AsyncRunner::Run(thread_id, params, callback); 753 ThreadRestrictions::ScopedAllowWait allow_wait;
754 profiling_inactive_.Wait();
293 } 755 }
294 756
295 void StackSamplingProfiler::Start() { 757 void StackSamplingProfiler::Start() {
296 if (completed_callback_.is_null()) 758 if (completed_callback_.is_null())
297 return; 759 return;
298 760
299 std::unique_ptr<NativeStackSampler> native_sampler = 761 std::unique_ptr<NativeStackSampler> native_sampler =
300 NativeStackSampler::Create(thread_id_, &RecordAnnotations, 762 NativeStackSampler::Create(thread_id_, &RecordAnnotations,
301 test_delegate_); 763 test_delegate_);
764
302 if (!native_sampler) 765 if (!native_sampler)
303 return; 766 return;
304 767
305 sampling_thread_.reset(new SamplingThread(std::move(native_sampler), params_, 768 // Wait for profiling to be "inactive", then reset it for the upcoming run.
306 completed_callback_)); 769 profiling_inactive_.Wait();
307 if (!PlatformThread::Create(0, sampling_thread_.get(), 770 profiling_inactive_.Reset();
308 &sampling_thread_handle_)) 771
309 sampling_thread_.reset(); 772 DCHECK_EQ(NULL_COLLECTION_ID, collection_id_);
773 collection_id_ = SamplingThread::GetInstance()->Add(
774 MakeUnique<SamplingThread::CollectionContext>(
775 thread_id_, params_, completed_callback_, &profiling_inactive_,
776 std::move(native_sampler)));
777 DCHECK_NE(NULL_COLLECTION_ID, collection_id_);
310 } 778 }
311 779
312 void StackSamplingProfiler::Stop() { 780 void StackSamplingProfiler::Stop() {
313 if (sampling_thread_) 781 SamplingThread::GetInstance()->Remove(collection_id_);
314 sampling_thread_->Stop(); 782 collection_id_ = NULL_COLLECTION_ID;
315 } 783 }
316 784
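End to end, a minimal usage sketch of the public API after this patch; the callback and parameter values are illustrative only:

  void OnProfileCompleted(StackSamplingProfiler::CallStackProfiles profiles) {
    // Runs on the sampling thread when collection finishes or is stopped.
  }

  void ProfileCurrentThread() {
    StackSamplingProfiler::SamplingParams params;
    params.initial_delay = TimeDelta();
    params.bursts = 1;
    params.samples_per_burst = 300;
    params.sampling_interval = TimeDelta::FromMilliseconds(100);

    StackSamplingProfiler profiler(PlatformThread::CurrentId(), params,
                                   Bind(&OnProfileCompleted));
    profiler.Start();
    // ... this thread keeps running and gets sampled ...
    profiler.Stop();  // Asynchronous; the destructor waits until the
                      // sampling thread reports the collection inactive.
  }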
317 // static 785 // static
318 void StackSamplingProfiler::SetProcessMilestone(int milestone) { 786 void StackSamplingProfiler::SetProcessMilestone(int milestone) {
319 DCHECK_LE(0, milestone); 787 DCHECK_LE(0, milestone);
320 DCHECK_GT(static_cast<int>(sizeof(process_milestones_) * 8), milestone); 788 DCHECK_GT(static_cast<int>(sizeof(process_milestones_) * 8), milestone);
321 DCHECK_EQ(0, subtle::NoBarrier_Load(&process_milestones_) & (1 << milestone)); 789 DCHECK_EQ(0, subtle::NoBarrier_Load(&process_milestones_) & (1 << milestone));
322 ChangeAtomicFlags(&process_milestones_, 1 << milestone, 0); 790 ChangeAtomicFlags(&process_milestones_, 1 << milestone, 0);
323 } 791 }
324 792
325 // static 793 // static
326 void StackSamplingProfiler::ResetAnnotationsForTesting() {
327 subtle::NoBarrier_Store(&process_milestones_, 0u);
328 }
329
330 // static
331 void StackSamplingProfiler::RecordAnnotations(Sample* sample) { 794 void StackSamplingProfiler::RecordAnnotations(Sample* sample) {
332 // The code inside this method must not do anything that could acquire a 795 // The code inside this method must not do anything that could acquire a
333 // mutex, including allocating memory (which includes LOG messages) because 796 // mutex, including allocating memory (which includes LOG messages) because
334 // that mutex could be held by a stopped thread, thus resulting in deadlock. 797 // that mutex could be held by a stopped thread, thus resulting in deadlock.
335 sample->process_milestones = subtle::NoBarrier_Load(&process_milestones_); 798 sample->process_milestones = subtle::NoBarrier_Load(&process_milestones_);
336 } 799 }
337 800
338 // StackSamplingProfiler::Frame global functions ------------------------------ 801 // StackSamplingProfiler::Frame global functions ------------------------------
339 802
340 bool operator==(const StackSamplingProfiler::Module& a, 803 bool operator==(const StackSamplingProfiler::Module& a,
(...skipping 29 matching lines...)
370 } 833 }
371 834
372 bool operator<(const StackSamplingProfiler::Frame &a, 835 bool operator<(const StackSamplingProfiler::Frame &a,
373 const StackSamplingProfiler::Frame &b) { 836 const StackSamplingProfiler::Frame &b) {
374 return (a.module_index < b.module_index) || 837 return (a.module_index < b.module_index) ||
375 (a.module_index == b.module_index && 838 (a.module_index == b.module_index &&
376 a.instruction_pointer < b.instruction_pointer); 839 a.instruction_pointer < b.instruction_pointer);
377 } 840 }
378 841
379 } // namespace base 842 } // namespace base