TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp()

in src/kudu/util/debug/trace_event_impl.cc [1768:1923]


TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    uint64_t id,
    int thread_id,
    const MicrosecondsInt64& timestamp,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const uint64_t* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  TraceEventHandle handle = { 0, 0, 0 };
  if (!*category_group_enabled)
    return handle;

  DCHECK(name);

  if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
    id ^= process_id_hash_;

  // "now" is the wall-clock event time adjusted by the log's time offset;
  // "thread_now" is this thread's CPU time, used for per-thread CPU durations.
  MicrosecondsInt64 now = OffsetTimestamp(timestamp);
  MicrosecondsInt64 thread_now = GetThreadCpuTimeMicros();

  PerThreadInfo* thr_info = thread_local_info_;
  if (PREDICT_FALSE(!thr_info)) {
    thr_info = SetupThreadLocalBuffer();
  }

  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (base::subtle::NoBarrier_Load(&thr_info->is_in_trace_event_))
    return handle;

  // RAII guard: sets is_in_trace_event_ for this scope and clears it on exit,
  // so nested calls trip the check above (a guard sketch follows the listing).
  MarkFlagInScope thread_is_in_trace_event(&thr_info->is_in_trace_event_);

  // Load this thread's buffer pointer atomically: a concurrent flush may steal
  // it at any time via AtomicTakeBuffer() (handoff sketch after the listing).
  ThreadLocalEventBuffer* thread_local_event_buffer =
    reinterpret_cast<ThreadLocalEventBuffer*>(
      base::subtle::NoBarrier_Load(
        reinterpret_cast<AtomicWord*>(&thr_info->event_buffer_)));

  // If we have an event buffer, but it's a left-over from a previous trace,
  // delete it.
  if (PREDICT_FALSE(thread_local_event_buffer &&
                    !CheckGeneration(thread_local_event_buffer->generation()))) {
    // We might also race against a flusher thread, so we have to atomically
    // take the buffer.
    thread_local_event_buffer = thr_info->AtomicTakeBuffer();
    delete thread_local_event_buffer;
    thread_local_event_buffer = nullptr;
  }

  // If there is no current buffer, create one for this event and publish it
  // with an atomic store so a concurrent flusher observes a consistent pointer.
  if (PREDICT_FALSE(!thread_local_event_buffer)) {
    thread_local_event_buffer = new ThreadLocalEventBuffer(this);

    base::subtle::NoBarrier_Store(
      reinterpret_cast<AtomicWord*>(&thr_info->event_buffer_),
      reinterpret_cast<AtomicWord>(thread_local_event_buffer));
  }

  // Check and update the current thread name only if the event is for the
  // current thread to avoid locks in most cases.
  if (thread_id == static_cast<int>(Thread::UniqueThreadId())) {
    Thread* kudu_thr = Thread::current_thread();
    if (kudu_thr) {
      const char* new_name = kudu_thr->name().c_str();
      // Check if the thread name has been set or changed since the previous
      // call (if any), but don't bother if the new name is empty. Note this will
      // not detect a thread name change within the same char* buffer address: we
      // favor common case performance over corner case correctness.
      if (PREDICT_FALSE(new_name != g_current_thread_name &&
                        new_name && *new_name)) {
        g_current_thread_name = new_name;

        SpinLockHolder thread_info_lock(&thread_info_lock_);

        auto existing_name = thread_names_.find(thread_id);
        if (existing_name == thread_names_.end()) {
          // This is a new thread id, and a new name.
          thread_names_[thread_id] = new_name;
        } else {
          // This is a thread id that we've seen before, but potentially with a
          // new name. Every distinct name reported for the id accumulates as a
          // comma-separated list (a standalone sketch of this merging logic
          // follows the listing).
          std::vector<StringPiece> existing_names = strings::Split(existing_name->second, ",");
          bool found = std::find(existing_names.begin(),
                                 existing_names.end(),
                                 new_name) != existing_names.end();
          if (!found) {
            if (!existing_names.empty())
              existing_name->second.push_back(',');
            existing_name->second.append(new_name);
          }
        }
      }
    }
  }

  std::string console_message;
  if (*category_group_enabled &
      (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
    TraceEvent* trace_event = thread_local_event_buffer->AddTraceEvent(&handle);

    // AddTraceEvent() may return NULL (e.g. when no buffer space remains),
    // in which case the event is dropped.
    if (trace_event) {
      trace_event->Initialize(thread_id, now, thread_now, phase,
                              category_group_enabled, name, id,
                              num_args, arg_names, arg_types, arg_values,
                              convertable_values, flags);

#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & ECHO_TO_CONSOLE) {
      // A COMPLETE event's duration isn't known yet at this point, so echo
      // it to the console as a BEGIN.
      console_message = EventToConsoleMessage(
          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
          timestamp, trace_event);
    }
  }

  // Log outside the recording block above; the re-entrance guard keeps any
  // tracing triggered by LOG(ERROR) itself from recursing into this function.
  if (PREDICT_FALSE(!console_message.empty()))
    LOG(ERROR) << console_message;

  // If this category group is being "watched", fire the registered callback
  // for events whose name matches. The callback is copied out under the lock
  // and invoked only after the lock is released.
  if (PREDICT_FALSE(reinterpret_cast<const unsigned char*>(
                      base::subtle::NoBarrier_Load(&watch_category_)) == category_group_enabled)) {
    bool event_name_matches;
    WatchEventCallback watch_event_callback_copy;
    {
      SpinLockHolder lock(&lock_);
      event_name_matches = watch_event_name_ == name;
      watch_event_callback_copy = watch_event_callback_;
    }
    if (event_name_matches && watch_event_callback_copy) {
      watch_event_callback_copy();
    }
  }

  // Mirror the event to the globally-registered event callback, if any,
  // again downgrading COMPLETE to BEGIN since the duration isn't known yet.
  if (PREDICT_FALSE(*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK)) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
      base::subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(now,
                     phase == TRACE_EVENT_PHASE_COMPLETE ?
                         TRACE_EVENT_PHASE_BEGIN : phase,
                     category_group_enabled, name, id,
                     num_args, arg_names, arg_types, arg_values,
                     flags);
    }
  }

  return handle;
}
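
MarkFlagInScope itself is not part of this excerpt. The following is a minimal
sketch of a guard with the semantics its usage above implies; the class name,
member names, and use of std::atomic (rather than Kudu's base::subtle
primitives) are illustrative, not Kudu's actual declarations.

#include <atomic>

// Illustrative RAII scoped-flag guard: sets the flag on construction and
// clears it on destruction, so a nested call that checks the flag first can
// detect re-entrance and return early.
class ScopedFlag {
 public:
  explicit ScopedFlag(std::atomic<bool>* flag) : flag_(flag) {
    flag_->store(true, std::memory_order_relaxed);
  }
  ~ScopedFlag() { flag_->store(false, std::memory_order_relaxed); }

  ScopedFlag(const ScopedFlag&) = delete;
  ScopedFlag& operator=(const ScopedFlag&) = delete;

 private:
  std::atomic<bool>* flag_;
};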
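
AtomicTakeBuffer() is likewise outside this excerpt. A hedged sketch of the
handoff pattern its call sites suggest, again using std::atomic instead of
Kudu's base::subtle primitives (Buffer and the member names are illustrative):

#include <atomic>

struct Buffer {
  // ... recorded events ...
};

struct PerThreadSlot {
  std::atomic<Buffer*> buffer{nullptr};

  // Called both by the owning thread (to discard a stale buffer) and by a
  // flusher thread (to collect events). The atomic exchange guarantees that
  // exactly one caller receives the non-null pointer and thereby owns it.
  Buffer* TakeBuffer() {
    return buffer.exchange(nullptr, std::memory_order_acq_rel);
  }
};

Whichever caller receives the non-null pointer is responsible for flushing or
deleting the buffer, which is why the stale-buffer path above can safely
delete what it takes.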
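
The name-merging branch is easier to follow in isolation. This standalone
re-implementation is hypothetical (it substitutes the standard library for
strings::Split and StringPiece) but shows the same bookkeeping: each thread
id maps to a comma-separated list of every distinct name reported for it.

#include <map>
#include <sstream>
#include <string>

void MergeThreadName(std::map<int, std::string>* names,
                     int thread_id,
                     const std::string& new_name) {
  auto it = names->find(thread_id);
  if (it == names->end()) {
    // New thread id: record its first name.
    (*names)[thread_id] = new_name;
    return;
  }
  // Known thread id: scan the existing comma-separated list for the name.
  std::istringstream existing(it->second);
  std::string token;
  while (std::getline(existing, token, ',')) {
    if (token == new_name) return;  // Already recorded.
  }
  if (!it->second.empty())
    it->second.push_back(',');
  it->second.append(new_name);
}

For example, reporting "apply" twice and then "compact" for the same id
leaves the map entry at "apply,compact".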
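
For context, events normally reach this function through the Chromium-derived
TRACE_EVENT* macros declared in kudu/util/debug/trace_event.h rather than
through direct calls. A sketch of a typical caller; the category and argument
names are illustrative:

#include "kudu/util/debug/trace_event.h"

void HandleRequest() {
  // Scoped COMPLETE event: begins here and ends when the scope exits.
  TRACE_EVENT0("request", "HandleRequest");
  // Scoped event carrying a single named argument.
  TRACE_EVENT1("request", "parse", "bytes", 512);
}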