def gdb_log_parser()

in tensorflow/lite/micro/kernels/vexriscv/utils/log_parser.py [0:0]


import collections
import json
import os

# `load_regex_parser` and `writelines` are helpers defined elsewhere in
# log_parser.py.


def gdb_log_parser(data, output, re_file, ignore_list=None, full_trace=False):
  """
  Args:
    data: list of strings of logs from GDB
    output: string of output filename
    re_file: path to the regex *.json file
    ignore_list: list of string (functions) to ignore
    full_trace: bool to generate full stack trace of the log
  """
  regex_parser = load_regex_parser(re_file)

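  # trace: innermost frame -> encoded call paths seen for it
  # stack: frames of the backtrace currently being read
  # processed: innermost-frame function names, in log order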
  trace = collections.defaultdict(list)
  stack = []
  processed = []
  for line in data:
    # Skip lines that are not backtrace frames; GDB frames look like
    # `#0  0x40002998 in __addsf3 ()`
    if not line.startswith("#"):
      continue

    # Unless a full trace is requested, only the innermost frame (#0)
    # is relevant
    if not full_trace and not line.startswith("#0"):
      continue

    # Remove ANSI color escape codes
    line = regex_parser["base"].sub("", line)

    # Extract function names by trying each custom regex until one matches
    find = None
    for r in regex_parser["custom"]:
      find = r.findall(line)

      if len(find) != 0:
        break

    if find is None or len(find) == 0:
      continue

    # `re.findall` returns tuples when the pattern has multiple groups;
    # keep the first group of the first match
    target = find[0][0] if isinstance(find[0], tuple) else find[0]

    # Extract function name from `$ADDR in $NAME`, e.g.
    # `0x40002998 in __addsf3` -> `__addsf3`
    if " in " in target:
      target = target.split()[-1]

    # Remove leading/trailing spaces
    target = target.strip()

    if full_trace:
      if line.startswith("#0") and stack:
        # A new `#0` frame starts a new backtrace: encode the previous
        # stack as a `/`-joined string, keyed by its innermost frame
        temp = "/".join(stack)
        trace[stack[0]].append(temp)

        # Clear up previous stack
        stack.clear()

      stack.append(target)

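    # Only the innermost frame (#0) goes into the flat output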
    if not line.startswith("#0"):
      continue

    if ignore_list and target in ignore_list:
      continue

    # Add the extracted function name to the parsed list
    processed.append(target)

  print("Extracted {} lines".format(len(processed)))

  # Write parsed log to file
  writelines(processed, output)

  if full_trace:
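    # Group identical call paths per innermost frame and count occurrences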
    content = {}
    for top, paths in trace.items():
      content[top] = []
      counter = collections.Counter(paths)

      for path, counts in counter.items():
        info = {"counts": counts, "path": path.split("/")}
        content[top].append(info)

    name = os.path.splitext(output)[0]
    with open(name + ".json", "w") as f:
      json.dump(content, f, sort_keys=True, indent=4)

  print("Parsed the log to `{}`".format(output))