'''
Generate a Chrome Traceview report for a Chum URI.

https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU
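
Example invocation (the script name and URI are placeholders):

  python run_trace.py <chum-uri> --machine pcu --funcs system_metrics nogo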
'''
#pylint: disable=W0311,C0330,C0321
from __future__ import print_function, absolute_import

import argparse
import json
import logging
import os
import re
import tempfile
import gzip
import base64
import warnings
from collections import defaultdict
from cStringIO import StringIO

import functools32 as functools

from base.message_utils import message_reader, utils, proto
from data.chum import chumpy
from scripts.tools.trace import trace_utils
from vehicle.common.trace import trace_py
# ztrace is currently broken and cannot be imported.
#from vehicle.common.ztrace.data import analyze_timing_utilities

log = logging.getLogger()
message_reader.load_proto_extensions()


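# Time-unit conversion helpers (ns = nanoseconds, us = microseconds,
# s = seconds). Traceview 'ts' and 'dur' fields are in microseconds.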
def ns_to_us(x):
  return (x / 1e3)

def ns_to_s(x):
  return (x / 1e9)

def s_to_us(x):
  return (x * 1e6)

def s_to_ns(x):
  return (x * 1e9)

def us_to_s(x):
  return (x / 1e6)

def us_to_ns(x):
  return (x * 1e3)


def debug(msg):
  log.debug(json.dumps(utils.str_to_hex(msg), indent=2, sort_keys=True))


def show_warning(message, category, filename, lineno, file=None, line=None):
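  """Route Python warnings through the module logger."""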
  log.warning(message)

warnings.showwarning = show_warning

def delta((x1, x2), dt=1):
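    """Element-wise difference between a pair of samples.

    Supports numbers, sequences, and dicts; dict keys gain a '_per_sec'
    suffix when dt is an elapsed time rather than the default 1.
    """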
    if isinstance(x1, (int, float, long)):
        return (x2 - x1) / dt

    if isinstance(x1, (list, tuple)):
        return [delta((v1, v2), dt) for v1, v2 in zip(x1, x2)]

    if isinstance(x1, (dict)):
        d = {}
        for k in x1:
            if k not in x2:
                continue
            v1 = x1[k]
            v2 = x2[k]
            k2 = k + '_per_sec' if dt != 1 else k
            d[k2] = delta((v1, v2), dt)
        return d

    else:
        raise TypeError


@trace_py.trace
def trace_gen_nogo(chum, machine):
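  """Yield a complete ('X') event per /ai_monitor/output message,
  spanning the interval until the next message."""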
  uri, uri_proto, chum_store, chum_range = chum
  reader = message_reader.chum_reader(
    chum_store=chum_store, chum_range=chum_range,
    topic=[
      '/ai_monitor/output',
    ],
    enum_names=True,
  )
  last = None
  for msg in reader:
    if not last:
      if 'last_relayed_trajectory' not in msg:
        continue
      last = msg
      continue
    yield {
      'ph': 'X',
      'pid': 'NoGo',
      'tid': 'ai_monitor/output',
      'name': last.last_relayed_trajectory,
      'ts': ns_to_us(last.meta.publish_timestamp),
      'dur': ns_to_us(msg.meta.publish_timestamp - last.meta.publish_timestamp),
      'args': {
        'msg': last,
      }
    }
    last = msg


@trace_py.trace
def trace_gen_reproducibility(chum, machine):
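  """Yield a tick event per ZCI reproducibility message, attaching the
  input and output hashes as args."""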
  uri, uri_proto, chum_store, chum_range = chum
  topics = [t for t in chum_range.topics if t.endswith('/reproducibility')]
  # TODO(FW-13932) emu messages are logged as IDL, which aren't readable
  topics = [t for t in topics if not t.startswith('/emu')]
  # TODO(tom) filter by machine
  reader = message_reader.chum_reader(
    chum_store=chum_store, chum_range=chum_range,
    topics=topics,
    json_convert=False,
  )

  for msg in reader:
    if msg.proto.timestamp_ns == 0:
      log.debug('ZCI reproducibility message with bad timestamp: %s at %s', msg.meta.topic, msg.meta.timestamp)
      continue
    event = {
      'ph': 'X',
      'name': 'tick',
      'pid': msg.proto.pid or 'ZCI',
      'tid': "zci:" + msg.meta.topic.split('/')[-2],
      'ts': ns_to_us(msg.proto.timestamp_ns),
      'dur': ns_to_us(msg.proto.end_timestamp_ns - msg.proto.timestamp_ns),
      'args': {
        'inputs': utils.str_to_hex(proto.from_proto(msg.proto.input_hashes)),
        'outputs': utils.str_to_hex(proto.from_proto(msg.proto.output_hashes)),
        'ts': '{:.3f}-{:.3f}'.format(ns_to_s(msg.proto.timestamp_ns), ns_to_s(msg.proto.end_timestamp_ns)),
        'meta': msg.meta,
      }
    }
    yield event


def _trace_gen_ztrace(uri, machine):
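  """Pair ztrace begin/end probes into complete ('X') events.

  Disabled while the ztrace import at the top of this module is broken.
  """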
  assert False, "ztrace currently broken: cannot use this functionality."
  chum_range, _store, _args = message_reader.chum_range_store(uri=uri)
  invalid = lambda x: (x.startswith('/driving/velodyne') or
    x.startswith('/driving/lidar') or
    x.startswith('/log_artifact') or
    x.startswith('/zci_publisher_consumer') or
    x.startswith('/teleop_consumer') or
    x.startswith('/cammux'))
  topics = [t for t in chum_range.topics if (t.endswith('/ztrace') or t.endswith('/metrics')) and not invalid(t)]
  reader = message_reader.chum_reader(
    uri=uri,
    topic=topics,
    json_convert=True,
  )

  # map outputs to pids
  pids = {}
  tids = {}
  zci_reader = message_reader.chum_reader(
    uri=uri,
    topic=[t for t in chum_range.topics if t.endswith('/reproducibility')],
    json_convert=False,
  )
  for msg in zci_reader:
    for o in msg.proto.output_hashes:
      pids[o.topic] = msg.proto.pid or 'ZCI'
      tids[o.topic] = "zci:" + msg.meta.topic.split('/')[-2]

  maps = {}
  for t in topics:
    info = analyze_timing_utilities.kTopicInfoMap.get(t)
    if not info:
      continue
    try:
      scope_map = analyze_timing_utilities.make_scope_map(info.scopes_pbtxt, info.probe_enum)
      maps[t] = scope_map
    except Exception as e:
      warnings.warn("Could not parse scopes for {}: {}".format(t, e), UserWarning, 0)

  for msg in reader:
    scope_map = maps.get(msg.meta.topic)
    if not scope_map:
      continue
    active_events = {}
    for e in msg.events:
      scope = scope_map.get(e.probe)
      event = active_events.get(e.probe)
      if scope:
        # begin
        active_events[scope.end] = e
      elif event:
        # end
        scope = scope_map[event.probe]
        yield {
          'ph': 'X',
          'pid': pids.get(msg.meta.topic, 'ztrace'),
          'tid': tids.get(msg.meta.topic, msg.meta.topic),
          'name': scope.name,
          'ts': ns_to_us(msg.origin_timestamp_nanosec + us_to_ns(event.timestamp_microsec)),
          'dur': (e.timestamp_microsec - event.timestamp_microsec)
        }

def _trace_gen_tracing_tags(uri, machine):
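  """Group tracing tags by UUID, yielding one event spanning each tag plus
  one event per consecutive pair of log entries (ipc, queued, etc.)."""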
  reader = message_reader.chum_reader(
    uri=uri,
    topic=[
      '/planner/tracker/control',
      '/remapped_real_perception', # message_merger drops tracing tags
    ],
  )
  phases = {
    0: 'unknown',
    1: 'message_receipt',
    2: 'tick_entry',
    3: 'tick_exit',
    4: 'other',
    5: 'message_publish',
  }

  tags_by_uuid = defaultdict(list)

  for msg in reader:
    for tag in msg.tracing_tags:
      tag_name = '{}:{}:{}'.format(tag.source_type, tag.source_name, tag.uuid)
      tags_by_uuid[tag_name].append(tag)

  for tag_name, tags in sorted(tags_by_uuid.items(), key=lambda (k, t): t[0].log[0].timestamp):
      tag = tags[-1]
      yield {
        'ph': 'X',
        'pid': 'tracing_tags',
        'ts': ns_to_us(tags[0].log[0].timestamp),
        'dur': ns_to_us(tag.log[-1].timestamp - tags[0].log[0].timestamp),
        'tid': tag_name,
        'name': tag_name,
        'args': tag,
      }

      dup = set()
      for i, log2 in enumerate(tag.log):
        if i == 0: continue
        log1 = tag.log[i - 1]
        k = (
          log1.timestamp,
          log1.component_name,
          log1.phase,
          log2.timestamp,
          log2.component_name,
          log2.phase)
        if k in dup: continue
        dup.add(k)
        pair = (log1.phase, log2.phase)
        if pair in ((2, 3), (2, 5), (1, 5)):
          name = log1.component_name
        elif pair in ((5, 1), (5, 2)):
          name = 'ipc'
        elif pair in ((1, 2), (3, 2)):
          name = 'queued'
        else:
          name = '{}:{} to {}:{}'.format(
              log1.component_name,
              phases.get(log1.phase),
              log2.component_name,
              phases.get(log2.phase))
        yield {
          'ph': 'X',
          'pid': 'tracing_tags',
          'ts': ns_to_us(log1.timestamp),
          'dur': ns_to_us(log2.timestamp - log1.timestamp),
          'tid': tag_name,
          'name': name,
        }


@trace_py.trace
def trace_gen_system_metrics(chum, machine):
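  """Yield counter ('C') events for the machine's memory, swap, CPU and
  disk metrics published by the system monitor."""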
  uri, uri_proto, chum_store, chum_range = chum
  topics = ['/system_monitor/{}/system_metrics'.format(machine)]
  reader = message_reader.chum_reader(
    chum_range=chum_range, chum_store=chum_store,
    topic=topics, json_convert=False)
  last = {}

  # metadata for sorting
  yield {
    'ph': 'M',
    'pid': 'System Metrics for {}'.format(machine),
    'name': 'process_sort_index',
    'args': {'sort_index': 0},
  }

  for i, msg in enumerate(reader):
    msg = msg.proto
    ts = s_to_us(msg.sample_time)
    vm = msg.virtual_memory
    pid = 'System Metrics for {}'.format(machine)
    yield {
      'ph': 'C',
      'pid': pid,
      'name': 'Physical Memory',
      'ts': ts,
      'args': {
        # these should add up to total
        'free': vm.free,
        'buffers': vm.buffers,
        'cached': vm.cached,
        'used': vm.used,
      }
    }

    swap = msg.swap_memory
    yield {
      'ph': 'C',
      'pid': pid,
      'name': 'Swap Memory',
      'ts': ts,
      'args': {
        # these should add up to total
        'free': swap.free,
        'used': swap.used,
      }
    }

    yield {
      'ph': 'C',
      'pid': pid,
      'name': 'CPU Load 1min',
      'ts': ts,
      'args': {
        'avg1': msg.load_avg.avg1,
      }
    }

    def delta_counter(c_name, d, dt=None):
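      """Return a counter event with the change in d since the previous
      sample, or None on the first sample (filtered out downstream)."""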
      k = pid + ':' + c_name
      d1, t1 = last.get(k, (None, None))
      last[k] = d, ts
      if d1:
        if dt is None:
          dt = us_to_s(ts - t1)
        return {
          'ph': 'C',
          'pid': pid,
          'name': c_name,
          'ts': ts,
          'args': delta((d1, d), dt)
        }

    # swap usage
    yield delta_counter('Swap Memory Activity', {
      'sin': swap.sin,
      'sout': swap.sout,
    }, 1)
    yield delta_counter('CPU Times', proto.from_proto(msg.cpu_times))
    yield delta_counter('CPU Runnable', {'runnable': msg.load_avg.runnable}, 1)
    yield delta_counter('Disk IO', proto.from_proto(msg.disk_io_counters))
    yield delta_counter('CPU Stats', proto.from_proto(msg.cpu_stats))
    # TODO everything else


def process_meta(chum, machine):
  """Yield metadata ('M') events assigning process names and sort order."""
  uri, uri_proto, chum_store, chum_range = chum
  reader = message_reader.chum_reader(
    chum_store=chum_store, chum_range=chum_range,
    start_time_ns=chum_range.start_time - s_to_ns(20),
    end_time_ns=chum_range.end_time + s_to_ns(20),
    topic='/system_monitor/{}/pid_metrics'.format(machine),
    json_convert=False,
  )

  pids = defaultdict(dict)

  for msg in reader:
    for pid in msg.proto.pids:
      pid_data = msg.proto.pids[pid]
      for thread in pid_data.threads:
        pids[pid][thread.id] = pid_data

  def name(data):
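      """Derive a short, readable process name from exe and cmdline."""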
      name = getattr(data, 'exe', None)
      if not name:
          return 'unknown'
      if name == '/usr/bin/python2.7':
          name = ' '.join(data.cmdline[1:2])
      name = name.replace('/opt/zoox/ai/zoox', '')
      name = name.replace('/../zoox', '')

      try:
          path, last = name.rsplit('/', 1)
      except ValueError:
          return name

      if last == 'add_analyzers.py':
          name += ' ' + ' '.join(c for c in data.cmdline[2:] if c.startswith('__name'))
      elif last == 'roslaunch_bin.py':
          name += ' ' + ' '.join(c.split('../')[-1] for c in data.cmdline[2:])
      elif last == 'chum_record':
          pass
      else:
          name += ' ' + ' '.join(c for c in data.cmdline[2:] if not c.startswith('__log'))
      return name

  for pid in pids:
    for tid, pid_data in pids[pid].items():
      yield dict(
        ph='M', name='process_name',
        pid=pid, tid=tid,
        args=dict(name=name(pid_data)),
      )
      yield dict(
        ph='M', name='process_sort_index',
        pid=pid, tid=tid,
        args=dict(sort_index=pid),
      )


def trace_gen_none(chum, machine):
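  """Generate no events; pass '--funcs none' to disable the extra data."""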
  return []


class NonMetaIdUri(ValueError): pass

@functools.lru_cache()
def uri_to_run(uri):
    # Make the URI cover the full run by clearing all the timestamps.
    uri_proto = chumpy.parseChumUriToProto(uri)
    if not uri_proto.meta_id:
      raise NonMetaIdUri(uri)
    uri_proto.ClearField('start_time_ns')
    uri_proto.ClearField('start_offset_ns')
    uri_proto.ClearField('end_time_ns')
    uri_proto.ClearField('end_offset_ns')
    uri_proto.ClearField('around_span_ns')
    return chumpy.renderChumUri(uri_proto)


@functools.lru_cache()
def load_stack_data(uri, topic):
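  """Load the perf trace tree and symbol dictionary for a samples topic."""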
  reader = message_reader.chum_reader(
    uri=uri,
    chum_input='s3://zoox-chum-vars',
    variant='perf/flamegraph',
    topic=[
      topic.replace('/samples', '/symbol_dictionary'),
      topic.replace('/samples', '/trace_tree'),
    ],
    json_convert=False,
  )
  trace_tree = None
  symbol_dictionary = None
  for msg in reader:
    if 'trace_tree' in msg.meta.topic:
      trace_tree = msg.proto.nodes
    if 'symbol_dictionary' in msg.meta.topic:
      symbol_dictionary = msg.proto.symbols
  if trace_tree is None or symbol_dictionary is None:
    raise ValueError('No trace_tree/symbol_dictionary data for {}'.format(topic))
  return trace_tree, symbol_dictionary


def get_stack(uri, topic, trace_id):
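  """Resolve a trace_id into a symbolized stack by walking parent links."""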
  run_uri = uri_to_run(uri)
  traces, symbols = load_stack_data(run_uri, topic)
  stack = []
  while trace_id:
    node = traces[trace_id]
    stack.append(symbols[node.symbol_id])
    trace_id = node.parent
  return stack


def trace_gen_stacks(chum, machine):
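  """Yield an event per perf sample with its symbolized stack in args.

  Perf timestamps are CLOCK_MONOTONIC, so they are shifted to realtime
  using the system_events topic for this machine.
  """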
  uri, uri_proto, chum_store, chum_range = chum
  try:
    uri_to_run(uri)
  except NonMetaIdUri:
    log.warning("Cannot load perf stacks using a vehicle Chum URI. Please use a meta id Chum URI instead. Perf stacks will not be reported.")
    return

  # get time offset
  events_topic = '/system_monitor/{}/system_events'.format(machine)
  reader = message_reader.chum_reader(
    chum_store=chum_store,
    chum_range=chum_range,
    topic=[events_topic],
    json_convert=False,
  )
  for msg in reader:
    mono_to_realtime = msg.proto.timestamp - msg.proto.clock_monotonic
    break
  else:
    raise ValueError('No data on {} {}, cannot correlate timestamps'.format(uri, events_topic))

  perf_machine = machine
  if perf_machine == 'pcu':
    perf_machine = 'main-ai'
  topic_prefix = '/perf/{}/samples'.format(perf_machine)

  reader = message_reader.chum_reader(
    uri=uri,
    chum_input='s3://zoox-chum-vars',
    variant='perf/flamegraph',
    topics=[topic_prefix + i for i in ('', '/csw', '/minflt', '/majflt')],
    json_convert=False,
  )

  for msg in reader:
    s = msg.proto
    event = {
      'ph': 'X',
      'name': msg.meta.topic.rsplit('/', 1)[-1],
      'dur': 1,
      'ts': ns_to_us(s.timestamp + mono_to_realtime),
      'pid': s.pid,
      'tid': s.tid,
      'args': {
        # can't use actual stack field, so put it in args
        # https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/edit#heading=h.yr703knxre9f
        # https://chromium.googlesource.com/catapult/+/refs/heads/master/tracing/tracing/extras/importer/trace_event_importer.html#1539
        'trace_id': s.trace_id,
        'stack': get_stack(uri, msg.meta.topic, s.trace_id),
      },
    }
    yield event


def range_splitter(chum_range, uri_proto, chunk_length):
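  """Split a chum range into chunk_length-second sub-ranges, yielding an
  updated (uri, uri_proto, chum_range) per chunk."""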
  start = chum_range.start_time
  end = chum_range.end_time
  duration = int(s_to_ns(chunk_length))
  for chunk_start in xrange(start, end, duration):
    uri_proto.start_time_ns = chunk_start
    uri_proto.end_offset_ns = min(duration, end - chunk_start)
    chum_range.start_time = uri_proto.start_time_ns
    chum_range.end_time = uri_proto.start_time_ns + uri_proto.end_offset_ns
    uri = chumpy.renderChumUri(uri_proto)
    yield uri, uri_proto, chum_range


@trace_py.trace
def save_html(filename, title, trace_data):
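  """Write a self-contained HTML trace viewer, embedding each chunk as a
  gzipped, base64-encoded 'viewer-data' payload."""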
  HEADER = '''<!DOCTYPE html>
<html>
  <head i18n-values="dir:textdirection;">
  <meta http-equiv="Content-Type" content="text/html;charset=utf-8">
  <meta http-equiv="origin-trial" content="AnYuQDtUf6OrWCmR9Okd67JhWVTbmnRedvPi1TEvAxac8+1p6o9q08FoDO6oCbLD0xEqev+SkZFiIhFSzlY9HgUAAABxeyJvcmlnaW4iOiJodHRwczovL2dvb2dsZXVzZXJjb250ZW50LmNvbTo0NDMiLCJmZWF0dXJlIjoiV2ViQ29tcG9uZW50c1YwIiwiZXhwaXJ5IjoxNjA0NjE0NTM4LCJpc1N1YmRvbWFpbiI6dHJ1ZX0=">
  <meta http-equiv="origin-trial" content="AkFXw3wHnOs/XXYqFXpc3diDLrRFd9PTgGs/gs43haZmngI/u1g8L4bDnSKLZkB6fecjmjTwcAMQFCpWMAoHSQEAAAB8eyJvcmlnaW4iOiJodHRwczovL2Nocm9taXVtLWJ1aWxkLXN0YXRzLmFwcHNwb3QuY29tOjQ0MyIsImZlYXR1cmUiOiJXZWJDb21wb25lbnRzVjAiLCJleHBpcnkiOjE2MTIyMjM5OTksImlzU3ViZG9tYWluIjp0cnVlfQ==">
  <meta http-equiv="origin-trial" content="AtQY4wpX9+nj+Vn27cTgygzIPbtB2WoAoMQR5jK9mCm/H2gRIDH6MmGVAaziv9XnYTDKjhBnQYtecbTiIHCQiAIAAACEeyJvcmlnaW4iOiJodHRwczovL2Nocm9taXVtLWJ1aWxkLXN0YXRzLXN0YWdpbmcuYXBwc3BvdC5jb206NDQzIiwiZmVhdHVyZSI6IldlYkNvbXBvbmVudHNWMCIsImV4cGlyeSI6MTYxMjIyMzk5OSwiaXNTdWJkb21haW4iOnRydWV9">
  <title>'''

  FOOTER = '''</head>
  <body>
  </body>
</html>'''

  out = open(filename, 'w')
  out.write(HEADER)
  out.write(title)
  out.write(open('scripts/tools/trace/trace_body.html').read())

  bytes_total = 0
  bytes_compressed = 0

  T = trace_py.trace_callable
  first = True
  for uri, uri_proto, trace_events, ftrace in trace_data:
    if first and ftrace:
      ftrace_data = T(''.join)(ftrace)
      first = False
    else:
      # traceview complains if there are >2 clock sync markers, but only on
      # subsequent viewer-data payloads for some reason.
      ftrace_data = T(''.join)(i for i in ftrace if 'trace_event_clock_sync' not in i)

    trace_chunk = {
      'traceEvents': list(trace_events),
      'displayTimeUnits': 's',
      'systemTraceEvents': ftrace_data,
      # 'stackFrames': None,
      # 'otherData': None,
      # 'samples': None,
    }
    trace_gzip = StringIO()
    with gzip.GzipFile(fileobj=trace_gzip, mode='w', mtime=0, compresslevel=4) as f:
      json_data = T(json.dumps)(trace_chunk, sort_keys=True, indent=2)
      T(f.write)(json_data)
      bytes_total += len(json_data)

    trace_gzip_data = T(trace_gzip.getvalue)()
    trace_b64 = T(base64.b64encode)(trace_gzip_data)
    bytes_compressed += len(trace_gzip_data)
    log.info('Chunk bytes: %d, compressed: %d', len(json_data), len(trace_gzip_data))
    if len(json_data) > 3e8:
      log.warning('Over 300MB of data in a single chunk! Chrome may fail to load this data, consider using a shorter --chunk_length')

    out.write('''<script id="viewer-data" type="text/plain">\n''')
    out.write(trace_b64)
    out.write('''\n</script>\n''')

  log.info('Total bytes: %d, compressed: %d', bytes_total, bytes_compressed)
  if bytes_total > 5e8:
    log.warning('Over 500MB of trace data! Chrome may fail to load this data or may be very slow. Consider using a shorter Chum URI.')

  out.write(FOOTER)
  out.close()


def main():
  logging.basicConfig(level=logging.INFO)

  func_names = [n[10:] for n in globals() if n.startswith('trace_gen_')]

  TRUE = ['true', 'on', 'yes']
  FALSE = ['false', 'off', 'no']
  BOOL = TRUE + FALSE

  parser = argparse.ArgumentParser(
    description=__doc__,
    formatter_class=argparse.RawTextHelpFormatter)

  parser.add_argument('uri',
    help='Chum URI of range to generate')
  parser.add_argument('--bpf',
    choices=BOOL, default='true',
    help='Include bpf (scheduler) events')
  parser.add_argument('--user',
    choices=BOOL, default='true',
    help='Include user space trace events')
  parser.add_argument('--out',
    required=False,
    help='Directory to save the trace file to (a temp file is used if unset)')
  parser.add_argument('--funcs',
    nargs='*',
    choices=func_names,
    help='Data to add to the trace.')
  parser.add_argument('--machine',
    help='Machine to read trace from',
    default='pcu')
  parser.add_argument('--chunk_length',
    help='Duration to chunk event data',
    type=float, default=2.0)
  parser.add_argument('--pid',
    help='Only show user trace data for this PID (0 = all PIDs)',
    default=0, type=int)

  args = parser.parse_args()
  include_bpf = args.bpf in TRUE
  include_user = args.user in TRUE
  machine = args.machine

  def get_filename(uri_proto, machine):
    fname = 'run_trace_{}_{:.3f}_{}'.format(
      uri_proto.vehicle or uri_proto.meta_id,
      chumpy.getStartTime(uri_proto) / 1e9,
      machine
      )
    if args.out:
      return os.path.join(args.out, fname + '.html')
    tf = tempfile.NamedTemporaryFile(
        prefix=fname, suffix='.html', delete=False)
    return tf.name

  if args.funcs:
      funcs = [globals()['trace_gen_' + n] for n in args.funcs]
  else:
      funcs = [f for n, f in globals().items() if n.startswith('trace_gen_')]

  def load_trace(chum_store, chum_range, uri_proto, machine):
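    """Yield (uri, uri_proto, trace_events, ftrace) tuples: first the
    unsegmented process metadata, then one tuple per chunked sub-range."""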
    pid_map = trace_utils.getPidMap(args.uri, machine)

    # trace data that does not need to be segmented
    uri = chumpy.renderChumUri(uri_proto)
    chum = uri, uri_proto, chum_store, chum_range
    trace_events = []
    ftrace = []
    trace_events += list(process_meta(chum, machine))
    yield uri, uri_proto, trace_events, ftrace

    for chum in range_splitter(chum_range, uri_proto, args.chunk_length):
      trace_events = []
      uri, uri_proto, chum_range = chum
      chum = uri, uri_proto, chum_store, chum_range

      log.info('Loading %s', uri)
      for func in funcs:
        log.debug('Processing %s', func.__name__)
        gen = func(chum, machine)
        trace_events += list(filter(None, gen))

      ftrace, events = trace_utils.getFtrace(
        uri, include_bpf, include_user, include_user, machine, pid_map, args.pid)
      log.debug('%s json events', len(events))
      log.debug('%s ftrace lines', len(ftrace))
      events = map(json.loads, events) # TODO find a way to not do this
      # trim very long running syscalls that start >1s outside the
      # requested range, so that traceview doesn't look funny
      events = [e for e in events if (e['ph'] != 'X') or (us_to_ns(e['ts'] - e['dur'] + s_to_us(1)) > chum_range.start_time)]
      trace_events += events

      yield uri, uri_proto, trace_events, ftrace

  uri_proto = chumpy.parseChumUriToProto(args.uri)
  chum_store, chum_range = chumpy.parseChumUri(args.uri)

  filename = get_filename(uri_proto, machine)
  title = args.uri
  trace_gen = load_trace(chum_store, chum_range, uri_proto, machine)
  save_html(filename, title, trace_gen)
  log.info('Saved output to \t%s', filename)


if __name__ == '__main__':
  exit(main())
