# -*- coding: utf-8 -*-

import json
import os, sys
import string
import urllib

import hashlib
import mimetypes
import time, datetime
import re
import functools
import platform

import pymongo
from bson.objectid import ObjectId

from pprint import pprint

from pylons import request, response, tmpl_context as c
from pylons import config
from pylons.controllers.util import redirect
from pylons.decorators.rest import restrict

from harstorage.lib.base import BaseController, render
from harstorage.lib.HAR import HAR
from harstorage.lib.MongoHandler import MongoDB
import harstorage.lib.helpers as h
from harstorage.controllers.migration import MIGRATION_STATUS

# Aggregation function applied per metric when several consecutive
# results are collapsed into one chart point (see
# ResultsController._aggregate_results): numeric metrics are averaged,
# timestamps/flags/score dicts keep the first value of the bucket.
AG_FUNCS = {"timestamp": h.metrics_pick_first,
            "multi_har": h.metrics_pick_first,
            "full_load_time": h.metrics_average,
            "transaction_time": h.metrics_average,
            "requests": h.metrics_average,
            "total_size": h.metrics_average,
            "ps_scores": h.metrics_pick_first,
            "onload_event": h.metrics_average,
            "start_render_time": h.metrics_average,
            "time_to_first_byte": h.metrics_average,
            "total_dns_time": h.metrics_average,
            "total_transfer_time": h.metrics_average,
            "total_server_time": h.metrics_average,
            "avg_connecting_time": h.metrics_average,
            "avg_blocking_time": h.metrics_average,
            "text_size": h.metrics_average,
            "media_size": h.metrics_average,
            "cache_size": h.metrics_average,
            "redirects": h.metrics_average,
            "bad_requests": h.metrics_average,
            "domains": h.metrics_average}

# Metric keys read from every test-result document, in the column order
# used by the timeline chart.  TITLES below mirrors METRICS[1:]
# ("timestamp" itself has no chart title) — keep the two lists in sync.
METRICS = ["timestamp",
           "full_load_time",
           "transaction_time",
           "requests",
           "total_size",
           "ps_scores",
           "onload_event",
           "start_render_time",
           "time_to_first_byte",
           "total_dns_time",
           "total_transfer_time",
           "total_server_time",
           "avg_connecting_time",
           "avg_blocking_time",
           "text_size",
           "media_size",
           "cache_size",
           "redirects",
           "bad_requests",
           "domains"]

# Metrics for which multi-HAR uploads additionally store per-run
# minimum and maximum values under "min_<key>" / "max_<key>"
# (see _make_doc_from_subdocs and the loop extending AG_FUNCS below).
MULTIHAR_KEYS = ["full_load_time",
                 "transaction_time",
                 "onload_event",
                 "start_render_time",
                 "time_to_first_byte",
                 "total_dns_time",
                 "total_transfer_time",
                 "total_server_time",
                 "avg_connecting_time",
                 "bad_requests",
                 "avg_blocking_time",
                 "total_size",
                 "text_size",
                 "media_size",
                 "cache_size",
                 "requests",
                 "redirects"]


# Extend the metric list and the aggregation table with the min_/max_
# variants that multi-HAR runs record for each key.
MULTIHAR_METRICS = ["multi_har"] + METRICS
for mh_key in MULTIHAR_KEYS:
    for prefix, ag_func in (("min_", h.metrics_minimum),
                            ("max_", h.metrics_maximum)):
        derived = prefix + mh_key
        MULTIHAR_METRICS.append(derived)
        AG_FUNCS[derived] = ag_func


# Human-readable chart titles, aligned one-to-one with METRICS[1:]
# ("timestamp" is not charted as a series of its own).
TITLES = ["Full Load Time",
          "Transaction Time",
          "Total Requests",
          "Total Size",
          "Page Speed Score",
          "onLoad Event",
          "Start Render Time",
          "Time to First Byte",
          "Total DNS Time",
          "Total Transfer Time",
          "Total Server Time",
          "Avg. Connecting Time",
          "Avg. Blocking Time",
          "Text Size",
          "Media Size",
          "Cache Size",
          "Redirects",
          "Bad Requests",  # fixed typo: was "Bad Rquests"
          "Domains"]

class ResultsController(BaseController):

    """
    Core controller of the results repository: serves the home page,
    per-series detail pages, chart/run-info data endpoints and HAR
    uploads.
    """

    def __before__(self):
        """Runs before every action: expose the static-content version."""

        # Appended to static asset URLs by the templates so a new
        # deploy busts browser caches
        c.rev = config["app_conf"]["static_version"]

    @restrict("GET")
    def index(self):
        """Home page with the latest test results"""

        # Migration (harstorage v1.0)
        migration_handler = MongoDB(collection = "migration")
        if hasattr(c, "message"): return render("/error.html")


        status = migration_handler.collection.find_one({"status": MIGRATION_STATUS})
        if status is None:
            redirect("/migration/status")


        # MongoDB handler
        mdb_handler = MongoDB()
        if hasattr(c, "message"): return render("/error.html")

        all_series = list(mdb_handler.series.find().sort([("label", 1)]))

        c.filter_label = request.GET.get("label")
        if c.filter_label:
            series = []
            for serie in all_series:
                if c.filter_label == serie["label"]:
                    series.append(serie)
        else:
            series_by_label = {}
            for serie in all_series:
                by_label = series_by_label.setdefault(serie["label"], [])
                by_label.append(serie)

            fold_series = set()
            for label, series in series_by_label.items():
                if len(series) > 8:
                    fold_series.add(label)

            series = []
            for fold_serie in fold_series:
                series.append({"label": fold_serie,
                               "_id": "__search__",
                               "url": None,
                               "last_run": ""})
            for serie in all_series:
                if serie["label"] not in fold_series:
                    series.append(serie)


        # Numner of records
        c.rowcount = len(series)

        # Populate data table with the latest test results
        c.metrics_table = [[], [], [], [], []]

        for row in series:
            c.metrics_table[0].append(row["_id"])
            c.metrics_table[1].append(row["label"])
            c.metrics_table[2].append(urllib.unquote(unicode(row["url"])))
            c.metrics_table[3].append(row["last_run"])
            c.metrics_table[4].append(row.get("visual"))

        return render("/home/core.html")

    @restrict("GET")
    def details(self):
        """Page with test results"""

        mdb_handler = MongoDB()

#        # Try to fetch data for selecetor box
        c.series_id = request.GET["series"]
        c.series = mdb_handler.series.find_one(ObjectId(c.series_id))
        c.url = urllib.unquote(c.series["url"])

        # Generate context for selector
##        self._set_options_in_selector(c.mode, c.label)

        # # Define url for data aggregation
        # if c.mode == "label":
        #     c.query = "/superposed/display?" + \
        #               "step_1_label=" + c.label + \
        #               "&step_1_start_ts=" + min(c.timestamp) + \
        #               "&step_1_end_ts=" + max(c.timestamp)
        #     c.histo = "true"
        # else:
        #     c.histo = "false"
        #     c.query = "None"

        return render("/details/core.html", {"pagespeed_enabled": config["app_conf"]["ps_enabled"] == "true"})

    def _set_options_in_selector(self, mode, label):
        """
        Populate c.timestamp with all timestamps of a result set.

        @parameter mode  - document field to match on (e.g. "label")
        @parameter label - value that field must equal
        """

        # Newest first; fetch only the timestamp field from the database
        cursor = MongoDB().collection.find(
            {mode: label},
            fields = ["timestamp"],
            sort = [("timestamp", -1)])

        c.timestamp = [doc["timestamp"] for doc in cursor]

    def _fill_time_gaps(self, results):
        """Normalize results onto a constant time raster.

        The stored measurements possibly do not have a constant
        time-slice between them.  The most common gap between
        consecutive samples (in epoch seconds — the value is later fed
        to time.localtime) is chosen as the raster, and empty slots are
        padded with dummy entries so the chart's x-axis stays linear.

        Callers guarantee a non-empty ``results`` list (timeline()
        returns early on empty data before calling this).
        """

        tss = [(h.to_unix_date(result["timestamp"]), result) for result in results]

        # Histogram of the gaps between consecutive samples.  Seeded
        # with {1: 0} so max() below never sees an empty dict (e.g. for
        # a single-sample input).
        rasters = {1:0}
        for idx, result in enumerate(tss[:-1]):
            next_result = tss[idx + 1][0]
            diff = next_result - result[0]
            cnt = rasters.setdefault(diff, 0)
            cnt += 1
            rasters[diff] = cnt

        # Pick the most frequent gap as the raster.  NOTE(review): on a
        # tie the winner depends on dict iteration order (the last
        # matching entry wins).
        best_raster_cnt = max(rasters.values())
        for value, cnt in rasters.items():
            if cnt == best_raster_cnt:
                best_raster = value

        # ... got it!

        # Walk from the first to the last timestamp in raster steps,
        # emitting the real sample when one falls into the current slot
        # and a dummy (all metrics None, zero Page Speed score) otherwise
        out = []
        idx = 0
        ts = tss[0][0]
        end_ts = tss[-1][0]
        dummy = {}
        for metric in METRICS:
            dummy[metric] = None
        dummy["ps_scores"] = {"Total Score": 0}

        while (ts <= end_ts) and idx < len(tss):
            if tss[idx][0] <= ts + best_raster:
                out.append(tss[idx][1])
                idx += 1
            else:
                filler = dummy.copy()
                filler["timestamp"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts))
                out.append(filler)
            ts += best_raster

        # Make sure the newest sample is always part of the output
        if results and results[-1] != out[-1]:
            out.append(results[-1])

        return out



    def _aggregate_results(self, results, count, metrics, ag_funcs):
        """Fold ``results`` into at most ``count`` aggregated buckets.

        @parameter results  - chronologically ordered result documents
        @parameter count    - number of buckets to produce
        @parameter metrics  - metric keys to aggregate
        @parameter ag_funcs - mapping metric name -> aggregation function

        Each bucket additionally carries "ag_timestamp": first and last
        source timestamp joined by "#".  Callers invoke this only with
        count < len(results) (see timeline()), which guarantees every
        bucket receives at least one result.
        """

        factor = float(count) / float(len(results))

        ag_list = [{} for idx in range(count)]

        # Distribute the results over the buckets (reordering into lists)
        for idx, result in enumerate(results):
            # int() replaces the Python-2-only long(): it is sufficient
            # for a list index and keeps the code Python-3 compatible
            target_idx = int(idx * factor)
            for metric in metrics:
                ag_el = ag_list[target_idx].setdefault(metric, [])
                ag_el.append(result.get(metric, "n/a"))
            ag_ts = ag_list[target_idx].setdefault("ag_timestamp", [])
            ag_ts.append(result["timestamp"])

        # Collapse each bucket's value lists with the configured function
        for ag_result in ag_list:
            for metric in metrics:
                ag_func = ag_funcs[metric]
                ag_result[metric] = ag_func(ag_result[metric])
            ag_result["ag_timestamp"] = ag_result["ag_timestamp"][0] + "#" + ag_result["ag_timestamp"][-1]

        return ag_list

    def _convert_results_to_datapoints(self, results):
        """Turn result documents into per-metric string datapoint lists.

        Returns (data, exclude): ``data`` holds one list of stringified
        values per entry in METRICS; ``exclude`` is the set of metric
        names for which at least one result had no value ("n/a").
        """

        exclude = set()
        multihar_keys = set(MULTIHAR_KEYS)
        data = [[] for _unused in METRICS]

        for result in results:

            is_multi_har = result.get("multi_har", False)

            for column, metric in enumerate(METRICS):
                if metric == "ps_scores":
                    # Only the total Page Speed score is charted
                    point = str(result[metric]["Total Score"])
                else:
                    point = str(result.get(metric, "n/a"))

                if point == "n/a":
                    exclude.add(metric)
                elif is_multi_har and metric in multihar_keys:
                    # Attach the min/max band for multi-HAR runs
                    point += "|" + str(result.get("min_" + metric, "")) + "|" + str(result.get("max_" + metric, ""))

                data[column].append(point) # fill in the data

        return data, exclude

    @restrict("GET")
    def timeline(self):
        """Generate data for timeline chart"""

        titles = TITLES[:] # private copy for this run

        results, multi_har_info = self.get_timeline_results(series = request.GET["series"],
                                                            from_ts = h.to_unix_date(request.GET["from"]),
                                                            to_ts = h.to_unix_date(request.GET["to"]),
                                                            zoom_min = request.GET.get("zoom_min"),
                                                            zoom_max = request.GET.get("zoom_max"),
                                                            navi = request.GET.get("navi"),
                                                            multi_har_idx = request.GET.get("mhidx"),
                                                            metrics = MULTIHAR_METRICS[:])

        if results:
            new_from_date = results[0]["timestamp"]
            new_to_date = results[-1]["timestamp"]
        else:
            new_from_date = new_to_date = ""

        # aggregate to MAX-number of values
        max_results = int(config["app_conf"].get("max_results", 100))

        if not results:
            # will be handled on the client-site
            return "no-data"

        # fill-in the time-gaps
        results = self._fill_time_gaps(results)


        if len(results) > max_results:
            aggregated_results = True
            results = self._aggregate_results(results, max_results, MULTIHAR_METRICS, AG_FUNCS)
        else:
            aggregated_results = False

        data, exclude = self._convert_results_to_datapoints(results)

        # Update list of titles
        if "onload_event" in exclude:
            titles.pop(titles.index("onLoad Event"))
        if "start_render_time" in exclude:
            titles.pop(titles.index("Start Render Time"))
        if "transaction_time" in exclude:
            titles.pop(titles.index("Transaction Time"))

        header = []
        for title in titles:
            header.append(title)

        output = [header]

        for dataset in data:
            if "n/a" not in dataset:
                output.append(dataset)

        points = ";".join(["#".join(dataset) for dataset in output])

        if aggregated_results:
            ag_timestamps = ";".join([result["ag_timestamp"] for result in results])
        else:
            ag_timestamps = ""

        if multi_har_info:

            if request.GET.get("mhidx") is None:
                mhidx = "-1"
            else:
                mhidx = request.GET.get("mhidx")

            multi_har_info_str = stringify_multi_har_info(multi_har_info, mhidx, unbound = True)

        else:
            multi_har_info_str = ""

        return "$".join([points, new_from_date, new_to_date, ag_timestamps, multi_har_info_str])

    @restrict("GET")
    def multi_har_info(self):
        """ Generate only the mutli-har-screenshots part """

        series_id = request.GET["series"]
        collection = MongoDB().collection

        # Parameters from GET request
        if request.GET.get("ag_timestamp"):
            # query all results aggregated on this point and
            # update the date-selector with all the found timestamps
            # and update the run-info-tabs with the first result
            timestamp_start, timestamp_end = request.GET["ag_timestamp"].split("#")

            # DB query

            test_results = collection.find({"timestamp": {"$gte": timestamp_start,
                                                          "$lte": timestamp_end},
                                            "series": ObjectId(series_id)})

            test_results = test_results.sort("timestamp")
            test_results = list(test_results)

            if not test_results:
                return json.dumps({"no_data": True})

            test_result = test_results[0]


        elif request.GET.get("timestamp"):
            # query only this one point and update only the run-info-tabs with it
            test_result = collection.find_one({"timestamp": request.GET["timestamp"],
                                               "series": ObjectId(series_id)})

            if not test_result:
                return json.dumps({"no_data": True})

        if request.GET.get("mhidx") is None:
            mhidx = "-1"
        else:
            mhidx = request.GET.get("mhidx")

        multi_har_info = self.get_multi_har_info(test_result)
        multi_har_info_str = stringify_multi_har_info(multi_har_info, mhidx, unbound = False)

        return json.dumps({"multi_har_info_str": multi_har_info_str})


    @restrict("GET")
    def runinfo(self):
        """Generate detailed data for each test run"""

        series_id = request.GET["series"]
        multi_har_idx = request.GET.get("mhidx")

        if multi_har_idx:
            collection = MongoDB().mh_collection
        else:
            collection = MongoDB().collection

        # Parameters from GET request
        if request.GET.get("ag_timestamp"):
            # query all results aggregated on this point and
            # update the date-selector with all the found timestamps
            # and update the run-info-tabs with the first result
            timestamp_start, timestamp_end = request.GET["ag_timestamp"].split("#")

            # DB query

            if multi_har_idx:
                test_results = collection.find({"idx": int(multi_har_idx),
                                                "timestamp": {"$gte": timestamp_start,
                                                              "$lte": timestamp_end},
                                                "series": ObjectId(series_id)})
            else:
                test_results = collection.find({"timestamp": {"$gte": timestamp_start,
                                                              "$lte": timestamp_end},
                                                "series": ObjectId(series_id)})

            test_results = test_results.sort("timestamp")
            test_results = list(test_results)

            if not test_results:
                return json.dumps({"no_data": True})

            test_result = test_results[0]


        elif request.GET.get("timestamp"):
            # query only this one point and update only the run-info-tabs with it
            test_results = []
            if multi_har_idx:
                test_result = collection.find_one({"idx": int(multi_har_idx),
                                                   "timestamp": request.GET["timestamp"],
                                                   "series": ObjectId(series_id)})
            else:
                test_result = collection.find_one({"timestamp": request.GET["timestamp"],
                                                   "series": ObjectId(series_id)})

        # Domains breakdown
        domains_req_ratio = dict()
        domains_weight_ratio = dict()

        # time breakdown
        res_by_time = {}
        har = HAR(self.read_har(test_result))

        for entry in har.har["log"]["entries"]:
            mime_type = entry["response"]["content"]["mimeType"]
            mime_type = h.norm_mime_type(mime_type)
            esum = res_by_time.setdefault(mime_type, 0)
            if "timings" in entry:
                esum += sum(entry["timings"].values())
            res_by_time[mime_type] = esum

        for hostname, value in test_result["domains_ratio"].items():
            hostname = re.sub("\|", ".", hostname)
            domains_req_ratio[hostname] = value[0]
            domains_weight_ratio[hostname] = value[1]

        # Summary stats
        summary = { "full_load_time":       test_result["full_load_time"],
                    "transaction_time":     test_result.get("transaction_time", 0),
                    "onload_event":         test_result["onload_event"],
                    "start_render_time":    test_result["start_render_time"],
                    "time_to_first_byte":   test_result["time_to_first_byte"],
                    "total_dns_time":       test_result["total_dns_time"],
                    "total_transfer_time":  test_result["total_transfer_time"],
                    "total_server_time":    test_result["total_server_time"],
                    "avg_connecting_time":  test_result["avg_connecting_time"],
                    "avg_blocking_time":    test_result["avg_blocking_time"],
                    "total_size":           test_result["total_size"],
                    "text_size":            test_result["text_size"],
                    "media_size":           test_result["media_size"],
                    "cache_size":           test_result["cache_size"],
                    "requests":             test_result["requests"],
                    "redirects":            test_result["redirects"],
                    "bad_requests":         test_result["bad_requests"],
                    "domains":              test_result["domains"]}

        # Page Speed Scores
        scores = dict()
        for rule, score in test_result["ps_scores"].items():
            scores[rule] = score

        # Data for HAR Viewer
        har_id = str(test_result["_id"])

        filename = os.path.join(config["app_conf"]["temp_store"], har_id)
        with open(filename, "w") as file:
            file.write(self.read_har(test_result).encode("utf-8"))

        screenshot_url = self._get_screenshot_url(test_result.get("screenshot"))

        # Final JSON
        return json.dumps({"timestamp":            test_result["timestamp"],
                           "summary":       summary,
                           "pagespeed":     scores,
                           "weights":       test_result["weights_ratio"],
                           "requests":      test_result["requests_ratio"],
                           "res_time":      res_by_time,
                           "d_weights":     domains_weight_ratio,
                           "d_requests":    domains_req_ratio,
                           "screenshot_url": screenshot_url,
                           "har":           har_id,
                           "timestamps":    [result["timestamp"] for result in test_results]})

    @restrict("GET")
    def harviewer(self):
        """HAR Viewer iframe"""

        # HAR Viewer customization via cookie
        response.set_cookie("phaseInterval", "-1", max_age=365*24*3600 )

        return render("/harviewer.html")


    def upload_rest(function):
        """Decorator adapting upload() results to the client type.

        The wrapped action returns (success_flag, payload).  Automated
        clients (request header "automated: true") get a plain-text
        response; interactive clients are redirected to the details
        page on success or shown the upload form with an error.

        Fix: the original returned None (an empty response) to a
        browser when an "automated" header was present with any value
        other than "true"; the KeyError-based header probing is
        replaced by a plain .get() lookup.

        NOTE(review): details() reads a "series" GET parameter, while
        this redirect passes "label" — looks stale, confirm.
        """

        @functools.wraps(function)
        def wrapper(*args):
            result, ext = function(*args)

            automated = request.headers.get("automated") == "true"

            if result:
                if automated:
                    return "Successful"
                # redirect() raises an HTTP exception, ending the request
                redirect("/results/details?label=" + ext)
            else:
                if automated:
                    return ext
                c.error = ext
                return render("/upload.html")

        return wrapper

    def _analyze_har(self, har):
        """Analyze a parsed HAR and build the result document.

        @parameter har - HAR instance whose parsing succeeded

        Runs har.analyze(), optionally evaluates Page Speed scores,
        stores the screenshot (if any) in GridFS and returns the dict
        to be inserted into the results collection.
        """

        har.analyze()
        # Evaluate Page Speed scores
        if config["app_conf"]["ps_enabled"] == "true":
            scores = self._get_pagespeed_scores(har.har)
        else:
            # Page Speed disabled: report a neutral perfect score
            scores = dict([("Total Score", 100)])

        # Upload time doubles as the result's timestamp
        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

        # Screenshot (if the HAR carries one) goes to GridFS
        screenshot = har.get_screenshot()
        if screenshot:
            mdb_handler = MongoDB()
            # Python-2-only codec: screenshots arrive base64-encoded
            screenshot = screenshot.decode("base64")
            screenshot_id = mdb_handler.fs.put(screenshot,
                                               filename = "[" + timestamp + "] " + har.url,
                                               tag = har.get_tag(),
                                               label = har.label,
                                               url = har.url,
                                               timestamp = timestamp)
        else:
            screenshot_id = None

        result = {  "tag":                  har.get_tag(),
                    "label":                har.label,
                    "url":                  har.url,
                    "timestamp":            timestamp,
                    "full_load_time":       har.full_load_time,
                    "transaction_time":     har.get_transaction_time(),
                    "onload_event":         har.onload_event,
                    "start_render_time":    har.start_render_time,
                    "time_to_first_byte":   har.time_to_first_byte,
                    "total_dns_time":       har.total_dns_time,
                    "total_transfer_time":  har.total_transfer_time,
                    "total_server_time":    har.total_server_time,
                    "avg_connecting_time":  har.avg_connecting_time,
                    "avg_blocking_time":    har.avg_blocking_time,
                    "total_size":           har.total_size,
                    "text_size":            har.text_size,
                    "media_size":           har.media_size,
                    "cache_size":           har.cache_size,
                    "requests":             har.requests,
                    "redirects":            har.redirects,
                    "bad_requests":         har.bad_requests,
                    "domains":              len(har.domains),
                    "ps_scores":            scores,
                    "har":                  har.get_har_string(),
                    "weights_ratio":        har.weight_ratio(),
                    "requests_ratio":       har.req_ratio(),
                    "domains_ratio":        har.domains,
                    "screenshot":           screenshot_id,
                    "error_msg":            har.get_error_msg(),
                    "info":                 har.get_info()  }

        return result

    def _get_series_id(self, tag, label, url, visual):
        """Return the _id of the series for (url, label).

        Creates the series document on first sight, otherwise refreshes
        its "last_run" and "visual" fields.
        """

        series_doc = MongoDB().series.find_one({"url": url, "label": label})

        if series_doc:
            # Known series: refresh the bookkeeping fields
            series_doc["last_run"] = datetime.datetime.now()
            series_doc["visual"] = visual
            MongoDB().series.update({"_id": series_doc["_id"]}, series_doc)
        else:
            # First upload for this (url, label) combination
            series_doc = {"tag": tag,
                          "url": url,
                          "visual": visual,
                          "label": label,
                          "title": label + " (" + url + ")",
                          "last_run": datetime.datetime.now()}
            MongoDB().series.insert(series_doc)

        return series_doc["_id"]



    def _make_doc_from_subdocs(self, sub_docs):
        """Build a synthetic "master" document from multi-HAR sub-runs.

        Calculates MIN/MAX/AVG over MULTIHAR_KEYS.  The master carries
        the identifying fields of the first sub-run and no HAR payload
        of its own.
        """

        doc = {"multi_har": True,
               "tag": sub_docs[0]["tag"],
               "label": sub_docs[0]["label"],
               "url": sub_docs[0]["url"],
               "timestamp": sub_docs[0]["timestamp"],
               "domains": None,
               "ps_scores": {"Total Score": 0},
               "har": None,
               "weights_ratio": None,
               "requests_ratio": None,
               "domains_ratio": None,
               "screenshot": sub_docs[0]["screenshot"]}

        # Metrics missing ("n/a") in at least one sub-run
        n_a_keys = set()

        mins = {}
        maxs = {}
        sums = {}
        for sub_doc in sub_docs:
            for key in MULTIHAR_KEYS:
                value = sub_doc[key]
                if value == "n/a":
                    n_a_keys.add(key)
                    continue
                mins[key] = min(mins.get(key, sys.maxint), value)
                maxs[key] = max(maxs.get(key, 0), value)
                sums[key] = sums.get(key, 0) + value

        for key in MULTIHAR_KEYS:
            if key in n_a_keys:
                doc[key] = "n/a"
                # BUG FIX: these keys were written as "min"/"max" + key
                # (missing underscore), so readers looking up
                # "min_<key>" / "max_<key>" (see MULTIHAR_METRICS and
                # _convert_results_to_datapoints) never saw the "n/a"
                doc["min_" + key] = "n/a"
                doc["max_" + key] = "n/a"
            else:
                # Python 2 integer division — matches the original
                doc[key] = sums[key] / len(sub_docs)
                doc["min_" + key] = mins[key]
                doc["max_" + key] = maxs[key]

        return doc

    @restrict("POST")
    @upload_rest
    def upload(self):
        """Controller for uploads of new test results"""

        har = hars = None

        if "file" in request.POST:
            try:
                har = HAR(request.POST["file"].value)
            except:
                har = HAR(request.POST["file"])
        elif "multi_file" in request.POST:
            try:
                multi_file = request.POST["multi_file"].value
            except:
                multi_file = request.POST["multi_file"]
            multi_har = json.loads(multi_file) # its a list of strings (each string a json itself)

            hars = []
            for tmp in multi_har:
                hars.append(HAR(tmp)) # parse each har as above
        else:
            return False, "no data."

        # MongoDB handler
        mdb_handler = MongoDB()

        # Analysis of uploaded data
        if har:
            if har.parsing_status == "Successful":
                # Parsing imported HAR file
                try:
                    doc = self._analyze_har(har)
                except Exception as error:
                    return False, ": ".join([type(error).__name__, error.message])
            else:
                return False, har.parsing_status
        elif hars:
            sub_docs = []
            for har in hars:
                if har.parsing_status == "Successful":
                    # Parsing imported HAR file
                    try:
                        doc = self._analyze_har(har)
                        sub_docs.append(doc)
                    except Exception as error:
                        raise
                        return False, ": ".join([type(error).__name__, error.message])
                else:
                    return False, har.parsing_status

            doc = self._make_doc_from_subdocs(sub_docs) # calculating min/max/avg from all subdocs


        if hasattr(c, "message"):
            return False, c.message
        else:
            doc["series"] = self._get_series_id(tag = doc["tag"], url = doc["url"], label = doc["label"], visual = har.get_visual())

            mdb_handler.collection.insert(doc)

            if hars:
                idx = 0
                for sub_doc in sub_docs:
                    sub_doc["master_doc"] = doc["_id"]
                    sub_doc["series"] = doc["series"]
                    sub_doc["idx"] = idx
                    mdb_handler.mh_collection.insert(sub_doc)
                    idx += 1

            return True, doc["label"]


    def _get_pagespeed_scores(self, har):
        """Run the pagespeed_bin tool on a HAR and return its scores.

        @parameter har - de-serialized HAR document (dict)

        Returns {"Total Score": int, <localized rule name>: int, ...}.
        """

        # Store HAR for Page Speed binary.  BUG FIX: the temp file name
        # used to be md5 of the EMPTY string (md5().hexdigest()), i.e.
        # one constant name for every request, so concurrent uploads
        # clobbered each other's files.  Hash the payload instead.
        har_json = json.dumps(har)
        hashname = hashlib.md5(har_json.encode("utf-8")).hexdigest()
        temp_store = config["app_conf"]["temp_store"]
        filename = os.path.join(temp_store, hashname)

        with open(filename, "w") as har_file:
            har_file.write(har_json)

        # Silence the tool's console output in an OS-specific way
        os_type = platform.system()

        if os_type == "Linux":
            std_out = " > /dev/null 2>&1"
        elif os_type == "Windows":
            std_out = " > NUL 2>&1"
        else:
            std_out = ""

        # Run pagespeed_bin
        bin_store = config["app_conf"]["bin_store"]
        pagespeed_bin = os.path.join(bin_store, "pagespeed_bin")

        outfile = filename + ".out"

        # NOTE(review): paths are interpolated into a shell command;
        # acceptable for trusted config values, but subprocess.run with
        # an argument list would be safer
        os.system(pagespeed_bin + \
            " -input_file " + filename + \
            " -output_format formatted_json" + \
            " -output_file " + outfile + \
            std_out)

        # Output report (JSON)
        with open(outfile, "r") as report_file:
            output = json.loads(report_file.read())

        # Final scores
        scores = {"Total Score": int(output["score"])}
        for rule in output["rule_results"]:
            scores[rule["localized_rule_name"]] = int(rule["rule_score"])

        return scores

    @restrict("GET")
    def download(self):
        """Return serialized HAR file"""

        # Parameters from GET request
        id = request.GET["id"]

        # Read HAR file from disk
        filename = os.path.join(config["app_conf"]["temp_store"], id)
        with open(filename, "r") as file:
            data = file.read()

        # JSON to JSON-P
        data = "onInputData(" + data + ");"

        # Add content type header
        response.content_type = mimetypes.guess_type(filename)[0] or "text/plain"

        return data

    @restrict("GET")
    def screenshot(self):

        screen_id = ObjectId(request.GET["id"])

        screen_data = MongoDB().fs.get(screen_id).read()

        response.content_type = "image/jpeg"
        return screen_data

    def read_har(self, doc):
        """Return the serialized HAR stored in ``doc``.

        Returns "" when the document has no "har" field; note the
        stored value itself may be None (multi-HAR master docs).
        """
        # dict.get replaces the deprecated has_key() (removed in Py3);
        # behavior is identical, including a present-but-None value
        return doc.get("har", "")




def stringify_multi_har_info(multi_har_info, mhidx, unbound):
    """Serialize multi-HAR run info for the client.

    Format: "<mhidx>|<entry>|..." where each entry is
    "<screenshot_url>*<img_info>*<img_bar>#<label>".  Returns "" for an
    empty/None multi_har_info.

    @parameter mhidx   - selected sub-run index (string, "-1" for none)
    @parameter unbound - when True, omit transaction-time decorations
    """

    if not multi_har_info:
        return ""

    parts = [mhidx]
    for info in multi_har_info:
        error_str = info["error"]
        info_str = info["info"]

        # Label precedence: error message, then info text, then the
        # shortened URL; bound entries get the transaction time appended
        if error_str:
            info_str = "Error: " + error_str
        else:
            if not info_str:
                # shorten_url is now only called when actually needed
                info_str = h.shorten_url(info["url"])
            if not unbound:
                info_str += " (" + str(info["transaction_time"]) + " ms)"

        if unbound:
            img_info = ""
            img_bar = ""
        else:
            # Seconds label plus bar length (one unit per 250 ms)
            img_info = "%.2f s" % (info["transaction_time"] / 1000.0)
            img_bar = "%i" % (info["transaction_time"] / 250)

        parts.append(info["screenshot_url"] + "*" + img_info + "*" + img_bar + "#" + info_str)

    # str.join replaces the long-deprecated string.join()
    return "|".join(parts)
