# Comparison granularities handled in this module (finest to coarsest):
#   Example / Running Example / Doctest / File

import itertools
import math
import os
import re
import sqlite3
import sys
from collections import defaultdict
from colorsys import hsv_to_rgb
from optparse import OptionParser

from util import get_basename, open_db, parse_clause

DEBUG = False

# Matches any character that is not legal in an SQL identifier.
bad_chars = re.compile('[^0-9a-zA-Z_]')

def identifier(s):
    """Return True if ``s`` consists only of identifier-safe characters."""
    return bad_chars.search(s) is None

def analyse(conn, before_clause, after_clause, after_table='timings', type='example'):
    """
    This function compares individual statements (examples) against all other
    executions of that same statement accross other files and runs.  The
    tightness of the comparison depends on the history (state) whose
    possibilities are as follows:
    
        running - the sequence of commands affecting globals used in this
                  command is identical
        doctest - the sequence of commands from the beginning of this doctest
                  until now is identical
        total   - the sequence of commands from sage startup time until now
                  is identical
    
    In particular, this allows for timings to be correalated accross file
    changes and larger sample sizes for common examples.
    """
    c = conn.cursor()
    c.execute("select run_id, count(*) as c from %s left join runs using (run_id) where %s group by run_id order by c limit 1" % (after_table, after_clause))
    first = list(c)
    if not first:
        print after_clause
        raise ValueError("No matching doctests.")
    else:
        run_id = first[0]['run_id']
    c.execute("create temporary table _backdrop as " +
              "select basename, doctest, source, want, sequence_number, running_state, doctest_state, total_state " +
              "from %s left join runs using (run_id) where %s and run_id='%s' and type='%s'" % (after_table, after_clause, run_id, type))
    target_query = (
        "select _backdrop.*, " +
        "sum(AFTER_TABLE.count_) as after_count, sum(AFTER_TABLE.cputime * AFTER_TABLE.count_) / sum(AFTER_TABLE.count_) as after_cputime, sum(AFTER_TABLE.walltime * AFTER_TABLE.count_) / sum(AFTER_TABLE.count_) as after_walltime ".replace("AFTER_TABLE", after_table) +
        "from _backdrop join %s using (total_state) left join runs using (run_id) where %s and type='%s'" % (after_table, after_clause, type) +
        "group by total_state")
    c.execute("create temporary table _target as %s" % target_query)
    
    aggregate_fields = ("(sum(walltime * count_) / sum(count_))",
                        "sum(walltime_ss)",
                        "(sum(cputime * count_) / sum(count_))",
                        "sum(cputime_ss)",
                        "sum(count_)")
    aggregate_expression = " || ',' || ".join(aggregate_fields)
    
    # Here we do a complicated join to group things according to the various running checksums.
    target_fields = 'basename', 'doctest', 'sequence_number', 'source', 'want', 'after_count', 'after_cputime', 'after_walltime'
    all_fields = ['_target.%s as %s' % (field, field) for field in target_fields]
    join_clauses = []
    order_by = ('basename', 'doctest', 'sequence_number')
    sub_query = ("(SELECT %s from timings join runs using (run_id) " +
                 "where %s AND type='%s' AND STATE_state=_target.STATE_state GROUP BY STATE_state) as STATE_data") % (aggregate_expression, before_clause, type)
    for state in ('running', 'doctest', 'total'):
        all_fields.append(sub_query.replace("STATE", state))
    query = "select %s from _target order by %s" % (", ".join(all_fields), ", ".join(order_by))
    if DEBUG or True:
        print query
        c.execute("EXPLAIN QUERY PLAN " + query)
        print
        for row in c:
            print row
        print
    c.execute(query)
    return c

def analyse_simple(conn, before_clause, after_clause, join_field='doctest', type='package', packages=None):
    """
    Compares doctest timings between runs.
    
    Note that full files and packages as a whole are also stored as "doctests"
    with the times of their sub-components summed.

    Returns a cursor over one row per doctest present on both sides, joining
    the per-doctest "before" and "after" aggregates (before_walltime,
    before_count, ..., after_walltime, after_count, ...).

    NOTE(review): ``join_field`` is currently unused -- the join is always on
    doctest.  The clauses are interpolated directly into the SQL, so callers
    must pass trusted input.
    """
    c = conn.cursor()
    package_clause = create_package_clause(packages)
    # One query template used for both sides: SIDE and CLAUSE are textual
    # placeholders substituted below, since sqlite cannot parameterize
    # identifiers or whole WHERE clauses.
    query = ("create temporary table _SIDE as " +
            "select doctest, " +
                    "sum(walltime * count_) / sum(count_) as SIDE_walltime, " +
                    "sum(walltime_ss) as SIDE_walltime_ss, "+
                    "sum(cputime * count_) / sum(count_) as SIDE_cputime, " +
                    "sum(cputime_ss) as SIDE_cputime_ss, "
                    "sum(count_) as SIDE_count " +
            "from timings join runs using (run_id) " +
            "where CLAUSE AND type='%s' AND %s " % (type, package_clause) + 
            "group by (doctest)")
    # Materialize the two aggregate tables, then join them by doctest.
    c.execute(query.replace("SIDE", "before").replace("CLAUSE", before_clause))
    c.execute(query.replace("SIDE", "after").replace("CLAUSE", after_clause))
    # Unique index lets sqlite look up _after rows by doctest during the join.
    c.execute("create unique index _after_index on _after (doctest)")
    if DEBUG:
        print "-" * 72
        for row in c.execute("select * from _before"):
            print row
        print "-" * 72
        for row in c.execute("select * from _after"):
            print row
        print "-" * 72
    c.execute("select * from _before join _after using (doctest)")
    return c

def generate_stats(data, state=None, type="mixed", cpu_or_wall="cpu", always=True):
    """
    Turn the aggregated timing columns for ``state`` into a display triple
    ``[diff, html_color, tooltip_text]``.

    With ``state=None`` the states are tried from strictest history match to
    loosest ('total', then 'doctest', then 'running'); the final attempt is
    made with always=True, so a triple is always returned in that case.
    """
    if state is None:
        for candidate, required in (('total', False), ('doctest', False), ('running', True)):
            result = generate_stats(data, candidate, type, cpu_or_wall, always=required)
            if result is not None:
                return result
        return result
    # The walltime column acts as the presence marker for the whole blob
    # (see de_concatinate) -- presumably either all five columns are set or
    # none are; verify against the query that fills them.
    if not data.get(state + '_walltime'):
        return [0, "white", "No previous data."] if always else None
    prior_key = state + "_" + cpu_or_wall + "time"
    after = data["after_" + cpu_or_wall + "time"]
    before = mean = data[prior_key]
    count = data[state + "_" + "count"]
    total = count * mean
    sum_squares = data[prior_key + "_ss"]
    if count == 1:
        # A lone sample has no spread; guess a quarter of the mean.
        std_dev = before / 4
    else:
        sum_diff = sum_squares - mean * total
        if -1e-10 < sum_diff <= 0:
            # Tiny negative values are floating-point rounding artifacts.
            std_dev = 1e-100
        else:
            std_dev = math.sqrt(sum_diff / (count - 1))
    diff = after - before
    if type == "mixed":
        # Large deviations get reported raw, small ones in units of sigma.
        type = "raw" if abs(diff) > 2 * std_dev else "normalized"
    if type == "normalized":
        diff /= 2 * std_dev
    else:
        diff = math.log(after / before) / math.log(2)
    if abs(diff) > 1:
        # map (1,oo) onto (1,2) so the hue below stays bounded
        sgn = diff / abs(diff)
        diff = sgn * (2 - 1 / abs(diff))
    hue = (2 - diff) / 6
    r, g, b = hsv_to_rgb(hue, 1, 1)
    color = "#%02x%02x%02x" % (int(r * 255), int(g * 255), int(b * 255))
    label = "Current %.2g Previous %.2g &plusmn; %d%% (%s runs)" % (
        after, before, int(std_dev / before * 200), int(count))
    return [diff, color, label]

def de_concatinate(d):
    """
    Undo the hack of concatenating all the stats (because sqlite can't handle
    subselects with multiple values in an expression): split each
    ``<state>_data`` blob back into its five float columns in place.
    """
    columns = ('walltime', 'walltime_ss', 'cputime', 'cputime_ss', 'count')
    for state in ('running', 'doctest', 'total'):
        blob = d[state + '_data']
        if blob:
            for name, raw in zip(columns, blob.split(',')):
                d['%s_%s' % (state, name)] = float(raw)
    

def analyse_single_file(*args):
    """
    Render the per-file report: run analyse(*args) and feed its rows, grouped
    by doctest, into the report.html jinja2 template.  Returns the rendered
    HTML as a string.

    NOTE(review): ``stats`` is filled lazily while the template consumes
    ``doctest_iter()``; this relies on the template iterating ``doctests``
    before (or while) indexing into ``stats`` -- confirm against report.html.
    """
    # Every combination of timing source, display type and history state;
    # state=None means "best available state" (see generate_stats).
    stat_permutations = [(cpu_or_wall, type, state)
                            for cpu_or_wall in ('cpu', 'wall')
                            for type in ("normalized", "mixed", "raw")
                            for state in (None, 'running', 'doctest', 'total')]
    from jinja2 import Template
    template = Template(open("report.html").read())
    stats = []
    def doctest_iter():
        # Group the flat, (basename, doctest, sequence_number)-ordered rows
        # into one dict per doctest, yielding each completed group.
        last_doctest = None
        doctest_data = None
        for row in analyse(*args):
            if last_doctest != row['doctest']:
                last_doctest = row['doctest']
                if doctest_data is not None:
                    yield doctest_data
                examples = []
                doctest_data = dict(
                    id = "doc",
                    doctest = row['doctest'],
                    examples = examples,
                )
            example = dict(row)
            # The example's id doubles as its index into the shared stats list.
            example['id'] = len(stats)
            de_concatinate(row)
            stats.append([generate_stats(row, state=state, cpu_or_wall=cpu_or_wall, type=type) 
                            for cpu_or_wall, type, state in stat_permutations])
            examples.append(example)
        # Flush the final group.
        if doctest_data is not None:
            yield doctest_data
                
    return template.render(stats=stats, doctests=doctest_iter(), enumerate=enumerate)

# Dimensions of the per-package statistics grid.
cpu_or_wall_values = ('cpu', 'wall')
type_values = ('normalized', 'mixed', 'raw')

def cross_product_iter(*args):
    """Yield tuples from the cartesian product of the given iterables.

    Yields the single empty tuple when called with no arguments.  Thin
    generator wrapper around itertools.product (this used to be a hand-rolled
    recursive implementation).
    """
    for combo in itertools.product(*args):
        yield combo

def cross_product(*args):
    """Return the cartesian product of the given iterables as a list of tuples."""
    return list(itertools.product(*args))

# All (cpu_or_wall, type) stat combinations, in display order.
package_stat_permutations = cross_product(cpu_or_wall_values, type_values)


def analyse_packages(conn, before_clause, after_clause, depth=None, order='doctest', packages=None):
    """
    Aggregate before/after timings per package/doctest and attach display
    stats for every (cpu_or_wall, type) combination.

    - depth: if set, skip doctests nested deeper than this many dots.
    - order: 'doctest' (or empty) sorts alphabetically; any other string is
      scanned for the keywords 'wall', 'mixed' and 'raw' and sorts by the
      corresponding stat's diff value.

    Returns the sorted list of row dicts, each with a 'stats' mapping keyed
    by (cpu_or_wall, type).
    """
    results = []
    for row in analyse_simple(conn, before_clause, after_clause, packages=packages):
        if depth and row['doctest'].count('.') >= depth:
            continue
        data = dict(row)
        stats = {}
        for stat in package_stat_permutations:
            cpu_or_wall, type = stat
            stats[stat] = generate_stats(row, state='before', cpu_or_wall=cpu_or_wall, type=type)
        data['stats'] = stats
        results.append(data)
    # BUG FIX: this used to read ``if order:``, which made the keyword-driven
    # sort below unreachable (any non-empty order sorted by doctest) and
    # crashed on order=None ('wall' in None).
    if not order or order == 'doctest':
        cmp_fn = lambda a, b: cmp(a['doctest'], b['doctest'])
    else:
        # defaults
        cpu_or_wall = 'cpu'
        type = 'normalized'
        # keyword overrides
        if 'wall' in order:
            cpu_or_wall = 'wall'
        if 'mixed' in order:
            type = 'mixed'
        elif 'raw' in order:
            type = 'raw'
        extract = lambda x: x['stats'][cpu_or_wall, type][0]
        # BUG FIX: was ``lambda(a, b): ...`` -- a one-argument tuple-unpacking
        # lambda, but list.sort's cmp function is called with two arguments.
        cmp_fn = lambda a, b: cmp(extract(a), extract(b))
    results.sort(cmp_fn)
    return results

def render_packages(*args, **kwds):
    """Render the package comparison report through the packages.html template.

    Positional and keyword arguments are forwarded to analyse_packages.
    Returns the rendered HTML as a string.
    """
    from jinja2 import Template
    # BUG FIX: close the template file deterministically instead of leaking
    # the handle to the garbage collector.
    template_file = open("packages.html")
    try:
        template = Template(template_file.read())
    finally:
        template_file.close()
    return template.render(packages=analyse_packages(*args, **kwds), stat_dims=(cpu_or_wall_values, type_values), cross_product=cross_product, enumerate=enumerate)

def create_package_clause(package):
    """Return an SQL boolean clause restricting ``doctest`` to a package prefix.

    ``package`` may be None (no restriction, returns "1"), a prefix string,
    or a list of either (combined with OR).

    Raises ValueError if a package name contains a single quote, since the
    value is interpolated into a string-built query.
    """
    if package is None:
        return "1"
    elif isinstance(package, list):
        return "(%s)" % " OR ".join(map(create_package_clause, package))
    if "'" in package:
        # BUG FIX: this was an ``assert``, which is stripped under -O and is
        # the only guard against SQL injection here.
        raise ValueError("invalid package name: %r" % (package,))
    return "substr(doctest, 1, %s) == '%s'" % (len(package), package)


if __name__ == "__main__":
    parser = OptionParser()
    parser.add_option("-b", "--before", dest="before", default="all", metavar="SQL_CLAUSE", help="version, run_id, date")
    parser.add_option("-a", "--after", dest="after", default="last", metavar="SQL_CLAUSE")
    parser.add_option("-d", "--database", dest="database", default="timings.db", metavar="/path/to/db.sqlite")
    parser.add_option("--package", dest="package_list", metavar="pkg.subpkg", action="append")
    parser.add_option("--html", dest="html_output", metavar="/path/to/output.html")
    
    parser.add_option("--timing", dest="cpu", default="both", metavar="[cpu|wall|both]")
    parser.add_option("--match", dest="match", default="best", metavar="[total|doctest|running|best]")
    parser.add_option("--list", dest="list", default=None, metavar="SQL_CLAUSE or N")
    parser.add_option("--list_count", dest="list_count", type=int, default=10, metavar="N")
    parser.add_option("--sql", dest="sql", metavar="SQL")
    
    (options, args) = parser.parse_args()

    conn = open_db(options.database)
    if options.list:
        try:
            list_count = int(options.list)
            list_clause = "1"
        except ValueError:
            list_count = options.list_count
            list_clause = parse_clause(options.list, conn, time_column='run_id')
        sql = ("select run_id, version, summarize(basename) as summary, count(*) as c, '' as extra from timings join runs using (run_id) " +
               "where type='file' and (%s) and (%s)" % (create_package_clause(options.package_list), list_clause) +
               "group by run_id order by run_id desc limit %s" % list_count)
        print sql
        for row in conn.execute(sql):
            print row['run_id'], '\t', row['summary'], '\t', "(%s)" % row['c'], row['version'], row['extra']
        sys.exit(0)
    
    if options.sql:
        for row in conn.execute(options.sql):
            print row
        sys.exit(0)
            
    
    before_clause = parse_clause(options.before, conn)
    after_clause = parse_clause(options.after, conn)

    if args:
        for filename in args:
            basename = get_basename(filename)
            basename_clause = " AND timings.basename='%s'" % basename
            open("%s.html" % basename, "w").write(analyse_single_file(conn, before_clause, after_clause + basename_clause))
    else:
        open("all.html", "w").write(render_packages(conn, before_clause, after_clause, packages=options.package_list))
