#!/usr/bin/env python
'''Module for I/O operations on filesystem.
Used to write the index.html to display the graphical interface.
Version: 0.1
Author: balzano
Date: 20.11.2013

    Usage:
'''
from __future__ import with_statement, absolute_import, print_function, division
import time
from optparse import OptionParser
import re
import string
# only for logging
import logging
import sys
import os
from itertools import chain
from datetime import datetime
from operator import itemgetter
from os.path import abspath, dirname, sep
from sys import path
import socket


try:
    # On Windows, resolve the user profile directory through the Win32
    # shell API (more reliable than environment variables there).
    from win32com.shell import shell, shellcon
    HOME_DIR = shell.SHGetFolderPath(0, shellcon.CSIDL_PROFILE, None, 0)
except ImportError:
    # pywin32 is not installed (e.g. Linux/Mac): fall back to '~' expansion
    HOME_DIR = os.path.expanduser('~')

# assumes the standard distribution paths
# sys.path[0] is the directory containing the invoked script, so the
# package directory is taken to be its parent
PACKAGE_DIR = dirname(abspath(path[0]))

# webpy
from . import web
# config file
from . import config_pyanalyze
# database
from . import lib_database
from .lib_database import PyAnalyzeDatabase
# pdf package
from .fpdf import FPDF, HTMLMixin   

# FPDF can buffer output or write to file
WRITE_TO_FILE = 'F'
# html related
# TODO: relative path?
# template that contains style and header
START_TEMPLATE_NAME = 'start_template.html'
# template that contains footer
END_TEMPLATE_NAME = 'end_template.html'
# names and headers
BROWSED_TABLE_HEADER = ['Session ID','Session Start', 'URL', 'Page Load Time','Waterfall Graph']
METRICS_TABLE_HEADER = ['Session ID','Session_Start','avg(DNS)','avg(HTTP)','avg(TCP)','Page Load Time (ms)']
DIAGNOSE_TABLE_HEADER_CLIENTGAP = ['Session ID','Session Start','Page Load Time','Total Client Idle Time']
DIAGNOSE_TABLE_HEADER_SERVER = ['Session ID','Session Start','Page Load Time','avg(HTTP)', 'avg(TCP)','avg(HTTP) / avg(TCP)']
DIAGNOSE_TABLE_HEADER_INTERNET = ['Session ID','Session Start','Page Load Time','avg (Curr TCP degradation)','avg (Near Hosts TCPs degradation)','F1*','F2*']
DIAGNOSE_TABLE_HEADER_LOCAL = ['Session ID','Session Start','Page Load Time','avg (Curr TCP degradation)','avg (Near Hosts TCPs degradation)','F2*']
DIAGNOSE_TABLE_HEADER_LOCAL_STD = ['Session ID','Session Start','Page Load Time','avg (Curr TCP degradation)','avg (Near Hosts TCPs degradation)','F1*']
DIAGNOSE_RESUME_HEADER = ['','Client', 'Server','Local Access', 'Wild Internet']
MID_COL_NAME = 'col1'
MID_COL_HEADER = 'No Data'
LEFT_COL_NAME = 'col2'
LEFT_COL_HEADER = 'Main Links'
RIGHT_COL_NAME = 'col3'
RIGHT_COL_HEADER = 'Ip Geolocation'
RIGHT_COL_TABLE_HEADER = ['Parameter', 'Value']
# html tags
BR_TAG = '<BR>'
IMG_TAG = '<IMG src="%s" alt=%s>'
LI_START_TAG = '<li>'
LI_END_TAG = '</li>'
UL_START_TAG = '<ul>'
UL_NOSTYLE_START_TAG = '<ul style="list-style: none;">'
UL_END_TAG = '</ul>'
DIV_START_TAG = '<div class=%s>'
DIV_END_TAG = '</div>'
HEADER_TAG = '<h2>%s</h2>'
HEADER_TAB_TAG = '<h3>%s</h3>'
TABLE_START_TAG = '<table border="1" align="center">'
TABLE_END_TAG = '</table>'
TR_START_TAG = '<tr>'
TR_END_TAG = '</tr>'
TH_TAG = '<th>%s</th>'
TH_TAG_WIDTH = '<th width="50%">%s</th>'
TH_TAG_ALIGNED = '<th align="center">%s</th>'
TD_START_TAG = '<td>'
TD_START_TAG_ALIGNED = '<td align="center">'
TD_END_TAG = '</td>'
FONT_RED_TAG = '<font color="red">%s</font>'
A_TAG = '<a href="%s">%s</a>'
P_TAG_WITH_ID = '<p id="%s%">%s</p>'
P_TAG = '<p>%s</p>'
# nr. of spaces before the DIV tags in the main page
NR_DIV_SPACES = 13
# nr. of spaces before the h2/IMG tags in the main page
NR_INNER_SPACES = 17
# end of line
END_LINE = '\n'
# separator for directories when accessing through http
# for example, for an image the path will always be ../images/xx.png
PATH_SEPARATOR = '/'
# text to display for Service link
TEXT_YT = ' video'
# text to display for cache server
TEXT_CACHE = 'cache server'
# names will have parameter and timestamp connected by '_'
NAME_CONNECTOR = '_'
# dictionaries/lists with parameters to plot
# key that contains all the parameters to plot
ALL_KEY = 'All_Parameters'
# key that contains all the main parameters to plot
MAIN_KEY = ' Main_Parameters'



# key that is used to display the list of browsed websites 
URLS_KEY = 'HTTP_Tracing_Logs'
# key that is used to display the list of browsed websites 
GEN_KEY = 'General_Metrics'

STAT_KEY = 'Statistics'
DIAGNOSE_KEY = 'Diagnosis_results'
CPU_SIGNAL = 'CPU_Signal_Strenght'
GRAPHS_KEY = 'CDFs_GRAPHS'
ARRAY = "[['Task', 'Sessions'],['Client Side',     15],['Server Side',      10],['Local Access',  39],['Wild Internet', 10]]"

DIV_PIE_CHART = ( 
#     "<script type='text/javascript' src='https://www.google.com/jsapi'></script>"   
#     "<script type='text/javascript'>"
#       "google.load('visualization', '1', {packages:['corechart']});"
#       "google.setOnLoadCallback(drawChart);"
#       "function drawChart() {"
#         "var data = google.visualization.arrayToDataTable(["
#           "['Task', 'Sessions'],"
#           "['Client Side',     15],"
#           "['Server Side',      10],"
#           "['Local Access',  39],"
#           "['Wild Internet', 10]"
#         "]);"
#  
#         "var options = {"
#           "//title: 'Limitation Causes for High PLT',"
#           "backgroundColor: '#EBF4FA',"
#           "chartArea:{left:20,top:20,width:'90%',height:'90%'},"
#           "is3D: 'true',"
#           "legend:{    position: 'right', "
#                       "textStyle: {color: 'blue', fontSize: 16},"
#                       "alignment: 'center'}"
#         "};"
#         "var chart = new google.visualization.PieChart(document.getElementById('chart_div'));"
#         "chart.draw(data, options); }"
#     "</script>"
#      
"<div id='chart_div' name='diagnose' data='Task, Sessions,Client Side,15, Server Side, 32, Local Access, 13, Wild Internet, 14' style='height: 300px'></div>")
#style='width:60%; left:20% ;height: 500px'


# key that is used to display the existent databases in the database directory
DB_KEY = 'Database_Reset'
# key that is used to link to the description
DOC_KEY = 'Project_documentation'
# database extension
DB_EXTENSION = '(pyanalyze_database\.db)$'
# database input parameter
DB_INPUT = '?db='
# user input parameter
USER_INPUT = 'user='


ALL_PAGES = [GEN_KEY,URLS_KEY,STAT_KEY,DIAGNOSE_KEY,GRAPHS_KEY,CPU_SIGNAL,DB_KEY]

# dictionary with mappings for what to plot
ALL_PLOTS = {
        ALL_KEY: ['VideosWithInterruptions', 'AvgThroughput',
                   'BufferingDuration', 'PingAvg'],
        MAIN_KEY: ['VideosWithInterruptions', 'AvgThroughput',
                   'BufferingDuration', 'PingAvg'],
        'QoE': ['VideosWithInterruptions','DownloadInterruptions',
                'ReceptionRatio', 'BufferingDuration', 'BufferDurationAtEnd'],
        'QoS': ['AvgThroughput', 'MaxInstantThp', 'InitialRate',
                'InitialData', 'DownloadBytes', 'DownloadTime',
                'PlaybackDuration', 'PingMin', 'PingAvg', 'PingMax'],
        'Video_characteristics': ['VideoLength', 'VideoDuration',
                                  'EncodingRate'],
        }
# dictionary with explanations for what is plotted
MAN_PLOTS = {
        'PingMin': 'The minimum recorded ping time to the resolved IP address'
                    ' of the cache server',
        'PingAvg': 'The average recorded ping time to the resolved IP address'
                    ' of the cache server',
        'PingMax': 'The maximum recorded ping time to the resolved IP address'
                    ' of the cache server',
        'DownloadTime': 'The time taken to download the video sample (we do not'
                        ' download the entire video, but only its beginning)',
        'VideoDuration': 'The actual duration of the complete video',
        'VideoLength': 'The length of the complete video',
        'EncodingRate': 'The encoding rate of the video:'
                        ' VideoLength / VideoDuration',
        'DownloadBytes': 'The length of the video sample',
        'DownloadInterruptions': 'Number of interruptions experienced during'
                                 ' the download',
        'InitialData': 'Data downloaded in the initial buffering period',
        'InitialRate': 'The mean data rate during the initial'
                       ' buffering period',
        'BufferingDuration': 'Accumulated time spent in buffering state',
        'PlaybackDuration': 'Accumulated time spent in playing state',
        'BufferDurationAtEnd': 'The buffer length at the end of the download',
        'MaxInstantThp': 'The maximum instantaneous throughput of the '
                         'download',
        'AvgThroughput': 'The average throughput: DownloadBytes / DownloadTime',
        'ReceptionRatio': 'Quality of Experience parameter: '
                     'AvgThroughput / EncodingRate',
        'VideosWithInterruptions': 'Quality of Experience parameter: signals'
                            ' interruptions during video download <BR>'
                            '(1 - there are interuptions, 0 - no interuptions)'
        }
# average parameters
LOCAL_PARAM_DESCRIPTION = ['IP Address', 'Country',
                'City', 'Region', 'Latitude',
                'Longitude']
AVERAGE_PARAM = ['DownloadTime', 'DownloadInterruptions', 'PingAvg']
# parameters regarding urls - must change Service position when needed!!!
URL_PARAM = ['Url', 'CacheUrl', 'Service']
SERVICE_POS_IN_URL = 2
# indexes of parameters (for example {'Url': 0})
#INDEX_DICT = dict((v,k) for (k,v) in enumerate(DATA_PARAM))
# timestamp position in the data extracted from the database
TIMESTAMP_POSITION = -1
# max timestamp length (not to display fractions)
MAX_TIMESTAMP_LENGTH = 19
# precision of average parameters
AVERAGE_PRECISION = 4
# message to display in case there are not enough records in db to create plots
DB_NO_RECORDS_MSG = ('There are not enough records in the selected database.'
                    ' Perform some browsing experiments before showing the results.')
# only make page autorefresh if database has changed recently
REFRESH_TIMEOUT = 120000 # 120s
# http://ckon.wordpress.com/2008/07/25/stop-using-windowonload-in-javascript/
ONLOAD_REFRESH_SCRIPT = ('<script type="text/JavaScript">'
                         'window.addEventListener ?\n'
                         'window.addEventListener("load",'
                                                     'AutoRefresh(%i),false):\n'
                         'window.attachEvent && window.attachEvent("onload",'
                                                    'AutoRefresh(%i));\n'
                         '</script>\n'% (REFRESH_TIMEOUT, REFRESH_TIMEOUT))

# Script for the IP Address
IP_ADDRESS_SCRIPT = ('<script type="application/javascript">'
                        'function getip(json){document.write(json.ip);}'
                    '</script>'
                    '<script type="application/javascript" src="http://jsonip.appspot.com/?callback=getip"></script>')

# MaxMind scripts to geolocalize users
# Country
MAXMIND_COUNTRY_SCRIPT = ('<script id="idtest" type="text/javascript">'
                             'document.write( geoip_country_name() );  // get country name'
                          '</script>')
# City
MAXMIND_CITY_SCRIPT = ('<script type="text/javascript">'
                           'document.write( geoip_city() );// get city name'
                       '</script>')
# Region
MAXMIND_REGION_SCRIPT = ('<script type="text/javascript">'
                             'document.write( geoip_region_name() );// get region name'
                         '</script>')
# Latitude
MAXMIND_LATITUDE_SCRIPT = ('<script type="text/javascript">'
                               'document.write( geoip_latitude() );// get latitude'
                           '</script>')
# Longitude
MAXMIND_LONGITUDE_SCRIPT = ('<script type="text/javascript">'
                              'document.write( geoip_longitude() );// get longitude'
                           '</script>')

LOCAL_PARAMETERS_VALUES = [IP_ADDRESS_SCRIPT, MAXMIND_COUNTRY_SCRIPT, MAXMIND_CITY_SCRIPT, 
                           MAXMIND_REGION_SCRIPT, MAXMIND_LATITUDE_SCRIPT, MAXMIND_LONGITUDE_SCRIPT]


# Service being crawled taken from the database info, default config_pyanalyze
SERVICE = config_pyanalyze.CRAWL_SERVICE
# message to display PDF path
PDF_MSG = 'The PDF report of this page can be found at:<BR>'
PDF_HEADER = '<H1 align="center">Firelog Web Interface Report</H1>'
# how many BR to insert in order to not write man under image
PDF_SPACE_FOR_IMG = 12
# how many BR to insert between images
PDF_SPACE_BETWEEN = 5
# connector for http links (needed for rel path of pdf on win)
HTTP_CONNECTOR = '/'



class PyanalyzeFPDF(FPDF, HTMLMixin):
    ''' Class to create pdf from html.

    Combines FPDF (pdf generation) with HTMLMixin (html rendering) and
    remembers the output file name so the finished document can be
    flushed to disk with a single close_pdf() call.
    '''

    def __init__(self, pdf_name):
        ''' Set the pdf output file name and initialise FPDF.
        '''
        self.pdf_name = pdf_name
        super(PyanalyzeFPDF, self).__init__()

    def close_pdf(self):
        ''' Write the buffered document to self.pdf_name on disk
        (WRITE_TO_FILE selects fpdf's write-to-file output mode). '''
        self.output(self.pdf_name, WRITE_TO_FILE)

def create_pdf(pdf_name, timestamp, html):
    ''' Create a pdf report named pdf_name from the given html fragment.

    The fragment is wrapped with the report header by
    html_graphs_for_pdf() before being rendered and written to disk.

    pdf_name -- path of the pdf file to produce
    timestamp -- forwarded to html_graphs_for_pdf (see there)
    html -- html fragment to embed in the report body
    '''
    # removed debug print that dumped the whole html text to stdout on
    # every report generation
    pdf = PyanalyzeFPDF(pdf_name)
    pdf.add_page()
    html_text = html_graphs_for_pdf(timestamp, html)
    pdf.write_html(html_text)
    pdf.close_pdf()


def html_graphs_for_pdf(timestamp, html):
    ''' Return the html body for the pdf report.

    Prepends the report header (PDF_HEADER) to the supplied html
    fragment and appends a line break.

    timestamp -- currently unused; kept for interface compatibility with
                 earlier versions that embedded timestamped graphs
                 (see the project history for the removed graph code)
    html -- html fragment forming the report body
    '''
    # removed the stray debug print and ~50 lines of commented-out graph
    # embedding code; the effective behavior is just header + body + <BR>
    html_text = PDF_HEADER + html + BR_TAG
    return html_text


# TODO: check on windows
def plot_path_to_write_in_pdf(param, timestamp):
    ''' Return the path to the plot relative to the TEMPLATES_DIR
    Will have the pattern:
        <RRD_PLOT_DIR>/<plot_name>
    '''
    # '/' is used regardless of platform because the path is served over http
    plot_basename = os.path.basename(plot_filename(param, timestamp))
    return PATH_SEPARATOR.join((config_pyanalyze.RRD_PLOT_DIR, plot_basename))

def pdf_filename(param, timestamp):
    ''' Return the file name of the pdf report.

    NOTE(review): currently a stub that always returns a placeholder
    image path and ignores both arguments; the original implementation
    built <PDF_DIR>/<hostname>.<timestamp>.<param_PDF_FILE> through
    check_out_files.
    '''
    return 'images/eurecom.gif'


def plot_filename(param, timestamp):
    ''' Return the file name of the plot.

    NOTE(review): currently a stub that always returns a placeholder
    image path and ignores both arguments; the original implementation
    built <RRD_PLOT_DIR>/<hostname>.<timestamp>.<param_IMAGE_FILE>
    through check_out_files.
    '''
    return 'images/eurecom.gif'


def index_filename(param, timestamp):
    ''' Return the file name of the index (try to create it if it does not
    exist). Will have the pattern:
                <TEMPLATES_DIR>/<hostname>.<timestamp>.<param_TEMPLATE_FILE>
    '''
    file_pattern = NAME_CONNECTOR.join((param, config_pyanalyze.TEMPLATE_FILE))
    return check_out_files(file_pattern, config_pyanalyze.TEMPLATES_DIR,
                           str(timestamp))

def check_templates_exist(timestamp):
    ''' Verify that all html templates and their plots have been created.

    Returns True only when every page in ALL_PAGES has a non-empty index
    template and a non-empty plot file; False otherwise.
    '''
    # the original iterated chain(ALL_PAGES), a no-op wrapper around a
    # single iterable; iterate the list directly
    for parameter in ALL_PAGES:
        try:
            if (not os.path.getsize(index_filename(parameter, timestamp))
                    or not os.path.getsize(plot_filename(parameter,
                                                         timestamp))):
                print('File not found')
                return False
        except OSError:
            # os.path.getsize raises OSError when the file is missing, so
            # the empty-file check above never sees absent files; treat a
            # missing file the same as an empty one
            print('File not found')
            return False
    return True

def check_out_files(file_pattern, directory, timestamp):
    """Return a full path of the file used for the output
    Test if the path exists, create if possible or create it in default user
    directory

    >>> file_pattern = None
    >>> directory = 'logs'
    >>> timestamp = 'doc_test'
    >>> check_out_files(file_pattern, directory, timestamp) #doctest: +ELLIPSIS
    >>> file_pattern = 'pytomo.log'
    >>> check_out_files(file_pattern, directory, timestamp) #doctest: +ELLIPSIS
    '...doc_test.pytomo.log'

    """
    if file_pattern is None:
        return None
    if config_pyanalyze.USE_PACKAGE_DIR:
        base_dir = PACKAGE_DIR
    else:
        base_dir = os.getcwd()
    if directory:
        out_dir = sep.join((base_dir, directory))
        if not os.path.exists(out_dir):
            try:
                os.makedirs(out_dir)
            # 'except OSError, mes' was Python 2-only syntax; 'as' works on
            # both Python 2.6+ and Python 3, matching the file's __future__
            # compatibility imports
            except OSError as mes:
                print(
                    'Out dir %s does not exist and cannot be created\n%s'
                    % (out_dir, mes))
                if HOME_DIR:
                    print('Will use home base dir: %s'
                                           % HOME_DIR)
                    out_dir = sep.join((HOME_DIR, directory))
                    if not os.path.exists(out_dir):
                        # do not catch OSError as it's our second attempt
                        os.makedirs(out_dir)
                else:
                    print(
                        'Impossible to create output file: %s' % file_pattern)
                    raise IOError
    else:
        out_dir = os.getcwd()
    # log files are named after the host name, every other output after
    # the configured client id
    if file_pattern == 'pyanalyze.log':
        out_file = sep.join((out_dir, '.'.join((socket.gethostname(),
                                            timestamp, file_pattern))))
    else:
        out_file = sep.join((out_dir, '.'.join((config_pyanalyze.CLIENTID,
                                            timestamp, file_pattern))))
    # do not catch IOError: just test that the out file is writable
    # (removed the debug print that ran on every call)
    with open(out_file, 'a') as _:
        pass
    return out_file



def get_latest_specific_file(path, include):
    ''' Function to return the newest file in a path
    >>> import os.path
    >>> from tempfile import NamedTemporaryFile
    >>> INCLUDE = 'test'
    >>> f = NamedTemporaryFile(suffix=INCLUDE, delete=False)
    >>> f.name == get_latest_specific_file(os.path.dirname(f.name), INCLUDE)
    True
    >>> f.close()
    >>> os.unlink(f.name)
    '''
    candidates = get_specific_files(path, include)
    try:
        return max(candidates, key=os.path.getmtime)
    except TypeError:
        # get_specific_files returned None, i.e. path does not exist
        return None

def get_specific_files(path, include):
    ''' Function to return all the files in path that contain include string in
    their name
    >>> import os.path
    >>> from tempfile import NamedTemporaryFile
    >>> INCLUDE = 'test'
    >>> f1 = NamedTemporaryFile(suffix=INCLUDE, delete=False)
    >>> f2 = NamedTemporaryFile(suffix=INCLUDE, delete=False)
    >>> f3 = NamedTemporaryFile(delete=False)
    >>> set([f1.name, f2.name]) == set(
    ... get_specific_files(os.path.dirname(f1.name), INCLUDE))
    True
    >>> set([f1.name, f2.name, f3.name]) == set(
    ... get_specific_files(os.path.dirname(f1.name), INCLUDE))
    False
    >>> f1.close()
    >>> f2.close()
    >>> f3.close()
    >>> os.unlink(f1.name)
    >>> os.unlink(f2.name)
    >>> os.unlink(f3.name)
    '''
    matcher = re.compile(include)
    # only the top level of path is scanned: os.walk is abandoned after its
    # first iteration; when path does not exist the loop body never runs
    # and the function returns None (callers rely on that)
    for _root, _dirs, file_names in os.walk(path):
        # both Linux&Win must have / as separator (http access)
        matching = [path + PATH_SEPARATOR + name
                    for name in file_names if matcher.search(name)]
        return sorted(matching, key=os.path.getmtime, reverse=True)

# AO 20121015 not used anymore
def get_file_by_param_timestamp(path, parameter, timestamp):
    ''' Function to return from the path directory the files for a specific
     parameter timestamped or None.
     The filenames are relative to the parent directory.
    >>> import os.path
    >>> from tempfile import NamedTemporaryFile
    >>> from time import time
    >>> PARAM = 'DownloadTime'
    >>> TIMESTAMP = str(int(time()))
    >>> RRD_PLOT_DIR = 'images'
    >>> f1 = NamedTemporaryFile(suffix=PARAM, dir=RRD_PLOT_DIR, delete=False)
    >>> f2 = NamedTemporaryFile(suffix=TIMESTAMP, dir=RRD_PLOT_DIR,
    ... delete=False)
    >>> f3 = NamedTemporaryFile(suffix=(PARAM + '_' + TIMESTAMP),
    ... dir=RRD_PLOT_DIR, delete=False)
    >>> os.path.basename(f3.name) == os.path.basename(
    ... get_file_by_param_timestamp(RRD_PLOT_DIR, PARAM, TIMESTAMP))
    True
    >>> os.path.basename(f2.name) == os.path.basename(
    ... get_file_by_param_timestamp(RRD_PLOT_DIR, PARAM, TIMESTAMP))
    False
    >>> os.path.basename(f1.name) == os.path.basename(
    ... get_file_by_param_timestamp(RRD_PLOT_DIR, PARAM, TIMESTAMP))
    False
    >>> f1.close()
    >>> f2.close()
    >>> f3.close()
    >>> os.unlink(f1.name)
    >>> os.unlink(f2.name)
    >>> os.unlink(f3.name)
    '''
    try:
        file_name = str(os.path.pardir + PATH_SEPARATOR +
                get_specific_files(path, parameter + NAME_CONNECTOR +
                                                            str(timestamp))[0])
    except IndexError:
        file_name = None
    return file_name



def write_general_metrics(f_index, user):
    ''' Write the table of general browsing metrics into the html template.

    f_index -- writable file object for the index page
    user -- database wrapper exposing fetch_metrics_with_order(); each row
            is (sid, session_start, avg_dns, avg_http, avg_tcp, plt)
    '''
    indent = ' ' * NR_INNER_SPACES
    f_index.write((indent + HEADER_TAG + END_LINE) % GEN_KEY)
    f_index.write(indent + BR_TAG + END_LINE)

    metrics_data = user.fetch_metrics_with_order('sid')

    f_index.write(indent + TABLE_START_TAG + END_LINE)
    # header row
    f_index.write(indent + TR_START_TAG + END_LINE)
    for column_name in METRICS_TABLE_HEADER:
        f_index.write((indent + TH_TAG_ALIGNED + END_LINE) % column_name)
    f_index.write(indent + TR_END_TAG + END_LINE)
    # data rows, newest session first
    for (sid, starting, dns, http, tcp, plt) in reversed(metrics_data):
        f_index.write(indent + TR_START_TAG + END_LINE)
        cells = (str(sid), str(starting),
                 '%.3f' % dns, '%.3f' % http, '%.3f' % tcp)
        for cell in cells:
            f_index.write(indent + TD_START_TAG_ALIGNED + cell +
                          TD_END_TAG + END_LINE)
        plt_text = str(int(plt)) + ' ms'
        if plt > 5000:
            # highlight abnormally long page load times in red
            f_index.write((indent + TD_START_TAG_ALIGNED + FONT_RED_TAG +
                           TD_END_TAG + END_LINE) % plt_text)
        else:
            f_index.write(indent + TD_START_TAG_ALIGNED + plt_text +
                          TD_END_TAG + END_LINE)
        f_index.write(indent + TR_END_TAG + END_LINE)
    f_index.write(indent + TABLE_END_TAG + END_LINE)



def write_database_archive(f_index, db_dir):
    ''' Write the list of databases from db_dir in the html template.

    Each existing database file is rendered as a link whose href carries
    the database path as a query parameter; web.py retrieves it as
    described in http://webpy.org/cookbook/input
    '''
    indent = ' ' * NR_INNER_SPACES
    f_index.write(indent + BR_TAG + END_LINE)
    # the column header
    f_index.write((indent + HEADER_TAG + END_LINE) % DB_KEY)
    # unstyled list holding one link per database file
    f_index.write(indent + UL_NOSTYLE_START_TAG + END_LINE)
    for database in get_specific_files(db_dir, DB_EXTENSION):
        # function is called from pyanalyze directory, the image dir is
        # subfolder of parent directory
        f_index.write((indent + LI_START_TAG + A_TAG + END_LINE) %
                      (DB_INPUT + database, os.path.basename(database)))
        f_index.write(indent + LI_END_TAG + END_LINE)
    f_index.write(indent + UL_END_TAG + END_LINE)



def write_middle_column(f_index, timestamp, links, user, *parameters):
    ''' Write the header and contents of the middle column.

    Depending on parameters[0] the column shows the general metrics
    (GEN_KEY), the database summary/reset panel (DB_KEY), the table of
    browsed sessions (URLS_KEY) or the diagnosis tables (DIAGNOSE_KEY);
    any other key is simply echoed as a header.  `links` is currently
    unused; `user` is the database accessor the section writers query.
    '''
    # TODO: temporary, write the column div outside to also put the pdf path
    print('Write middle column ->', parameters)
    # not enough records in the db to display anything
    if not timestamp:
        f_index.write((' ' * NR_INNER_SPACES + HEADER_TAG + END_LINE) %
                                                            MID_COL_HEADER)
        f_index.write((' ' * NR_INNER_SPACES + P_TAG + END_LINE) %
                            DB_NO_RECORDS_MSG)
        # close the column div
        f_index.write(' ' * NR_DIV_SPACES + DIV_END_TAG + END_LINE)
        return
    if parameters[0] == GEN_KEY:
        write_general_metrics(f_index, user)
        # close the column div
        f_index.write(' ' * NR_DIV_SPACES + DIV_END_TAG + END_LINE)
        return
    if parameters[0] == DB_KEY:
        _write_db_summary(f_index, user)
    elif parameters[0] == URLS_KEY:
        _write_browsed_table(f_index, user)
    elif parameters[0] == DIAGNOSE_KEY:
        _write_diagnosis(f_index, user)
    else:
        # unknown key: echo the received parameters as a header
        f_index.write((' ' * NR_INNER_SPACES + HEADER_TAG + END_LINE) %
                                                            parameters)
    # close the column div
    f_index.write(' ' * NR_DIV_SPACES + DIV_END_TAG + END_LINE)


def _write_plain_td(f_index, text):
    ''' Write one aligned table cell containing text. '''
    f_index.write(' ' * NR_INNER_SPACES + TD_START_TAG_ALIGNED +
                  text + TD_END_TAG + END_LINE)


def _write_plt_td(f_index, plt, formatter):
    ''' Write the page-load-time cell; PLTs above 5000 ms are shown in red.

    formatter turns the raw plt value into the displayed string.
    '''
    text = formatter(plt)
    if plt > 5000:
        f_index.write((' ' * NR_INNER_SPACES + TD_START_TAG_ALIGNED +
                       FONT_RED_TAG + TD_END_TAG + END_LINE) % text)
    else:
        _write_plain_td(f_index, text)


def _write_header_row(f_index, headers):
    ''' Write a full table row of aligned header cells. '''
    f_index.write(' ' * NR_INNER_SPACES + TR_START_TAG + END_LINE)
    for column in headers:
        f_index.write((' ' * NR_INNER_SPACES + TH_TAG_ALIGNED + END_LINE) %
                      column)
    f_index.write(' ' * NR_INNER_SPACES + TR_END_TAG + END_LINE)


def _write_session_row_start(f_index, sid, session_start, plt):
    ''' Open a diagnosis table row and write the session id / start time /
    page-load-time cells shared by every diagnosis table. '''
    f_index.write(' ' * NR_INNER_SPACES + TR_START_TAG + END_LINE)
    _write_plain_td(f_index, str(sid))
    _write_plain_td(f_index, str(session_start))
    _write_plt_td(f_index, plt, lambda value: str(value) + ' ms')


def _parse_tcp_metrics(issue):
    ''' Parse a diagnosis result string shaped like
    "<avg_curr>, ...= <avg_near>, ..." and return
    (avg current TCP, avg near-host TCPs, index of the terminating comma).

    NOTE(review): the format is produced elsewhere in the project; the
    fixed offsets mirror the original parsing — confirm against the writer.
    '''
    first_comma = issue.find(",")
    avg_curr_tcp = float(issue[0:first_comma])
    first_eq = issue.find("=")
    second_comma = issue.find(",", first_eq + 3)
    avg_near_tcps = float(issue[first_eq + 2:second_comma])
    return avg_curr_tcp, avg_near_tcps, second_comma


def _parse_tcp_metrics_with_std(issue):
    ''' Like _parse_tcp_metrics but also extract the std of the near-host
    TCP degradation; returns (avg_curr, avg_near, std_near). '''
    avg_curr_tcp, avg_near_tcps, end_pos = _parse_tcp_metrics(issue)
    second_eq = issue.find("=", end_pos)
    third_comma = issue.find(",", second_eq + 3)
    std_near_tcps = float(issue[second_eq + 2:third_comma])
    return avg_curr_tcp, avg_near_tcps, std_near_tcps


def _write_db_summary(f_index, user):
    ''' DB_KEY view: session count plus a link that resets the database. '''
    rows = user.count_rows()
    f_index.write((' ' * NR_INNER_SPACES + HEADER_TAG + END_LINE) % DB_KEY)
    f_index.write(' ' * NR_INNER_SPACES + BR_TAG + END_LINE)
    summary = 'Your database contains ' + str(rows) + ' browsing sessions.'
    f_index.write((' ' * NR_INNER_SPACES + HEADER_TAG + END_LINE) % summary)
    f_index.write(' ' * NR_INNER_SPACES + TABLE_START_TAG + END_LINE)
    f_index.write(' ' * NR_INNER_SPACES + TR_START_TAG + END_LINE)
    _write_plain_td(f_index,
                    'Click on this link to reset your database....  ')
    # the reset endpoint is served locally and keyed on the client id
    reset_db = 'http://localhost/reset_db.php?db=' + str(user.get_clientID())
    f_index.write((' ' * NR_INNER_SPACES + TD_START_TAG_ALIGNED + A_TAG +
                   TD_END_TAG + END_LINE) % (reset_db, 'RESET DATABASE'))
    f_index.write(' ' * NR_INNER_SPACES + TR_END_TAG + END_LINE)
    f_index.write(' ' * NR_INNER_SPACES + TABLE_END_TAG + END_LINE)
    f_index.write(' ' * NR_INNER_SPACES + BR_TAG + END_LINE)


def _write_browsed_table(f_index, user):
    ''' URLS_KEY view: one row per browsed session with a link to the page
    and a link to its HAR waterfall, most recent session first. '''
    f_index.write((' ' * NR_INNER_SPACES + HEADER_TAG + END_LINE) % URLS_KEY)
    f_index.write(' ' * NR_INNER_SPACES + BR_TAG + END_LINE)
    urls_data = user.fetch_browsed_with_order('session_start')
    f_index.write(' ' * NR_INNER_SPACES + TABLE_START_TAG + END_LINE)
    _write_header_row(f_index, BROWSED_TABLE_HEADER)
    for (sid, session_start, url, plt) in reversed(urls_data):
        f_index.write(' ' * NR_INNER_SPACES + TR_START_TAG + END_LINE)
        _write_plain_td(f_index, str(sid))
        _write_plain_td(f_index, str(session_start))
        # link to the browsed page, label truncated to 40 characters
        f_index.write((' ' * NR_INNER_SPACES + TD_START_TAG_ALIGNED + A_TAG +
                       TD_END_TAG + END_LINE) % (url, url[0:40]))
        _write_plt_td(f_index, plt, lambda value: str(int(value)))
        # link to the locally generated HAR waterfall for this session
        water_fall = ('http://localhost/gen_har.php?sid=' + str(sid) +
                      '&tbl=plugin_raw_sid')
        f_index.write((' ' * NR_INNER_SPACES + TD_START_TAG_ALIGNED + A_TAG +
                       TD_END_TAG + END_LINE) % (water_fall, 'HAR Viewer'))
        f_index.write(' ' * NR_INNER_SPACES + TR_END_TAG + END_LINE)
    f_index.write(' ' * NR_INNER_SPACES + TABLE_END_TAG + END_LINE)


def _write_diagnosis(f_index, user):
    ''' DIAGNOSE_KEY view: summary table, pie-chart placeholder and one
    detail table per limitation cause. '''
    f_index.write((' ' * NR_INNER_SPACES + HEADER_TAG + END_LINE) %
                                                        DIAGNOSE_KEY)
    f_index.write(' ' * NR_INNER_SPACES + BR_TAG + END_LINE)
    # diagnosis_data indices: 0 client gap, 1 server side,
    # 2 local (contribution), 3 local (normalized by std), 4 wild internet
    diagnosis_data = user.fetch_diagnosis_results()
    counts = (len(diagnosis_data[0]),
              len(diagnosis_data[1]),
              len(diagnosis_data[2]) + len(diagnosis_data[3]),
              len(diagnosis_data[4]))

    # summary table: number of sessions and percentage per limitation cause
    f_index.write((' ' * NR_INNER_SPACES + HEADER_TAB_TAG) %
                  'Limitation Causes for High PLT')
    f_index.write(' ' * NR_INNER_SPACES + TABLE_START_TAG + END_LINE)
    _write_header_row(f_index, DIAGNOSE_RESUME_HEADER)
    rows = user.count_rows_results()
    # guard against an empty results table (ZeroDivisionError otherwise)
    safe_rows = rows if rows else 1
    # first row: session counts
    f_index.write(' ' * NR_INNER_SPACES + TR_START_TAG + END_LINE)
    _write_plain_td(f_index, 'Nr. Session')
    for count in counts:
        _write_plain_td(f_index, str(count))
    f_index.write(' ' * NR_INNER_SPACES + TR_END_TAG + END_LINE)
    # second row: percentages (true division via __future__ import)
    f_index.write(' ' * NR_INNER_SPACES + TR_START_TAG + END_LINE)
    _write_plain_td(f_index, 'Perc.')
    for count in counts:
        _write_plain_td(f_index, '%.1f' % (count * 100 / safe_rows) + '%')
    f_index.write(' ' * NR_INNER_SPACES + TR_END_TAG + END_LINE)
    f_index.write(' ' * NR_INNER_SPACES + TABLE_END_TAG + END_LINE)

    # pie-chart placeholder div consumed by the template's javascript
    f_index.write(' ' * NR_INNER_SPACES + BR_TAG + END_LINE)
    pie_chart_div = ''.join((
        "<div id='chart_div' name='diagnose' data='Task, Sessions,"
        "Client Side,", str(counts[0]),
        ",Server Side,", str(counts[1]),
        ",Local Access,", str(counts[2]),
        ",Wild Internet,", str(counts[3]),
        "' style='height: 200px'></div>"))
    f_index.write(' ' * NR_INNER_SPACES + pie_chart_div + END_LINE)

    # client-side limitation: issue is the total client idle time (ms)
    if diagnosis_data[0]:
        f_index.write((' ' * NR_INNER_SPACES + HEADER_TAB_TAG) %
                      'Client Side Limitation')
        f_index.write(' ' * NR_INNER_SPACES + TABLE_START_TAG + END_LINE)
        _write_header_row(f_index, DIAGNOSE_TABLE_HEADER_CLIENTGAP)
        for (sid, session_start, plt, issue) in reversed(diagnosis_data[0]):
            _write_session_row_start(f_index, sid, session_start, plt)
            _write_plain_td(f_index, issue + ' ms')
            f_index.write(' ' * NR_INNER_SPACES + TR_END_TAG + END_LINE)
        f_index.write(' ' * NR_INNER_SPACES + TABLE_END_TAG + END_LINE)

    # server-side limitation: issue packs avg(HTTP), avg(TCP), their ratio
    if diagnosis_data[1]:
        f_index.write((' ' * NR_INNER_SPACES + HEADER_TAB_TAG) %
                      'Server Side Limitation')
        f_index.write(' ' * NR_INNER_SPACES + TABLE_START_TAG + END_LINE)
        _write_header_row(f_index, DIAGNOSE_TABLE_HEADER_SERVER)
        for (sid, session_start, plt, issue) in reversed(diagnosis_data[1]):
            _write_session_row_start(f_index, sid, session_start, plt)
            # fixed-offset parse of the packed result string (original scheme)
            avg_http = issue[0:7]
            first_eq = issue.find("=")
            avg_tcp = issue[first_eq + 2:first_eq + 8]
            second_eq = issue.find("=", first_eq + 1)
            ratio = issue[second_eq + 2:second_eq + 7]
            _write_plain_td(f_index, avg_http)
            _write_plain_td(f_index, avg_tcp)
            _write_plain_td(f_index, ratio)
            f_index.write(' ' * NR_INNER_SPACES + TR_END_TAG + END_LINE)
        f_index.write(' ' * NR_INNER_SPACES + TABLE_END_TAG + END_LINE)

    # local access limitation (contribution): F2 = avg_near / avg_curr
    if diagnosis_data[2]:
        f_index.write((' ' * NR_INNER_SPACES + HEADER_TAB_TAG) %
                      'Local Access Limitation')
        f_index.write(' ' * NR_INNER_SPACES + TABLE_START_TAG + END_LINE)
        _write_header_row(f_index, DIAGNOSE_TABLE_HEADER_LOCAL)
        for (sid, session_start, plt, issue) in reversed(diagnosis_data[2]):
            _write_session_row_start(f_index, sid, session_start, plt)
            avg_curr, avg_near, _ = _parse_tcp_metrics(issue)
            f2_factor = avg_near / avg_curr
            for value in (avg_curr, avg_near, f2_factor):
                _write_plain_td(f_index, '%.3f' % value)
            f_index.write(' ' * NR_INNER_SPACES + TR_END_TAG + END_LINE)
        f_index.write(' ' * NR_INNER_SPACES + TABLE_END_TAG + END_LINE)

    # local access limitation (normalized): F1 = (curr - near) / std(near)
    if diagnosis_data[3]:
        f_index.write((' ' * NR_INNER_SPACES + HEADER_TAB_TAG) %
                      'Local Access Limitation - Computed by STD')
        f_index.write(' ' * NR_INNER_SPACES + TABLE_START_TAG + END_LINE)
        _write_header_row(f_index, DIAGNOSE_TABLE_HEADER_LOCAL_STD)
        for (sid, session_start, plt, issue) in reversed(diagnosis_data[3]):
            _write_session_row_start(f_index, sid, session_start, plt)
            avg_curr, avg_near, std_near = _parse_tcp_metrics_with_std(issue)
            f1_factor = (avg_curr - avg_near) / std_near
            for value in (avg_curr, avg_near, f1_factor):
                _write_plain_td(f_index, '%.3f' % value)
            f_index.write(' ' * NR_INNER_SPACES + TR_END_TAG + END_LINE)
        f_index.write(' ' * NR_INNER_SPACES + TABLE_END_TAG + END_LINE)

    # wild internet limitation: reports both F1 and F2
    if diagnosis_data[4]:
        f_index.write((' ' * NR_INNER_SPACES + HEADER_TAB_TAG) %
                      'Wild Internet Limitation')
        f_index.write(' ' * NR_INNER_SPACES + TABLE_START_TAG + END_LINE)
        _write_header_row(f_index, DIAGNOSE_TABLE_HEADER_INTERNET)
        for (sid, session_start, plt, issue) in reversed(diagnosis_data[4]):
            _write_session_row_start(f_index, sid, session_start, plt)
            avg_curr, avg_near, std_near = _parse_tcp_metrics_with_std(issue)
            f1_factor = (avg_curr - avg_near) / std_near
            f2_factor = avg_near / avg_curr
            for value in (avg_curr, avg_near, f1_factor, f2_factor):
                _write_plain_td(f_index, '%.3f' % value)
            f_index.write(' ' * NR_INNER_SPACES + TR_END_TAG + END_LINE)
        f_index.write(' ' * NR_INNER_SPACES + TABLE_END_TAG + END_LINE)

    # the appendix: formula images for F1 and F2
    f_index.write(' ' * NR_INNER_SPACES + BR_TAG + END_LINE)
    f_index.write((' ' * NR_INNER_SPACES + HEADER_TAG + END_LINE) %
                  'Appendix:')
    f1_img = "<img src='../images/F1.png' alt='F1 Math'  />"
    f2_img = "<img src='../images/F2.png' alt='F2 Math'  />"
    f_index.write(' ' * NR_INNER_SPACES + f1_img + END_LINE)
    f_index.write(' ' * NR_INNER_SPACES + BR_TAG + END_LINE)
    f_index.write(' ' * NR_INNER_SPACES + f2_img + END_LINE)


    
def write_left_column(f_index, database):
    ''' Write the header and contents of the left column: the navigation
    links (general metrics, browsed urls, database archive), each carrying
    the database path as a link parameter.
    '''
    # open the column div
    f_index.write((' ' * NR_DIV_SPACES + DIV_START_TAG + END_LINE) %
                                                            LEFT_COL_NAME)
    # the column header
    f_index.write((' ' * NR_INNER_SPACES + HEADER_TAG + END_LINE) %
                                                            LEFT_COL_HEADER)
    # one navigation entry per key; the first is preceded by a single
    # spacer, the following ones by two
    for position, key in enumerate((GEN_KEY, URLS_KEY, DB_KEY)):
        f_index.write(' ' * NR_INNER_SPACES + BR_TAG + END_LINE)
        if position:
            f_index.write(' ' * NR_INNER_SPACES + BR_TAG + END_LINE)
        f_index.write((' ' * NR_INNER_SPACES + A_TAG + END_LINE) %
                      (key + DB_INPUT + database, key))
    # close the column div
    f_index.write(' ' * NR_DIV_SPACES + DIV_END_TAG + END_LINE)

def write_right_column(f_index, average_values):
    ''' Write the header and contents of the right column: a table with the
    average values determined by the crawl.

    NOTE(review): the table rows actually come from the module-level
    LOCAL_PARAM_DESCRIPTION / LOCAL_PARAMETERS_VALUES pairs; the
    average_values argument is only used as a presence guard.
    '''
    if not average_values:
        print('No data has been received to create the table')
        return
    # open the column div
    f_index.write((' ' * NR_DIV_SPACES + DIV_START_TAG + END_LINE) %
                                                            RIGHT_COL_NAME)
    # the column header
    f_index.write((' ' * NR_INNER_SPACES + HEADER_TAG + END_LINE) %
                                                            RIGHT_COL_HEADER)
    f_index.write(' ' * NR_INNER_SPACES + BR_TAG + END_LINE)
    # the table of parameter descriptions and values
    f_index.write(' ' * NR_INNER_SPACES + TABLE_START_TAG + END_LINE)
    f_index.write(' ' * NR_INNER_SPACES + TR_START_TAG + END_LINE)
    for header in RIGHT_COL_TABLE_HEADER:
        f_index.write((' ' * NR_INNER_SPACES + TH_TAG + END_LINE) % header)
    f_index.write(' ' * NR_INNER_SPACES + TR_END_TAG + END_LINE)
    # one row per (description, value) pair
    for parameter, value in zip(LOCAL_PARAM_DESCRIPTION,
                                LOCAL_PARAMETERS_VALUES):
        f_index.write(' ' * NR_INNER_SPACES + TR_START_TAG + END_LINE)
        f_index.write(' ' * NR_INNER_SPACES + TD_START_TAG + parameter +
                    TD_END_TAG + END_LINE)
        # the value cell spans three physical lines in the generated html
        f_index.write(' ' * NR_INNER_SPACES + TD_START_TAG + END_LINE +
                      ' ' * (NR_INNER_SPACES + 4) + value + END_LINE +
                      ' ' * NR_INNER_SPACES + TD_END_TAG + END_LINE)
        f_index.write(' ' * NR_INNER_SPACES + TR_END_TAG + END_LINE)
    f_index.write(' ' * NR_INNER_SPACES + TABLE_END_TAG + END_LINE)
    f_index.write(' ' * NR_INNER_SPACES + BR_TAG + END_LINE)
    # TODO: PDF generation is disabled; this only computes the would-be
    # download path, best-effort
    try:
        pdf_name = 'pdfs/pdf_prova.pdf'
        rel_pdf_name = HTTP_CONNECTOR.join((config_pyanalyze.PDF_DIR,
                                            os.path.basename(pdf_name)))
    except Exception as mes:
        # report the actual exception: the previous code printed
        # rel_pdf_name, which is unbound exactly when this branch runs
        print('Could not create PDF, error: %s' % mes)
    # close the column div
    f_index.write(' ' * NR_DIV_SPACES + DIV_END_TAG + END_LINE)

def write_end_div_refresh(f_index, database):
    '''Close the three column container divs (colleft, colmid and
    colmask/threecol) that were opened earlier in the page body.

    ``database`` is currently unused: it fed the disabled auto-refresh
    check kept below for reference.
    '''
    closing_div = ' ' * NR_DIV_SPACES + DIV_END_TAG + END_LINE
    # one closing tag each for colleft, colmid and colmask threecol
    for _ in range(3):
        f_index.write(closing_div)
    # if the database was modified recently, should refresh page
#     if (time.time() - os.path.getmtime(database)) < REFRESH_TIMEOUT:
#         f_index.write(ONLOAD_REFRESH_SCRIPT)





def write_index(timestamp, database):
    '''Create the parameter_timestamp_index.html pages from the start/end
    template files.

    For every entry in ALL_PAGES one index file is generated that carries
    the shared header/footer from the templates plus a page-specific
    middle column, the navigation left column and — when a timestamp is
    given — the average-values right column.

    Parameters:
        timestamp -- identifier baked into the generated file names; when
                     truthy the right column with average values is added
        database  -- path to the sqlite database, forwarded to the column
                     writers

    Fixes over the previous revision: every opened file descriptor
    (both templates and all per-parameter index files) is now closed on
    every exit path via try/finally, and the per-parameter open() is
    inside the IOError handler so a failure to create an index file is
    reported instead of propagating.
    '''
    # database extraction is currently disabled; the column writers
    # receive placeholders instead (see repository history for the
    # original fetch_all_parameters() based code)
    links_data = None
    avg_data = None

    # open the two template file descriptors (header/style and footer)
    start_path = os.path.join(config_pyanalyze.TEMPLATES_DIR_STATIC,
                              START_TEMPLATE_NAME)
    end_path = os.path.join(config_pyanalyze.TEMPLATES_DIR_STATIC,
                            END_TEMPLATE_NAME)
    try:
        f_s_template = open(start_path, 'r')
    except IOError:
        print('Problem opening file %s' % start_path)
        return
    try:
        f_e_template = open(end_path, 'r')
    except IOError:
        print('Problem opening file %s' % end_path)
        # do not leak the already-opened start template
        f_s_template.close()
        return

    # one index file per page in ALL_PAGES, each named after its
    # parameter and the timestamp
    f_param = []
    try:
        for param in ALL_PAGES:
            try:
                f_name = index_filename(param, timestamp)
                # open() inside the try: creating the file can raise
                # IOError just like building its name
                f_param.append(open(f_name, 'w'))
            except IOError:
                print('Problem opening index file for parameter'
                                        ' %s' % param)
                return

        header_lines = f_s_template.readlines()
        footer_lines = f_e_template.readlines()
        # add the style and header of the page to each index
        for f_index in f_param:
            f_index.writelines(header_lines)

        if timestamp:
            print('YES TIMESTAMP', timestamp)
        # TODO: placeholder until compute_average_values() is re-enabled
        avg_values = [1, 2, 3, 4, 5, 6, 7, 8]

        user = PyAnalyzeDatabase(config_pyanalyze.CLIENTID)

        # middle column: plots, crawled-video links or database archive,
        # depending on the page's parameter
        for parameter, f_index in zip(ALL_PAGES, f_param):
            # TODO: must be moved back to middle column
            f_index.write((' ' * NR_DIV_SPACES + DIV_START_TAG + END_LINE) %
                                                                MID_COL_NAME)
            write_middle_column(f_index, timestamp, links_data, user,
                                parameter)

        # left and (optionally) right columns plus footer for each index
        for f_index in f_param:
            # left column - links to plots
            write_left_column(f_index, database)
            # right column - average values
            if timestamp:
                write_right_column(f_index, avg_values)
            # check if page should be automatically refreshed
            write_end_div_refresh(f_index, database)
            # add the footer of the page
            f_index.writelines(footer_lines)
    finally:
        # close every descriptor on every exit path (early return,
        # exception or normal completion)
        for f_index in f_param:
            f_index.close()
        f_s_template.close()
        f_e_template.close()
    
    

    

def main(argv=None):
    '''Program entry point; announces start-up and returns the exit status.

    The logging bootstrap is currently disabled (kept for reference):
        if not config_pyanalyze.LOG:
            logger_io()
    '''
    startup_banner = 'lib_render started'
    print(startup_banner)
    return 0

if __name__ == '__main__':
    # When run as a script: execute the module's doctests first, then
    # delegate to main() and propagate its return value as the process
    # exit status via sys.exit().
    import doctest
    doctest.testmod()
    sys.exit(main())
