'''
Created on Feb 4, 2015

@author: dewey
'''

# File-format identifiers and the filename extensions used to auto-detect them.
FORMAT_AUTO = "auto_format"
FORMAT_PYSTRAT_TEXT = "pystrat_text"
EXTENSION_PYSTRAT_TEXT = ".pdt"
FORMAT_CSV = "pystrat_csv"
EXTENSION_CSV = ".csv"
FORMAT_TSV = "pystrat_tsv"
# BUG FIX: this previously duplicated the format id ("pystrat_tsv"), so
# extension-based detection of TSV files could never match a real filename.
EXTENSION_TSV = ".tsv"

# R matrix() expression output; has no associated filename extension.
FORMAT_R_MATRIX = "r_matrix"

DEFAULT_ENCODING = "UTF-8"

import csv
from io import StringIO

from pystrat.dataprocessing import data_table_all, data_table_averaged
#need to import all for a proper eval() environment
from pystrat.datamodel import *
from pystrat.project import *

def __type_from_extension(filename):
    '''Guess a file format constant from the filename suffix.

    Returns the matching FORMAT_* constant, or None when no known
    extension matches.'''
    suffix_map = (
        (EXTENSION_PYSTRAT_TEXT, FORMAT_PYSTRAT_TEXT),
        (EXTENSION_CSV, FORMAT_CSV),
        (EXTENSION_TSV, FORMAT_TSV),
    )
    for suffix, detected_format in suffix_map:
        if filename.endswith(suffix):
            return detected_format
    return None

def open_file(filename, file_format=FORMAT_AUTO, encoding=DEFAULT_ENCODING, csv_options=None):
    '''Open *filename* and parse it into a project object.

    With file_format=FORMAT_AUTO the format is guessed from the filename
    extension. csv_options (dict or None) is forwarded as keyword arguments
    to the CSV importer. Returns the parsed project, or None for formats
    with no loader. Raises ValueError when no format can be determined.
    '''
    if file_format == FORMAT_AUTO:
        file_format = __type_from_extension(filename)
    if file_format is None:
        raise ValueError("No file format specified!")

    project = None
    # Single "with"-managed handle: the original opened the file a second
    # time for CSV and leaked both handles if parsing raised.
    with open(filename, "r", encoding=encoding) as fd:
        if file_format == FORMAT_PYSTRAT_TEXT:
            # SECURITY: eval() executes arbitrary code from the file --
            # only open .pdt files from trusted sources.
            for line in fd:
                project = eval(line)
        elif file_format == FORMAT_CSV:
            if csv_options is None:
                csv_options = {}
            # csv.reader accepts any iterable of lines, including the open handle
            csv_reader = csv.reader(fd)
            project = __import_csv(csv_reader, **csv_options)
    return project

def open_from_text_data(text_data, csv_options=None):
    '''Parse tab-separated text (e.g. pasted spreadsheet data) into a project.

    Line endings are normalized (CRLF/CR -> LF) before splitting into rows;
    each row is split on tab characters. csv_options (dict or None) is
    forwarded to the CSV importer. Returns the imported project.
    '''
    if csv_options is None:
        csv_options = {}
    # Crude parsing: a csv.reader with the excel-tab dialect would handle
    # quoting, but plain splitting matches the original behavior exactly.
    text_rows = text_data.replace("\r\n", "\n").replace("\r", "\n").split("\n")
    rows = [text_row.split("\t") for text_row in text_rows]
    # (debug print of the raw rows removed)
    return __import_csv(rows, **csv_options)
    

def save_file(proxy_project, filename, file_format=FORMAT_AUTO, encoding=DEFAULT_ENCODING):
    '''Write *proxy_project* to *filename* in the given format.

    With file_format=FORMAT_AUTO the format is guessed from the filename
    extension. Raises ValueError when no format can be determined and
    NotImplementedError for CSV output.
    '''
    if file_format == FORMAT_AUTO:
        file_format = __type_from_extension(filename)
    if file_format is None:
        raise ValueError("No file format specified!")

    # "with" closes the handle even when the CSV branch raises
    # (the original leaked the open file in that case).
    with open(filename, "w", encoding=encoding) as fd:
        if file_format == FORMAT_PYSTRAT_TEXT:
            fd.write(repr(proxy_project))
        elif file_format == FORMAT_CSV:
            raise NotImplementedError("CSV saving not implemented")
    

def __extract_unit(field_name):
    '''Split a column header like "depth (cm)" into ("depth", "cm").

    Returns (name, unit); unit is None when the header carries no
    parenthesized unit. The closing parenthesis is searched only after
    the opening one, so a stray ")" before "(" no longer produces a
    bogus (empty or reversed-slice) unit.
    '''
    first_par = field_name.find("(")
    # start the ")" search after the "(" -- find() from 0 could match a
    # ")" that precedes the "(", yielding an empty unit string
    end_par = field_name.find(")", first_par + 1)
    if first_par != -1 and end_par != -1:
        unit = field_name[first_par+1:end_par]
        name = field_name[:first_par].strip()
        return name, unit
    else:
        return field_name, None
    
# Sentinel stored when a cell's text cannot be converted to float; NaN is
# recognized downstream via math.isnan when exporting.
VALUE_NO_DATA = float("nan")
    
def __import_csv(csv_reader_like, location_id_col=-1, depth_col=-1, age_col=-1, depth_unit='cm', age_unit='Year_AD',
               sample_id_col=-1, vdata_cols=None, vdata_ids=None, vdata_names=None,
                skip_lines=0, header_line=True, verbose=True, encoding=DEFAULT_ENCODING):
    '''Takes a csv file with optional parameters and turns it into a ProxyProject
    object. If no column information is provided, auto column names will be
    assumed (location_id, depth, sample_id, and other columns are assumed to be
    vdata names. Raises IOError if file cannot be opened. Tested with default
    parameters, non-default parameters have not been tested.

    Parameters
    ----------
    csv_reader_like : iterable of rows (lists of strings), e.g. a csv.reader
        or a list of pre-split rows.
    location_id_col, depth_col, age_col, sample_id_col : int
        0-based column indices; -1 (the default) means "detect from the
        header line" when header_line is True.
    depth_unit, age_unit : str
        Fallback units; overridden by units parsed from header names such
        as "depth (m)".
    vdata_cols, vdata_ids, vdata_names : list or None
        Pre-specified proxy columns; extended while reading the header.
    skip_lines : int
        Number of leading rows to discard before any processing.
    header_line : bool
        When True the first non-skipped row is parsed as column headers.
    verbose : bool
        Print progress information for every row and field.
    encoding : str
        Not used in this function; kept for interface compatibility.

    Returns a PyStratProject. Raises ValueError when neither a depth nor an
    age column can be determined.
    '''
    

    
    project_out = PyStratProject()
    error = None
    current_location = None
    # normalize the list parameters: falsy (None/empty) -> fresh lists, so no
    # shared mutable state leaks between calls
    if not vdata_ids:
        vdata_ids = []
    if not vdata_names:
        vdata_names = []
    if not vdata_cols:
        vdata_cols = []
    vdata_units = []
        
    line_number = 1
    for fields in csv_reader_like:
        if verbose:
            print("line %s" % line_number)
        if skip_lines > 0:
            skip_lines -= 1
        else:
            
            if header_line:
                # --- header row: discover column indices and proxy metadata ---
                if verbose: print("processing header line")
                for index in range(len(fields)):
                    field_name = fields[index].strip()
                    if verbose:
                        print("reading field name : " + field_name)
                    # explicitly supplied *_col arguments (>= 0) win over detection
                    if field_name == "location_id" and location_id_col<0:
                        location_id_col = index
                        if verbose: print("found location_id column at index %s" % index)
                    elif field_name == "sample_id" and sample_id_col<0:
                        if verbose: print("found sample_id column at index %s" % index)
                        sample_id_col = index
                    elif field_name.startswith("depth") and depth_col<0:
                        if verbose: print("found depth column at index %s" % index)
                        # header may carry a unit, e.g. "depth (m)"
                        name, unit = __extract_unit(field_name)
                        if unit:
                            depth_unit = unit
                        depth_col = index
                    elif field_name.startswith("age") and age_col<0:
                        if verbose: print("found age column at index %s" % index)
                        name, unit = __extract_unit(field_name)
                        if unit:
                            age_unit = unit
                        age_col = index
                    elif field_name:
                        # any other non-empty header becomes a proxy (vdata) column
                        if verbose: print("found proxy column at index %s" % index)
                        vdata_cols.append(index)
                        name, unit = __extract_unit(field_name)
                        vdata_id = field_name.lower().replace(" ", "_")
                        vdata_ids.append(vdata_id)
                        vdata_names.append(name)
                        vdata_units.append(unit)
                header_line = False
            else:
                # --- data row: parse key (depth/age), sample id, and values ---
                depth = -1
                if depth_col < 0 and age_col < 0:
                    error = "Could not find depth or age column and no depth column specified"
                    break
                else:
                    age = None
                    depth = None
                    if depth_col >= 0:
                        try:
                            depth = float(fields[depth_col])
                        except ValueError:
                            if verbose: print("Depth value '%s' could not be converted to float." % fields[depth_col])
                    if age_col >=0:
                        try:
                            age = float(fields[age_col])
                        except ValueError:
                            # NOTE(review): "Aage" is a typo for "Age" in this message
                            if verbose: print("Aage value '%s' could not be converted to float." % fields[age_col])
                    
                    # rows with neither key are skipped entirely
                    if age is None and depth is None:
                        if verbose: print("No age or depth value for this line. Skipping.")
                        line_number += 1
                        continue
                
                if sample_id_col >= 0:
                    sample_id = fields[sample_id_col].strip()
                else:
                    sample_id = None
                
                if location_id_col >= 0:
                    location_id_val = fields[location_id_col].strip()
                    if location_id_val:
                        # switch locations when the id changes; create on first sight
                        if (current_location and location_id_val != current_location._id) or current_location is None:
                            if verbose: print("Creating new location with id %s" % location_id_val)
                            if not location_id_val in project_out.locations:
                                new_location=DataLocation(location_id_val)
                                new_location.age_depth_model = LinearAgeDepthModel(depth_unit=depth_unit, age_unit=age_unit)
                                project_out.add_location(new_location)
                            current_location = project_out.locations[location_id_val]
                if current_location is None:
                    # no location column at all: everything goes into "default"
                    current_location = DataLocation("default")
                    current_location.age_depth_model = LinearAgeDepthModel(depth_unit=depth_unit, age_unit=age_unit)
                    project_out.add_location(current_location)
                
                # rows carrying both keys feed the location's age/depth model
                if (not age is None) and (not depth is None):
                    if verbose: print("Age and depth given, adding age/depth pair %s, %s" % (age, depth))
                    current_location.age_depth_model.add_agedepth(age, depth)
                
                for ind in range(len(vdata_cols)):
                    column_index = vdata_cols[ind]
                    vdata_id = vdata_ids[ind]
                    vdata_name = vdata_names[ind]
                    vdata_unit = vdata_units[ind]
                    if not vdata_id in current_location.vdata:
                        # NOTE(review): "vdaata" is a typo for "vdata" in this message
                        if verbose: print("Adding new vdaata object with vdata id %s in location_id %s" % (vdata_id, current_location._id))
                        newvdata = VerticalData(vdata_id)
                        newvdata.set_name(vdata_name)
                        newvdata.value_unit = vdata_unit
                        newvdata.key_unit = depth_unit

                        current_location.add_vdata(newvdata)
                    try:
                        string_val = fields[column_index].strip()
                        if len(string_val) > 0:
                            value = float(fields[column_index])
                        else:
                            # empty cell: no value stored for this row/column
                            value = None
                    except ValueError:
                        # unparseable text becomes the NaN sentinel (kept,
                        # unlike empty cells which are skipped as None)
                        value = VALUE_NO_DATA
                    #if verbose: print("adding data at location %s, proxy %s, depth %s, value %s, sample_id %s" %
                    #                  (current_location.location_id, proxy_id, depth, value, sample_id))
                    if not value is None:
                        # prefer depth as the vdata key; fall back to age
                        if not depth is None:
                            key = depth
                            unit = depth_unit
                        else:
                            key = age
                            unit = age_unit
                            if verbose: print("Adding age with unit %s" % unit)
                            
                        current_location.vdata[vdata_id].add_value(key, value, sample_id)
                        current_location.vdata[vdata_id].key_unit = unit
                    
        line_number += 1
    
    
    if error:
        raise ValueError(error)
    else:
        #clean output
        empty_locations_ids = []
        for location_id, location in project_out.locations.items():
            # an age/depth model needs at least two points to be meaningful
            if not (len(location.age_depth_model.depths()) > 1):
                location.age_depth_model = None
            else:
                location.age_depth_model.auto_detect_unit()
                # expose the age/depth model as a regular vdata entry as well
                ad_vdata = location.age_depth_model.to_vdata()
                ad_vdata._id = VDATA_ID_AGEDEPTH
                ad_vdata.set_name("Age Depth Model")
                location.add_vdata(ad_vdata)
                
            # NOTE(review): this emptiness check runs before the empty-vdata
            # pruning below, so a location whose vdata all turn out empty is
            # still kept -- confirm whether that is intended
            if not location.vdata:
                empty_locations_ids.append(location_id)
                
            # collect first, then delete: cannot delete while iterating the dict
            empty_vdata_ids = []
            for vdata_id, proxy in location.vdata.items():
                if len(proxy._data) == 0:
                    empty_vdata_ids.append(vdata_id)
            for vdata_id in empty_vdata_ids:
                del location.vdata[vdata_id]
            
        for location_id in empty_locations_ids:
            del project_out.locations[location_id]
        
        
        return project_out

def extract_age_depth_models(pystrat_project):
    '''Collect the age/depth model of every location that has one.

    Returns a dict mapping location id -> age_depth_model; locations
    whose model is falsy (e.g. None) are omitted.'''
    return {
        loc_id: loc.age_depth_model
        for loc_id, loc in pystrat_project.locations.items()
        if loc.age_depth_model
    }


# Export modes: "summary" averages values per key, "all" writes one row per
# sample (with an optional sample_id column).
EXPORT_MODE_SUMMARY = "summary"
EXPORT_MODE_ALL = "all"

def export_vdata(vdata_list, filename=None, file_format=FORMAT_AUTO, encoding=DEFAULT_ENCODING, mode=EXPORT_MODE_SUMMARY):
    '''Export a list of vdata objects to a file or a string.

    When filename is given, the data is written there (format guessed from
    the extension for FORMAT_AUTO) and None is returned; otherwise the
    serialized text is returned as a str. mode selects the averaged summary
    table or the full per-sample table. Raises ValueError for an
    unrecognized format or FORMAT_AUTO without a filename.
    '''
    if filename:
        if file_format == FORMAT_AUTO:
            file_format = __type_from_extension(filename)
        fobject = open(filename, "w", encoding=encoding)
    else:
        fobject = StringIO()

    # try/finally guarantees the handle is closed even when an unsupported
    # format raises (the original leaked the open file in that case).
    try:
        if file_format == FORMAT_CSV:
            __vdata_to_sv(fobject, vdata_list, delimeter=",", mode=mode, nan_hack="NA", none_hack="")
        elif file_format == FORMAT_TSV:
            __vdata_to_sv(fobject, vdata_list, delimeter="\t", mode=mode, nan_hack="NA", none_hack="")
        elif file_format == FORMAT_R_MATRIX:
            fobject.write("matrix(c(")
            headers, depths, sample_ids = __vdata_to_sv(fobject, vdata_list, write_headers=False, newline=", \n", delimeter=", ", write_sample_ids=False, text_delimeter='"', mode=mode, nan_hack="NA", none_hack="NA")
            colnames = ['"%s"' % header for header in headers]
            # use sample ids as row names only when at least one is present
            if sample_ids is None or sample_ids.count(None) == len(sample_ids):
                rowname_vals = depths
            else:
                rowname_vals = sample_ids
            rownames = ['"%s"' % val for val in rowname_vals]

            names = "list(c(%s), \nc(%s))" % (", ".join(rownames), ", ".join(colnames)) #row names, then col names
            fobject.write("), nrow=%s, ncol=%s, dimnames=%s, byrow=TRUE)" % (len(depths), len(colnames), names))
        elif file_format == FORMAT_PYSTRAT_TEXT:
            fobject.write(repr(vdata_list))
        else:
            raise ValueError("Unrecognized format or FORMAT_AUTO specified with no filename: %s" % file_format)

        if filename is None:
            return fobject.getvalue()
    finally:
        fobject.close()

def __vdata_to_sv(fobject, vdata_list, delimeter, mode, write_headers=True, text_delimeter="", write_depths=True, write_sample_ids=True, nan_hack="NA", none_hack="", newline="\n"):
    '''Write vdata_list to the open fobject as delimiter-separated values.

    mode selects EXPORT_MODE_ALL (one row per sample, optional sample_id
    column) or EXPORT_MODE_SUMMARY (values averaged per key). nan_hack and
    none_hack are the textual stand-ins for NaN and missing cells;
    text_delimeter wraps headers and sample ids (e.g. '"' for R output).
    No trailing newline is written. Returns (headers, depths, sample_ids);
    sample_ids is None in summary mode. Raises ValueError on mixed key
    units, an unrecognized key unit, or an unknown mode.
    '''
    headers = []
    keyunits = set()
    locs = set()
    for vdata in vdata_list:
        keyunits.add(vdata.key_unit)
        locs.add(vdata.metadata(METADATA_LOCATION_ID))

    if len(keyunits) > 1:
        raise ValueError("More than one key type (age and depth data mixed), cannot create data table.")
    keyunit = keyunits.pop()
    keycat = units.get_category(keyunit)
    # the key-column header reflects whether the shared unit is time or distance
    if keycat == "time":
        keytype = "age (%s)" % keyunit
    elif keycat == "distance":
        keytype = "depth (%s)" % keyunit
    else:
        raise ValueError("Unrecognized key unit: %s" % keyunit)

    for vdata in vdata_list:
        # prefix names with the location id only when several locations are mixed
        if len(locs) > 1:
            name = "%s: %s" % (vdata.metadata(METADATA_LOCATION_ID), vdata.name())
        else:
            name = vdata.name()
        headers.append(name)

    def _cell(item):
        # render one table cell: None/NaN map to their configured stand-ins
        if item is None:
            return none_hack
        elif math.isnan(item):
            return nan_hack
        return str(item)

    if mode==EXPORT_MODE_ALL:
        pre_headers = []
        if write_depths:
            pre_headers.append(keytype)
        if write_sample_ids:
            pre_headers.append("sample_id")
        headers = pre_headers + headers
        headers_del = ['%s%s%s' % (text_delimeter, header, text_delimeter) for header in headers]
        if write_headers:
            fobject.write(delimeter.join(headers_del) + newline)
        table_by_row, depths, sample_ids = data_table_all(vdata_list)
        nl = ""
        for index in range(len(depths)):
            row_str = []
            if write_depths:
                row_str.append(str(depths[index]))
            if write_sample_ids:
                s_id = sample_ids[index]
                if s_id is None:
                    row_str.append(none_hack)
                else:
                    row_str.append('%s%s%s' % (text_delimeter, s_id, text_delimeter))
            row_str.extend(_cell(item) for item in table_by_row[index])
            # newline precedes every row but the first: no trailing newline
            fobject.write(nl+delimeter.join(row_str))
            nl = newline

    elif mode == EXPORT_MODE_SUMMARY:
        if write_depths:
            headers = [keytype,] + headers
        if write_headers:
            fobject.write(delimeter.join(headers) + newline)
        table_by_row, depths = data_table_averaged(vdata_list)
        sample_ids = None
        nl = ""
        for index in range(len(depths)):
            row_str = []
            if write_depths:
                row_str.append(str(depths[index]))
            row_str.extend(_cell(item) for item in table_by_row[index])
            fobject.write(nl+delimeter.join(row_str))
            nl = newline
    else:
        raise ValueError("Unrecognized mode: %s" % mode)

    return headers, depths, sample_ids
        

    
if __name__ == "__main__":
    
    # Manual smoke test: build a VerticalData object from inline data captured
    # from a real dataset (Wood et al. 2011 standardized anomalies) and print
    # it as an R matrix() expression. Constructor arguments appear to be
    # (id, metadata dict, key unit, value unit, {key: [(value, sample_id), ...]})
    # -- inferred from this literal; confirm against pystrat.datamodel.
    vd = VerticalData('wood_et_al_2011_bn_anom_(10_year_average)_std_anom', {'name': 'Wood et al 2011 Bn anom Standardized Anomalies', 'location_id': 'default', 'pystrat_tree_expanded': True}, 'Year_AD', None, {1920.0: [(-0.23983509542525985, None)], 1665.0: [(-0.4572532465165895, None)], 1795.0: [(-1.2973163750523498, None)], 1925.0: [(1.034782340285259, None)], 1670.0: [(1.0204005948823305, None)], 1800.0: [(-0.022698939341830718, None)], 1930.0: [(0.6002280331105003, None)], 1675.0: [(1.0204005948823305, None)], 1805.0: [(1.5852365957071466, None)], 1935.0: [(-0.6309621713833328, None)], 1680.0: [(0.0788192635023699, None)], 1810.0: [(1.2956277225932744, None)], 1940.0: [(-1.0367529877522348, None)], 1685.0: [(-0.008317193938902295, None)], 1815.0: [(2.121309105726106, None)], 1945.0: [(-1.9058616021017523, None)], 1690.0: [(-0.4572532465165895, None)], 1820.0: [(1.9763636716652198, None)], 1950.0: [(-2.296988678059825, None)], 1695.0: [(0.39747362242999956, None)], 1825.0: [(1.1216368027186308, None)], 1955.0: [(-0.6890531430108475, None)], 1700.0: [(1.1797277743461454, None)], 1830.0: [(1.107255057315702, None)], 1960.0: [(0.6727007501409435, None)], 1705.0: [(0.875455160821444, None)], 1835.0: [(0.10758275430822674, None)], 1965.0: [(1.8604637234180907, None)], 1710.0: [(0.23814644296618476, None)], 1840.0: [(-0.39916227488907463, None)], 1585.0: [(1.3681004396237177, None)], 1970.0: [(1.0928733119127738, None)], 1715.0: [(-0.7759076054442191, None)], 1845.0: [(0.41185536783292803, None)], 1590.0: [(-0.16764437340271715, None)], 1975.0: [(1.3968639304295745, None)], 1720.0: [(-0.5297259635470327, None)], 1850.0: [(-0.008317193938902295, None)], 1595.0: [(-0.4428715011136611, None)], 1980.0: [(1.6433275673346617, None)], 1725.0: [(0.44090085364668546, None)], 1855.0: [(0.38280988201917066, None)], 1600.0: [(0.20910095715242735, None)], 1985.0: [(1.0782095715019449, None)], 1730.0: [(-0.8483803224746623, None)], 1860.0: [(0.0788192635023699, None)], 1605.0: 
[(0.6292735189242576, None)], 1990.0: [(0.13662824012198413, None)], 1735.0: [(-0.6890531430108475, None)], 1865.0: [(-0.8049530912579766, None)], 1610.0: [(0.42651910824375705, None)], 1995.0: [(-1.326361860866107, None)], 1740.0: [(0.36842813661624224, None)], 1870.0: [(-1.3119801154631787, None)], 1615.0: [(0.8610734154185159, None)], 2000.0: [(-1.4713072949269934, None)], 1745.0: [(0.5133735706771286, None)], 1875.0: [(0.310619159996628, None)], 1620.0: [(0.9479278778518873, None)], 2005.0: [(-1.2538891438356636, None)], 1750.0: [(-0.16764437340271715, None)], 1880.0: [(2.121309105726106, None)], 1625.0: [(0.6727007501409435, None)], 1755.0: [(-0.2979260670527746, None)], 1885.0: [(1.3246732084070318, None)], 1630.0: [(-0.31230781245570305, None)], 1760.0: [(-0.8774258082884199, None)], 1890.0: [(-0.19640786420857395, None)], 1635.0: [(-1.4713072949269934, None)], 1765.0: [(-0.31230781245570305, None)], 1895.0: [(-0.28354432164984616, None)], 1640.0: [(-0.9208530395051056, None)], 1770.0: [(-0.47163499191951785, None)], 1900.0: [(-0.13859888758895975, None)], 1645.0: [(0.18005547133866998, None)], 1775.0: [(-2.0942342673793246, None)], 1905.0: [(-1.39883457789655, None)], 1650.0: [(-0.2544988358360888, None)], 1780.0: [(-1.094561964371849, None)], 1910.0: [(-1.6162527289878799, None)], 1655.0: [(-0.6021986805774759, None)], 1785.0: [(-0.9642802707217915, None)], 1915.0: [(-0.7615258600412907, None)], 1660.0: [(-0.6309621713833328, None)], 1790.0: [(-0.9498985253188631, None)]})
    vd_list = [vd,]
    
    # "summary" mode averages duplicate keys; the R expression goes to stdout
    print(export_vdata(vd_list, file_format=FORMAT_R_MATRIX, mode="summary"))