from __future__ import with_statement 

import csv
import gzip
import os
import numpy
import re
import sqlite3
import urllib

import pcv
from pcv import Env

"""
@todo: orange is actually a sub-directory
@todo: mixture is a totally different data structure
@todo: phoneme has some specialty columns. need to figure out what to do with those.
"""

class SpaceDelimReader(object):
    """
    Iterator wrapper that splits each line of C{f} on whitespace.

    Yields one list of string tokens per input line; raises StopIteration
    when the underlying iterator is exhausted.
    """
    def __init__(self,f):
        self.f = f
    def __iter__(self):
        return self
    def next(self):
        # use the builtin next() so any iterator works (the Py2-only
        # f.next() attribute broke on objects without that method)
        return next(self.f).split()
    __next__ = next  # Python 3 iterator protocol

class GzipOpener(object):
    """
    Context manager wrapping L{gzip.GzipFile}.

    Entering yields the open gzip file object; leaving closes it
    (GzipFile predates ``with`` support in old Python versions).
    """
    def __init__(self, fn):
        self.f = gzip.GzipFile(fn)

    def __enter__(self):
        return self.f

    def __exit__(self, exc_type, exc_value, tb):
        self.f.close()

def gzip_open(fn):
    """
    Open the gzip file C{fn} for use in a ``with`` statement.

    Thin convenience wrapper; see L{GzipOpener} for the actual
    context-manager implementation.
    """
    return GzipOpener(fn)



class EleStatLearnData(object):
    """ 
    Data sets from 'Elements of Statistical Learning' 
    See the "info" attribute of individual data sets for a lengthier description
    """

    class _DataSet(object):
        """ A single named data set from 'Elements of Statistical Learning' """

        class _Item(object):
            """
            One downloadable piece of a data set (e.g. "data" or "info").

            Extra keyword arguments (such as "ext") are accepted and ignored.
            """
            def __init__(self, name=None, local=None, remote=None, dsname=None, **kwargs):
                self.dsname = dsname # data set name (needed for special handling)
                self.name  = name    # item name within the data set
                self.local = local   # path of the cached local copy
                self.remote= remote  # URL to download from
                self.header = None   # column names; filled in by get()
                self._data = None    # cached parsed result

            def clear_cache(self):
                """ remove local cache of this data """
                if os.path.isfile( self.local ):
                    os.remove( self.local )
                self._data = None

            def get(self):
                """
                Return this item's contents, downloading and parsing on first use.

                An "info" item is returned as its raw text. Anything else is
                parsed -- csv or whitespace-delimited, optionally gzipped --
                into a numpy array, with "NA"/empty fields mapped to numpy.nan.
                The result is cached in memory and on disk.
                """
                if self._data is None:
                    if not os.path.isfile( self.local ):
                        # first access: download into the local cache directory
                        local_dir = os.path.split(self.local)[0]
                        if not os.path.isdir( local_dir ):
                            os.makedirs( local_dir )
                        urllib.urlretrieve(self.remote, self.local)


                    if self.name == "info":
                        with open(self.local) as f:
                            self._data = f.read()        
                            
                        self.__doc__ += "\n\n" + self._data
                        
                        # special handling
                        if self.dsname == "saheart":
                            self.__doc__ += "@attention Special Handling\nFamily History [famhist] attribute is Present/Absent in the file.\nChanged to 1.0/0.0 in the returned array"
                        elif self.dsname == "bone":
                            self.__doc__ += "@attention Special Handling\ngender male=-1, female=1."
                        
                    else:
                        # try to read as a csv file
                        if os.path.splitext(self.local)[1] == ".gz":
                            # compressed with gzip
                            open_func = gzip_open
                        else:
                            # uncompressed
                            open_func = open

                        with open_func(self.local) as f:
                            # sniff the first 1K to guess the delimiter and
                            # whether a header row is present
                            sample = f.read(1024)
                            f.seek(0)
                            is_csv = re.search(r",",sample)

                            # any letters in the sample suggest a header row
                            has_header = re.search(r"[A-Za-z]",sample)
                            if self.dsname=="khan" and self.name=="ytest":
                                has_header = False
                                
                            if is_csv:
                                reader = csv.reader(f)
                            else:
                                reader = SpaceDelimReader(f)
                                
                            if has_header:
                                self.header = reader.next()

                            # Numerous of these use "NA" to mean missing
                            def handleNA(x):
                                if x.strip()=="NA":
                                    return ""
                                else:
                                    return x
                            # empty fields become NaN, everything else a float
                            def valornan(s):
                                if len(s.strip())==0:
                                    return numpy.nan
                                else:
                                    return float(s)

                            data = []
                            for line in reader:
                                # Map categorical columns to numeric *strings*
                                # so they survive handleNA/valornan below
                                # (passing bools or ints here would crash on
                                # x.strip()). float("1")==1.0 etc., matching
                                # the values promised in the info docstrings.
                                if self.dsname == "saheart" and self.name=="data":
                                    line[5] = {"Present":"1","Absent":"0"}[line[5]]
                                elif self.dsname == "bone" and self.name=="data":
                                    line[2] = {"male":"-1","female":"1"}[line[2]]
                                elif self.dsname == "prostate" and self.name=="data":
                                    line[10] = {"T":"1","F":"0"}[line[10]]
                                line = map(handleNA, line)
                                
                                if len(line)>0:                                
                                    data.append( map(valornan, line) )
                                
                            self._data = numpy.array(data)

                            if self.header is None:
                                # no header row: synthesize column names
                                self.header = map(lambda x: "col_%d" % (x,), xrange(self._data.shape[1]))

                return self._data
    
    
        def __init__(self, dsname, data):
            """
            @param dsname: the data set's name
            @param data: list of dicts with keys "name", "local", "remote"
                (plus extras), one per downloadable item
            """
            self.dsname = dsname
            self.names = map(lambda x: x["name"], data)

            ss = "\n\nData Set Names:\n\t" + "\n\t".join(self.names) + "\n"
            self.__doc__ += ss

            for item in data:
                obj = self._Item(dsname=self.dsname, **item)
                setattr(self,item["name"],obj)
                # underscore alias keeps the raw _Item reachable despite the
                # __getattribute__ magic below (used by header())
                setattr(self,"_" + item["name"],obj)

        def header(self,name):
            """ Return the column names for item C{name} (None until fetched) """
            return getattr(self, "_"+name).header
        
        def __getattribute__(self,name):
            """
            I'm doing some magic here so that the dataset and info objects
            act as properties. This way, for an EleStatLearnData object e
            you can call e.[dataset].info and get back the data you want.
            """
            if name in object.__getattribute__(self,"names"):
                # item attributes return their parsed data directly
                return object.__getattribute__(self,name).get()
            else:
                return object.__getattribute__(self,name)

                
    def __init__(self):
        base_url = "http://www-stat.stanford.edu/~tibs/ElemStatLearn/datasets/"
        
        # Each entry describes one data set. "base" overrides the remote file
        # stem (defaults to the key); "data" lists the item names, either as
        # plain strings or (remote-suffix, local-name) tuples.
        pieces = {
             "cancer" : {"base":"14cancer", "data" : ("xtrain","ytrain","xtest","ytest")},
             "bone" : {} ,
             "countries" : {} ,
             "galaxy" : {} ,
             "laozone" : {"base":"LAozone"},
             "marketing" : {} ,
             "mixture" : {"base":"mixture.example"},
             "nci" : {} ,
             "ozone" : {} ,
             "phoneme" : {} ,
             "prostate" : {} ,
             "sachs" : {"data" : ("data","covmatrix") },
             "khan" : {"data" : ("xtrain","ytrain","xtest","ytest") },
             "orange" : {"data" : (("","data"),)},
             "saheart" : {"base":"SAheart"},
             "spam" : {"data" : ("data","traintest")},
             "vowel" : {"data" : ("train","test")},
             "waveform" : {"data" : ("train","test")},
             "zip" : {"data" : (("train.gz","train"), ("test.gz","test"))}
        }
        
        self.save_dir = os.path.join(Env.home_dir, "data","EleStatLearn")
        
        self.names = pieces.keys()
        self.names.sort()
        
        ss = "\nNames:\n" + ", ".join( self.names ) + "\n"
        self.__doc__ += ss
        
        self._parse_pieces(base_url, pieces) # create an attribute from each element of pieces
        
    def _parse_pieces(self, url, pieces):
        """
        Expand the C{pieces} description into one L{_DataSet} attribute per
        data set, computing each item's remote URL and local cache path.
        """
        for name in pieces.keys():
            base = pieces[name].get("base",name)
            data = list(pieces[name].get("data",("data",)))
            data.append("info")   # every data set also has an info file
            
            data2= []
            for item in data:
                if isinstance(item,tuple):
                    # (remote-suffix, local-name) pair
                    remote = base
                    if len(item[0])>0:
                        remote += "." + item[0]
                    tt = {"remote":remote, "name":item[1]}
                    ext= os.path.splitext(item[0])[1]
                else:
                    tt = {"remote":base + "." + item, "name":item}
                    ext= ""
                
                if ext=="":
                    ext = ".txt"
                tt["ext"] = ext
                
                tt["local"] = os.path.join(self.save_dir, name + "_" + tt["name"] + tt["ext"])
                data2.append(tt)
                
            for i in xrange(len(data2)):
                data2[i]["remote"] = url + data2[i]["remote"]
                
            setattr(self, name, self._DataSet(name, data2))

        

class LocalFiles(object):
    """
    Information about image files stored on local disks.

    File paths are cached in a small sqlite database kept under C{save_dir}.
    """
    def __init__(self, save_dir=None):
        """
        @param save_dir: directory holding the sqlite database;
            defaults to Env.home_dir
        """
        self.save_dir = pcv.setdefault( save_dir, Env.home_dir )
        
        self.image_exts = [".jpg", ".jpeg"]  # extensions treated as images
        self.watched_dirs = []               # scanned by _scan_watched_dirs
        
        self.con = sqlite3.connect(self._dbname)


    # full path of the sqlite database file
    _dbname = property( lambda s: os.path.join(s.save_dir, "sources_LocalFiles.db") ) 
        
    def _createdb(self, overwrite=False):
        """
        (Re)create the database schema.

        @param overwrite: when False (the default), raise IOError if the
            database file already exists instead of destroying its tables.
        @raise IOError: if the db file exists and C{overwrite} is False
        """
        dbname = self._dbname
        if os.path.isfile( dbname ) and not overwrite:
            raise IOError( "%s exists and overwrite is False" % (dbname,))
        else:
            # drop/create must name the same tables; the drop previously said
            # [file_keywords] and never removed [fileinfo_keywords]
            sql = """
            drop table if exists [fileinfo];
            drop table if exists [fileinfo_keywords];

            create table [fileinfo] (
                [id] INTEGER PRIMARY KEY,
                [path] TEXT UNIQUE NOT NULL
            );
            create table [fileinfo_keywords] (
                [fileid] INTEGER,
                [keyword] TEXT NOT NULL,
                PRIMARY KEY(fileid, keyword)
            );
            """
            
            con = self.con
            con.executescript(sql)
            con.commit()
            
    def _add_one_dir(self, cdir, files):
        """
        Insert the files of one directory into the database.

        Already-known paths are skipped via "insert or ignore"
        (path is UNIQUE in fileinfo).

        @todo: don't grab everything from table.
        """
        files = [(os.path.join(cdir,f),) for f in files]
        con = self.con
        con.executemany( "insert or ignore into fileinfo (path) values (?)", files)
        con.commit()
        
    def _scan_watched_dirs(self):
        """ Walk every watched dir and record all image files found. """
        # case-insensitive match on any known image extension at end-of-name
        regex = re.compile("|".join(self.image_exts) + "$", re.I)
        for d in self.watched_dirs:
            for root, dirs, files in os.walk(d):
                files = filter( lambda s: regex.search(s), files )
                if len(files)>0:
                    self._add_one_dir( root, files )
    
    def print_all_paths(self):
        """ Dump every row of fileinfo to stdout (debug helper). """
        con = self.con
        cur = con.cursor()

        cur.execute( "select * from fileinfo" )
        for row in cur:
            print(row)

class BerkeleySegmentationDataset(object):
    """ 
    The Berkeley Segmentation Dataset (BSDS300).

    More information: U{http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/segbench/}

    The dataset ships as two downloads:
        * images (22MB gzipped)
        * segmentations (27MB gzipped)

    Work based on the dataset should cite the ICCV 2001 paper, repeated in
    the C{cite} attribute:

    @InProceedings{MartinFTM01,
      author = {D. Martin and C. Fowlkes and D. Tal and J. Malik},
      title = {A Database of Human Segmented Natural Images and its
               Application to Evaluating Segmentation Algorithms and
               Measuring Ecological Statistics},
      booktitle = {Proc. 8th Int'l Conf. Computer Vision},
      year = {2001},
      month = {July},
      volume = {2},
      pages = {416--423}
    }    
    """

    def __init__(self):
        # BibTeX entry users should cite
        self.cite = """
            @InProceedings{MartinFTM01,
            author = {D. Martin and C. Fowlkes and D. Tal and J. Malik},
            title = {A Database of Human Segmented Natural Images and its
                     Application to Evaluating Segmentation Algorithms and
                     Measuring Ecological Statistics},
            booktitle = {Proc. 8th Int'l Conf. Computer Vision},
            year = {2001},
            month = {July},
            volume = {2},
            pages = {416--423}
            }"""
        # download locations for the two archives
        self._images_url = "http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/segbench/BSDS300-images.tgz"
        self._seg_url = "http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/segbench/BSDS300-human.tgz"


if __name__=="__main__":
    obj = LocalFiles()
    # obj.watched_dirs.append( "Z:\\shared\\photos" )
    # obj._scan_watched_dirs()
    obj.print_all_paths()    
