'''
Created on 16/12/2012

@author: Jorge
'''
from libsvm.python.svmutil import *


from create_data_sets import DatasetAcess
from VectorizedClaim import Example
from dataset import convert_sparse_to_vector
from classifiers.SVMTreeExample import SVMTreeExample
from sklearn.datasets import load_svmlight_file
import numpy as np

class CustomDataSet(DatasetAcess):
    """Dataset persistence in a simple whitespace-separated text format:
    one example per line, the label first, then the feature values.
    """

    def __init__(self):
        # Fixed on-disk locations for the three splits (relative to cwd).
        self.training_set_path = '../data/customdata/training_set.tab'
        self.validation_set_path = '../data/customdata/validation_set.tab'
        self.test_set_path = '../data/customdata/test_set.tab'

    def save_dataset(self, file_name, dataset):
        """Write `dataset` to `file_name`, one example per line as
        '<y> <x1> <x2> ... <xn> '.

        Fixes vs. the previous version: leftover debug print removed,
        and the file is opened with a context manager so it is closed
        even if serializing an example raises.
        """
        with open(file_name, "w") as file_object:
            for example in dataset:
                file_object.write(str(example.get_y()))
                file_object.write(' ')
                for feature in example.get_vector_X():
                    # Trailing space after the last feature is kept on
                    # purpose: read_dataset() uses split(), which
                    # tolerates it, and existing files already have it.
                    file_object.write(str(feature) + ' ')
                file_object.write('\n')

    def read_dataset(self, file_name):
        """Parse `file_name` back into a list of SVMTreeExample.

        The label (first token) is kept as a string; the remaining
        tokens are converted to floats. Blank lines are skipped instead
        of raising IndexError.
        """
        dataset = []
        with open(file_name) as f:
            for line in f:
                tokens = line.split()
                if not tokens:
                    continue  # tolerate blank/whitespace-only lines
                y = tokens[0]
                x = [float(t) for t in tokens[1:]]
                dataset.append(SVMTreeExample(x, y))
        return dataset
        

class DatasetForLibSVM(DatasetAcess):
    """Dataset persistence in libsvm's sparse text format:
    '<label> <index>:<value> <index>:<value> ...'.
    """

    def __init__(self):
        # Fixed on-disk locations for the three splits (relative to cwd).
        self.training_set_path = '../data/libsvm/training_set.tab'
        self.validation_set_path = '../data/libsvm/validation_set.tab'
        self.test_set_path = '../data/libsvm/test_set.tab'

    def save_dataset(self, file_name, dataset):
        """Write `dataset` to `file_name` in libsvm sparse format.

        Feature indices are emitted in ascending order (libsvm tools
        require it). The file is opened with a context manager so it is
        closed even if an example raises mid-write (the old code leaked
        the handle on exception).
        """
        with open(file_name, "w") as file_object:
            for example in dataset:
                file_object.write(str(example.get_numerical_category()))
                file_object.write(' ')
                sparse = example.sparse_data()
                # sorted() instead of keys()/sort(): same order, and also
                # works on dict views under Python 3.
                for index in sorted(sparse.keys()):
                    file_object.write(str(index) + ':' + str(sparse[index]) + ' ')
                file_object.write('\n')

    def read_dataset(self, file_name):
        """Load `file_name` via libsvm's svm_read_problem and wrap each
        (x, y) pair in an SVMTreeExample.
        """
        Y, X = svm_read_problem(file_name)
        return [SVMTreeExample(x, y) for x, y in zip(X, Y)]
        

class DatasetForOrange(DatasetAcess):
    """Writes datasets in Orange's tab-delimited .tab format, which has
    three header rows: attribute names, attribute types, and flags.
    """

    def __init__(self):
        # Fixed on-disk locations for the three splits (relative to cwd).
        self.training_set_path = '../data/orange/training_set.tab'
        self.validation_set_path = '../data/orange/validation_set.tab'
        self.test_set_path = '../data/orange/test_set.tab'

    def save_dataset(self, file_name, dataset):
        """Write `dataset` to `file_name` as an Orange .tab file.

        Feature vectors are densified via convert_sparse_to_vector, so
        every row has the same column count. The file is opened with a
        context manager so it is closed even on error (the old code
        skipped close() if a write raised).
        """
        sparse = [e.sparse_data() for e in dataset]
        vector_list = convert_sparse_to_vector(sparse)
        n = len(vector_list[0])
        with open(file_name, 'w') as output:
            # Header row 1: column names -- 'category' then w0..w(n-1).
            output.write('category' + ''.join('\tw' + str(i) for i in range(n)) + '\n')
            # Header row 2: types -- 'd' (discrete) class, 'c' (continuous) features.
            output.write('d\t' + 'c\t' * n + '\n')
            # Header row 3: flags -- first column is the class variable.
            output.write('class\n')
            for example, vector in zip(dataset, vector_list):
                # Spaces are replaced so the discrete value is one token.
                line = example.get_y().encode('utf-8').replace(' ', '_')
                for count in vector:
                    line += '\t' + str(count)
                output.write(line + '\n')
        
 
        
class DatasetForWeka(DatasetAcess):
    """Writes datasets in Weka's ARFF format."""

    def __init__(self):
        # Fixed on-disk locations for the three splits (relative to cwd).
        self.training_set_path = '../data/weka/training_set.arff'
        # NOTE(review): '..._skjet' looks like a typo for
        # 'validation_set.arff' -- confirm before renaming, since
        # previously written files carry the misspelled name.
        self.validation_set_path = '../data/weka/validation_skjet.arff'
        self.test_set_path = '../data/weka/test_set.arff'

    def save_dataset(self, file_name, dataset):
        """Write `dataset` to `file_name` as an ARFF file.

        Fixes vs. the previous version: the class attribute's value list
        no longer ends with a spurious trailing comma ('{A,B,}' is not
        valid ARFF), and the file is opened with a context manager so it
        is closed even if a write raises.
        """
        sparse = [e.sparse_data() for e in dataset]
        vector_list = convert_sparse_to_vector(sparse)
        n = len(vector_list[0])
        with open(file_name, 'w') as output:
            output.write('@RELATION claims\n\n')
            for i in range(n):
                output.write(' @ATTRIBUTE w' + str(i) + '  NUMERIC\n')
            # join() puts commas only *between* categories.
            output.write('@ATTRIBUTE class {' + ','.join(Example.categories) + '}\n\n')
            output.write('@DATA\n')
            for example, vector in zip(dataset, vector_list):
                values = [str(count) for count in vector]
                values.append(example.get_y().encode('utf-8'))
                output.write(','.join(values) + '\n')
        
         

if __name__ == '__main__':
    # Regenerate the custom-format train/validation/test splits.
    # create_new_datasets() is provided by the DatasetAcess base class.
    CustomDataSet().create_new_datasets()