# BigData course project
# Serial version of training algorithm for SOM
# Train set class

import sys
from util import *
from sparse_vec import SparseVec

class TrainSet:
    """A training set for SOM training, read from a text file.

    One train vector per line. Only the raw text lines are stored;
    SparseVec objects are materialized lazily via the index operator
    to keep memory usage low.
    """

    # constructor knows how to build from file (using sparse vectors)
    def __init__(self, dim, size, file):
        """Read `size` lines from `file`, each encoding a sparse vector
        of dimension `dim`.  Logs and re-raises IOError on failure.
        """
        self.dim = dim
        self.size = size
        try:
            f = open(file, "r")
        except IOError:
            log("Could not read trainset file %s\n", file)
            raise
        try:
            # explicit loop (not a comprehension) so `i` is reliably in
            # scope for the error message below; the finally clause
            # guarantees the file is closed even if a readline fails
            # part-way through
            self.lines = []
            for i in range(size):
                self.lines.append(f.readline())
        except IOError:
            log("Could not read trainset line %d\n", i)
            raise
        finally:
            f.close()

    # dump the trainset (one train vector per line)
    # materializes all vectors, so this is very expensive (just for debugging)
    def dump(self, file):
        vs = [self[i] for i in range(len(self.lines))]
        dump_vector_list("trainset", vs, file)

    # avoid exposing our inner line list; expose index operator instead,
    # which materializes a SparseVec on demand
    def __getitem__(self, i):
        return SparseVec(self.dim, self.lines[i])
