'''
@author: Jang, KyoungRok [20114470]
'''
from __future__ import division
from math import log, e
import sys
import csv

class Document:
    """A bag-of-words document: maps word ids to their frequencies."""

    def __init__(self, doc_id):
        self._doc_id = doc_id
        self._words = {}  # word_id -> frequency

    def get_id(self):
        """Return the document id."""
        return self._doc_id

    def get_word_list(self):
        """Return the ids of the words occurring in this document."""
        return self._words.keys()

    def add_word_freq(self, word_id, word_freq):
        """Record that `word_id` occurs `word_freq` times in this document."""
        self._words[word_id] = word_freq

    def get_word_freq(self, word_id):
        """Return the frequency of `word_id`, or 0 if the word is absent."""
        # dict.get replaces the Python-2-only dict.has_key() branch and
        # behaves identically on both Python 2 and Python 3.
        return self._words.get(word_id, 0)

    def __str__(self):
        return 'id: %d, words: %d' % (self._doc_id, len(self._words))

class FileParser:
    """Parses the train/test/unknown data files into Document objects.

    Data files contain one "doc_id word_id word_freq" triple per line;
    label files contain one integer class label per line, where the
    (1-based) line number is the document id.
    """

    DATA_PATH = './data/'
    TRAIN_DATA_PATH = DATA_PATH + 'NBC_train.data'
    TRAIN_LABEL_PATH = DATA_PATH + 'NBC_train.label'
    TEST_DATA_PATH = DATA_PATH + 'NBC_test.data'
    TEST_LABEL_PATH = DATA_PATH + 'NBC_test.label'
    UNKNOWN_DATA_PATH = DATA_PATH + 'NBC_unknown.data'

    def __init__(self):
        # This state used to be declared at class level, which would have
        # shared it across every FileParser instance; keep it per-instance.
        self.train_files = {}
        self.test_files = {}
        self.unknown_file = None

        self.train_documents = None
        self.test_documents = None
        self.unknown_documents = None

        # vocabulary (set of word ids) discovered so far
        self.vocabulary = set()

        self.open_files()
        try:
            self.parse_train_file()
            self.parse_test_file()
            self.parse_unknown_file()
        finally:
            # close the input files even if parsing fails part-way
            self.close_files()

    def get_discovered_vocabulary(self):
        """Return the set of word ids seen in any parsed file."""
        return self.vocabulary

    def get_train_documents(self):
        """Return {label: [Document, ...]} for the training set."""
        return self.train_documents

    def get_test_documents(self):
        """Return {label: [Document, ...]} for the test set."""
        return self.test_documents

    def get_unknown_documents(self):
        """Return {'unlabeled': [Document, ...]} for the unknown set."""
        return self.unknown_documents

    def parse_train_file(self):
        print('Start parsing the training file.')
        self.train_documents = self.parse_labeled_files(self.train_files)
        print('Parsing done.')

    def parse_test_file(self):
        print('Start parsing the test file.')
        self.test_documents = self.parse_labeled_files(self.test_files)
        print('Parsing done.')

    def parse_unknown_file(self):
        print('Start parsing the unknown file.')
        self.unknown_documents = {'unlabeled': self.parse_unlabeled_file(self.unknown_file)}
        print('Parsing done.')

    def _parse_data_lines(self, data_file, documents):
        """Parse "doc_id word_id word_freq" lines from `data_file` into
        `documents` ({doc_id: Document}), adding every word id seen to
        the shared vocabulary."""
        for line in data_file:
            doc_id, word_id, word_freq = [int(tok) for tok in line.split()]

            if doc_id not in documents:
                documents[doc_id] = Document(doc_id)
            documents[doc_id].add_word_freq(word_id, word_freq)

            # add the discovered word to the vocabulary
            self.vocabulary.add(word_id)

    def parse_labeled_files(self, labeled_files):
        """Parse a {'data': file, 'label': file} pair.

        Returns {label: [Document, ...]} for labels 1..20 (the data set's
        fixed class ids); classes with no documents map to empty lists.
        """
        # parse labels: the 1-based line number is the document id
        labels = {}
        for idx, label in enumerate(labeled_files['label']):
            labels[idx + 1] = int(label)

        # parse the document/word/frequency triples
        documents = {}
        self._parse_data_lines(labeled_files['data'], documents)

        # organize documents by class (label)
        labeled_documents = {}
        for label in range(1, 21):
            labeled_documents[label] = []
        for doc_id in labels:
            labeled_documents[labels[doc_id]].append(documents[doc_id])

        return labeled_documents

    def parse_unlabeled_file(self, unlabeled_file):
        """Parse an unlabeled data file; return the list of Documents."""
        unlabeled_documents = {}
        self._parse_data_lines(unlabeled_file, unlabeled_documents)
        # list() keeps the Python-2 behavior (a real list) on Python 3 too
        return list(unlabeled_documents.values())

    def open_files(self):
        """Open all five input files for reading."""
        self.train_files['data'] = open(self.TRAIN_DATA_PATH, 'r')
        self.train_files['label'] = open(self.TRAIN_LABEL_PATH, 'r')
        self.test_files['data'] = open(self.TEST_DATA_PATH, 'r')
        self.test_files['label'] = open(self.TEST_LABEL_PATH, 'r')
        self.unknown_file = open(self.UNKNOWN_DATA_PATH, 'r')

    def close_files(self):
        """Close every input file that was opened."""
        for input_file in list(self.train_files.values()) + list(self.test_files.values()):
            input_file.close()
        if self.unknown_file is not None:
            self.unknown_file.close()

    def _write_debug_document(self, output, doc):
        """Write one document as "document <id> => {word: freq, ...}"."""
        output.write('document ' + str(doc.get_id()) + ' => ')
        output.write('{')
        for word_id in sorted(doc.get_word_list()):
            output.write('%d: %d, ' % (word_id, doc.get_word_freq(word_id)))
        output.write('}')
        output.write('\n')

    def debug_labeled_document(self, documents, out='debug.log'):
        """Dump {label: [Document, ...]} to `out` for manual inspection."""
        with open(out, 'w') as output:
            for label in documents:
                output.write('[%d]\n' % label)
                for doc in documents[label]:
                    self._write_debug_document(output, doc)
                output.write('\n')

    def debug_unlabeled_document(self, documents, out='debug.log'):
        """Dump a list of Documents to `out` for manual inspection."""
        with open(out, 'w') as output:
            for doc in documents:
                self._write_debug_document(output, doc)
        
class NaiveBayesClassifier:
    """Multinomial naive Bayes text classifier with add-one smoothing.

    Trained from {label: [Document, ...]} mappings; classification works
    in log space to avoid floating-point underflow on long documents.
    """

    def __init__(self):
        # Model state used to be declared as class attributes, which would
        # have been shared across classifier instances; keep it per-instance.
        self.labels = []
        self.V = set()                   # vocabulary V (set of word ids)
        self.Ck = {}                     # priors p(Ck)
        self.prob_word_in_label = {}     # likelihoods p(wj|Ck)

    def set_vocabulary(self, vocabulary):
        """Set the vocabulary (set of word ids) used for smoothing."""
        self.V = vocabulary

    def train(self, train_documents):
        """Estimate p(Ck) and p(wj|Ck) from {label: [Document, ...]}.

        Laplace (add-one) smoothing over the vocabulary guarantees a
        non-zero likelihood for every vocabulary word in every class.
        """
        print('Start training')

        # list() keeps .keys() usable as a list on both Python 2 and 3
        self.labels = list(train_documents.keys())

        # N: total number of training documents (prior denominator)
        N = sum(len(train_documents[label]) for label in self.labels)

        # B: vocabulary size (smoothing term in the likelihood denominator)
        B = len(self.V)

        # priors p(Ck) = nk / N (true division via the __future__ import)
        for label in self.labels:
            self.Ck[label] = len(train_documents[label]) / N

        # likelihoods p(wj|Ck)
        for label in self.labels:
            print('Class %d training is started!' % label)

            # count word occurrences within this class (sum of Tct')
            total_words_in_label = 0
            freq_of_word_in_label = dict.fromkeys(self.V, 0)
            for doc in train_documents[label]:
                for word in doc.get_word_list():
                    freq = doc.get_word_freq(word)
                    total_words_in_label += freq
                    freq_of_word_in_label[word] += freq

            # p(wj|Ck) with add-one smoothing
            self.prob_word_in_label[label] = {}
            for word in self.V:
                self.prob_word_in_label[label][word] = (
                    (freq_of_word_in_label[word] + 1) /
                    (total_words_in_label + B))

            print('Class %d training done!' % label)

        print('Training done.')
        print('\n')

    def classify(self, document):
        """Return (best_label, {label: log-probability}) for `document`.

        Raises RuntimeError if train() has not been called yet.
        """
        if not self.prob_word_in_label:
            raise RuntimeError('You should first train the NBC!')

        prob_of_document_being_label = {}
        for label in self.labels:
            # log p(Ck) + sum over words of freq * log p(wj|Ck)
            score = log(self.Ck[label])
            for word in document.get_word_list():
                freq = document.get_word_freq(word)
                score += log(self.prob_word_in_label[label][word]) * freq
            prob_of_document_being_label[label] = score

        best = max(prob_of_document_being_label, key=prob_of_document_being_label.get)
        return (best, prob_of_document_being_label)

    def test(self, test_documents):
        """Classify every labeled test document and print the accuracy."""
        print('Start Testing')

        correct = 0
        wrong = 0
        for label in test_documents:
            for doc in test_documents[label]:
                best_label, label_probs = self.classify(doc)

                # compare by value: the original used `is`, which only
                # worked because CPython interns small integers
                if best_label == label:
                    correct += 1
                else:
                    wrong += 1

        print('Precision on the test data: ' + str(correct / (correct + wrong)))

if __name__ == "__main__":
    # Parse the data files once; the parser closes them when done.
    file_parser = FileParser()

    train_documents = file_parser.get_train_documents()
    test_documents = file_parser.get_test_documents()
    unknown_documents = file_parser.get_unknown_documents()

    vocabulary = file_parser.get_discovered_vocabulary()

    # Train the classifier and measure it on the held-out test set.
    nbc = NaiveBayesClassifier()
    nbc.set_vocabulary(vocabulary)
    nbc.train(train_documents)
    nbc.test(test_documents)

    # classify the unknown documents.
    print('\n')
    print('Start classifying unknown data')

    outputs = []
    for doc in unknown_documents['unlabeled']:
        best_label, label_probs = nbc.classify(doc)

        # one row per document: id, predicted label, then the per-class
        # log-probabilities
        outputs.append([doc.get_id(), best_label] + list(label_probs.values()))

    print('Classifying finished.')

    # write the result in csv format
    # NOTE(review): 'wb' is the correct csv writer mode on Python 2 (this
    # file's dialect); Python 3 would need open(..., 'w', newline='').
    with open('20114470_NBC.out', 'wb') as output_file:
        writer = csv.writer(output_file)
        writer.writerows(outputs)

    print('The result is written to the file: 20114470_NBC.out')