'''
Created on Oct 30, 2012

@author: dolphinigle
'''

import document_query_processing as dqp
import comparison as cmpr
import relevanceFeedback as rf
import evaluator as eva

class Controller():
  '''Responsible as a gate between view and engine.

  Created once 'Commence War' (or process) is clicked.
  Will be used to ask for action on interactive and experimental
  queries.'''

  # Letter emitted for each tf variant in the SMART-style weighting code.
  _TF_TYPE_LETTERS = {
      'binary': 'b',
      'log': 'l',
      'augmented': 'a',
      'raw': 'n',
  }

  def _BuildTermWeightingCode(self, tf_type, is_use_idf, is_normalized):
    '''Build the three-letter weighting code for one vector side.

    tf_type is 'binary', 'log', 'augmented' or 'raw' (any other value
    contributes no tf letter, matching the original if/elif chains);
    is_use_idf and is_normalized are truthy flags.  Returns e.g. 'ltc'.'''
    return (self._TF_TYPE_LETTERS.get(tf_type, '')
            + ('t' if is_use_idf else 'n')
            + ('c' if is_normalized else 'n'))

  def _MakeDocumentTuples(self, document_id_list):
    '''Map document ids to (title, "author content", doc_id) tuples.

    Each id is looked up in self.dqp_instance.document_list, which is
    keyed by the *string* form of the id.'''
    document_relevant_list = []
    for document_id in document_id_list:
      document = self.dqp_instance.document_list[str(document_id)]
      document_relevant_list.append(
          (document.title,
           document.author + " " + document.content,
           document_id))
    return document_relevant_list

  def __init__(self,
               stopword_filename,  # Will be None if not given
               inverted_index_filename,  # Will be None if not given
               document_collection_filename,  # Will be None if not given
               query_filename,  # Will be None if not given
               relevance_judgment_filename,  # Will be None if not given
               is_use_stemming,  # 1 if stemming should be used, otherwise 0
               doc_tf_type,  # String representing tf type: 'binary', 'log', 'raw', or 'augmented'
               query_tf_type,  # idem above
               doc_is_use_idf,  # 1 if idf should be used, or 0.
               query_is_use_idf,  # idem above
               doc_is_normalized,  # 1 if normalized should be used, or 0
               query_is_normalized):  # idem above
    '''Build the engine: process the collection, run the file queries
    and rank every document against them.'''
    # Keep the relevance judgment filename; SavePerformanceToFile needs
    # it to run the evaluator later.
    self.relevance_judgment_filename = relevance_judgment_filename

    print('Initializing...')

    # Combined "doc.query" weighting code, e.g. 'ltc.ltn'.  The same
    # letter scheme is used for both sides, so build each half with the
    # shared helper instead of two duplicated if/elif ladders.
    term_weighting_code = (
        self._BuildTermWeightingCode(doc_tf_type, doc_is_use_idf,
                                     doc_is_normalized)
        + '.'
        + self._BuildTermWeightingCode(query_tf_type, query_is_use_idf,
                                       query_is_normalized))

    self.dqp_instance = dqp.DocumentQueryProcessing(
        document_collection_filename, stopword_filename, is_use_stemming,
        term_weighting_code, inverted_index_filename)
    self.dqp_instance.documentCollectionProcessing()

    # Instance responsible for comparison and ranking computation.
    self.cmpr_instance = cmpr.Comparison(self.dqp_instance)
    self.cmpr_instance.searchFileQuery(inverted_index_filename,
                                       query_filename)

    # Rank the whole collection against the experimental (file) queries.
    self.cmpr_instance.calcAllRank(
        self.cmpr_instance.calcQueryVector(
            self.dqp_instance.query_term_weighting_code),
        self.cmpr_instance.calcDocumentVector(),
        0.0)

  def SaveResultToFile(self, filename):
    '''The user asks you to save the experimental results to the given filename'''
    # Remember where the retrieval result lives: SavePerformanceToFile
    # reads self.retrieval_result_file when building the Evaluator.
    self.retrieval_result_file = filename
    print('Save result to file %s' % filename)
    self.cmpr_instance.writeRetrievalResult(filename)

  def SavePerformanceToFile(self, filename):
    '''Saves performance result to the given filename.

    NOTE(review): requires SaveResultToFile() to have been called first,
    otherwise self.retrieval_result_file does not exist.'''
    self.evaluation_file = filename
    print('Save performance to file %s' % filename)
    ev = eva.Evaluator(self.retrieval_result_file,
                       self.relevance_judgment_filename)
    ev.writeResult(self.evaluation_file)

  def ExecuteInteractiveQuery(self, query):
    '''Executes a query in interactive mode and return a list of results

    More exactly, the return type will be a list of documents in order from the
    most relevant, second relevant, etc. Each document should be a 3-tuple,
    the first element is the title of the document (if present), the second
    should be the content of the document, and the third should be the doc id
    of the document.'''
    self.cmpr_instance.searchOneQuery(
        self.dqp_instance.inverted_file_saved_path, query)
    # calcAllRank returns a list of (query, ranked_doc_ids) pairs; there
    # is only one interactive query, so take the id list of entry 0.
    document_relevant_id_list = self.cmpr_instance.calcAllRank(
        self.cmpr_instance.calcQueryVector(
            self.dqp_instance.query_term_weighting_code),
        self.cmpr_instance.calcDocumentVector(),
        0.0)[0][1]
    return self._MakeDocumentTuples(document_relevant_id_list)

  def ExecuteInteractiveQueryWithRelevanceFeedback(
      self, query, relevant_documents, irrelevant_documents, method, **kwargs):
    '''Executes a query in interactive mode using relevance feedback and return
    the result.

    The format is the same with ExecuteInteractiveQuery

    relevant and irrelevant documents are list of document IDs of the
    documents.
    relevant_documents must already be sorted from the most to the least relevant.
    idem irrelevant documents

    method is a string of either "rochio", "ide_regular", or "ide_dec_hi"

    If method is rochio, kwargs['beta'] and kwargs['gamma'] will contain the
    beta and gamma values for the method.'''
    rf_instance = rf.relevanceFeedback(
        query, relevant_documents, irrelevant_documents,
        self.dqp_instance.inverted_file_saved_path, self.cmpr_instance)
    document_relevant_id_list = []
    if method == "rochio":
      # NOTE: the view sends 'rochio' (sic); the engine's name for the
      # algorithm is 'rocchio'.
      document_relevant_id_list = rf_instance.calculate(
          "rocchio", kwargs['beta'], kwargs['gamma'])
    elif method == "ide_regular":
      document_relevant_id_list = rf_instance.calculate("ide", 1, 1)
    elif method == "ide_dec_hi":
      document_relevant_id_list = rf_instance.calculate("dechi", 1, 1)

    print("document relevant oleh feedback : %s" % (document_relevant_id_list,))
    return self._MakeDocumentTuples(document_relevant_id_list)
