'''
Created on Jun 11, 2014

@author: kieubinh
Extracts 4 similarity features for RITE textual-entailment pairs (t1, t2):
- character overlap, MeCab word overlap, an asymmetric word-overlap
  ("jaccard") score, and normalized longest common substring,
then writes them in LIBSVM format ("label 1:v 2:v 3:v 4:v #id").
'''

# -*- coding: utf-8 -*-

linkInput = "Data\\RITE-VAL_JA_test_systemval.xml"
linkOutput="Data\\features_rite_val_test_1.txt"

#linkInput = "Data\\RITE2_JA_testlabel_unittest.xml"
#linkOutput="Data\\features_train_unittest.txt"


import MeCab
import sys
sys.path.append('../ReadData/')
import xml2dict

import os
directory_path = os.getcwd()

def readData(linkInput):
    """Parse the RITE XML file located under the project root.

    The project root is recovered by truncating the current working
    directory just past the "UnitTest" folder (find() index + 9 keeps
    "UnitTest" plus, presumably, the trailing path separator — verify
    against the actual directory layout).

    Returns the nested dict produced by xml2dict.XML2Dict.parse().
    """
    root_path = directory_path[:directory_path.find("UnitTest") + 9]
    parser = xml2dict.XML2Dict()
    return parser.parse(root_path + linkInput)

def extractLabel(doc):
    """Map each pair id to a binary label.

    Pairs labeled 'Y', 'F' or 'B' (the multi-class positive labels) map
    to 1, any other label maps to 0.  Pairs carrying no 'label' entry at
    all (unlabeled test data) default to 1 so the feature file still
    lines up row-for-row with the input.

    Returns a dict {pair_id: 0 or 1}.
    """
    label = {}
    for pair in doc['dataset']['pair']:
        # BUG FIX: the original tested  'label' not in doc['dataset']['pair']
        # (membership in the *list of pairs*, always False), so the
        # unlabeled-data branch could never fire.  Test against the pair.
        if 'label' not in pair:
            label[pair['id']['value']] = 1
        elif pair['label'].value in ('Y', 'F', 'B'):
            # Multi-label positives collapsed into one class (the original
            # repeated the same assignment in three elif branches).
            label[pair['id']['value']] = 1
        else:
            label[pair['id']['value']] = 0
    return label
    
def charSimilarity(doc):
    """Feature 1: character-overlap score per pair.

    For each pair, counts how many characters of t1 occur somewhere in
    t2 and vice versa (duplicates counted each occurrence), then returns
    the product of the two counts normalized by len(t1) * len(t2).

    Returns a dict {pair_id: float score}.
    """
    feature = {}
    for pair in doc['dataset']['pair']:
        text1 = pair['t1']['value']
        text2 = pair['t2']['value']
        hits1 = sum(1 for ch in text1 if ch in text2)
        hits2 = sum(1 for ch in text2 if ch in text1)
        feature[pair['id']['value']] = 1.0 * hits1 * hits2 / (len(text1) * len(text2))
    return feature

def wordSimilarity(doc):
    """Feature 2: word-overlap score per pair, tokenized with MeCab.

    For each pair: (#t1 tokens appearing in t2) * (#t2 tokens appearing
    in t1) / (token count of t1 * token count of t2).  Duplicate tokens
    are counted once per occurrence, matching the original loop.

    Returns a dict {pair_id: float score}.
    """
    feature = {}
    # One tagger serves every pair; the original rebuilt it twice per pair.
    tagger = MeCab.Tagger("-Owakati")
    for pair in doc['dataset']['pair']:
        # MeCab's Python-2 bindings expect byte strings.
        words_t1 = tagger.parse(pair['t1']['value'].encode('utf8')).split()
        words_t2 = tagger.parse(pair['t2']['value'].encode('utf8')).split()
        # Sets give O(1) membership; the original re-split and re-scanned
        # the opposite sentence for every single token (quadratic).
        set_t1 = set(words_t1)
        set_t2 = set(words_t2)
        similar_t1 = sum(1 for w in words_t1 if w in set_t2)
        similar_t2 = sum(1 for w in words_t2 if w in set_t1)
        feature[pair['id']['value']] = (1.0 * similar_t1 * similar_t2
                                        / (len(words_t1) * len(words_t2)))
    return feature
        

def jaccard(doc):
    """Feature 3: asymmetric word-overlap score per pair.

    NOTE: despite the name this is not a true Jaccard index.  The
    numerator is min(directed overlaps) and the denominator is
    (#t1 tokens missing from t2) + (total #t2 tokens) — preserved
    exactly from the original implementation.

    Returns a dict {pair_id: float score}.
    """
    feature = {}
    # One tagger serves every pair; the original rebuilt it twice per pair.
    tagger = MeCab.Tagger("-Owakati")
    for pair in doc['dataset']['pair']:
        # MeCab's Python-2 bindings expect byte strings.
        words_t1 = tagger.parse(pair['t1']['value'].encode('utf8')).split()
        words_t2 = tagger.parse(pair['t2']['value'].encode('utf8')).split()
        # Sets give O(1) membership; the original re-split and re-scanned
        # the opposite sentence for every single token (quadratic).
        set_t1 = set(words_t1)
        set_t2 = set(words_t2)
        similar_t1 = sum(1 for w in words_t1 if w in set_t2)
        others = len(words_t1) - similar_t1  # t1 tokens absent from t2
        similar_t2 = sum(1 for w in words_t2 if w in set_t1)
        feature[pair['id']['value']] = (1.0 * min(similar_t1, similar_t2)
                                        / (others + len(words_t2)))
    return feature

def longest_common_substring(s1, s2):
    """Return the longest contiguous substring common to s1 and s2.

    Classic dynamic program: m[x][y] holds the length of the common
    suffix of s1[:x] and s2[:y].  Returns '' when the strings share no
    character (or either is empty).
    """
    # Extra zero row/column so m[x-1][y-1] is always defined.
    # (range instead of Python-2-only xrange; valid on Py2 and Py3.)
    m = [[0] * (1 + len(s2)) for _ in range(1 + len(s1))]
    longest, x_longest = 0, 0
    for x in range(1, 1 + len(s1)):
        for y in range(1, 1 + len(s2)):
            if s1[x - 1] == s2[y - 1]:
                m[x][y] = m[x - 1][y - 1] + 1
                if m[x][y] > longest:
                    longest = m[x][y]
                    x_longest = x
            # BUG FIX: the original had `else: m[x][y] = 0` attached to the
            # inner `if m[x][y] > longest`, zeroing any matching run that
            # had not yet beaten the current best — e.g. it returned "ab"
            # instead of "cde" for ("abXcde", "abYcde").  The matrix is
            # zero-initialized, so no reset is needed on mismatch at all.
    return s1[x_longest - longest: x_longest]


def longestCommon(doc):
    """Feature 4: longest-common-substring score per pair.

    The score is len(LCS of t1 and t2) / len(t2), computed over the
    UTF-8 byte encodings of both texts.

    Returns a dict {pair_id: float score}.
    """
    feature = {}
    for pair in doc['dataset']['pair']:
        text1 = pair['t1']['value'].encode('utf8')
        text2 = pair['t2']['value'].encode('utf8')
        common = longest_common_substring(text1, text2)
        feature[pair['id']['value']] = 1.0 * len(common) / len(text2)
    return feature
     
def writeTraining(label, features):
    """Write one LIBSVM-format line per pair: "label 1:v 2:v ... ".

    Overwrites the output file (mode 'w').  `label` is {pair_id: 0/1};
    `features` is a list of {pair_id: value} dicts, one per feature.
    Row order follows label.keys(); every feature dict must contain
    every id present in `label`.
    """
    root_path = directory_path[:directory_path.find("UnitTest") + 9]
    outputFile = root_path + linkOutput
    print(outputFile)
    # `with` guarantees the handle is closed even if a lookup raises
    # mid-write (the original leaked the open file in that case).
    with open(outputFile, 'w') as outFile:
        for pair_id in label.keys():  # renamed: `id` shadowed the builtin
            fields = [str(label[pair_id])]
            fields.extend(str(i + 1) + ":" + str(features[i][pair_id])
                          for i in range(len(features)))
            # Trailing space before the newline preserved byte-for-byte.
            outFile.write(" ".join(fields) + " \n")
    
def writeTest(label, features):
    """Append LIBSVM-format lines tagged with their pair id:
    "label 1:v 2:v ... #pair_id".

    Appends to the output file (mode 'a'), so repeated runs accumulate.
    `label` is {pair_id: 0/1}; `features` is a list of {pair_id: value}
    dicts, one per feature.
    """
    root_path = directory_path[:directory_path.find("UnitTest") + 9]
    outputFile = root_path + linkOutput
    print(outputFile)
    # `with` guarantees the handle is closed even if a lookup raises
    # mid-write (the original leaked the open file in that case).
    with open(outputFile, 'a') as outFile:
        for pair_id in label.keys():  # renamed: `id` shadowed the builtin
            fields = [str(label[pair_id])]
            fields.extend(str(i + 1) + ":" + str(features[i][pair_id])
                          for i in range(len(features)))
            # " #<id>" suffix and spacing preserved byte-for-byte.
            outFile.write(" ".join(fields) + " #" + pair_id + "\n")

if __name__ == '__main__': 
    # Pipeline: parse the XML dataset, extract gold labels, compute the
    # four similarity features, then write one LIBSVM line per pair.
    doc = readData(linkInput)
    #output - training
    label = extractLabel(doc)
    print len(label)
    #feature 1: character overlap
    features=[]
    features.append(charSimilarity(doc))

    # feature 2: MeCab word overlap
    features.append(wordSimilarity(doc))
    #print feature2
    # feature 3: asymmetric word-overlap ("jaccard") score
    features.append(jaccard(doc))
    #print feature3
    # feature 4: normalized longest common substring
    features.append(longestCommon(doc))
    #print feature4

    # NOTE: writeTest appends (mode 'a') and tags each line with "#<pair id>";
    # writeTraining (overwrite, no id tag) is the alternative left unused here.
    writeTest(label, features)

