# -*- coding: utf-8 -*-
import os,sys
import math
import time
import profile

# Algorithm reference (blog post, in Chinese):
# http://www.cppblog.com/sunrise/archive/2012/04/12/171089.html

class wechatFAQ():
    """FAQ matcher.

    Segments an incoming question with mmseg, then scores it against a
    pre-segmented question file (data/Q_split.txt) using word-group
    libraries (synonyms and appositions) to compute pairwise word
    similarity.

    NOTE(review): ported from Python 2 (print statements, ``has_key``,
    ``xrange``) to Python 3; original semantics preserved, except that
    ``question_sim`` now actually tracks the best match (see below).
    """

    def __init__(self):
        # word -> 1-based group id, from data/synonym
        self.synDir = {}
        # word -> 1-based group id, from data/apposition
        self.nearDir = {}
        # stop words to drop during segmentation, from data/stop_word.txt
        self.stopwordDir = {}

    def load(self):
        """Load stop words and the synonym/apposition libraries from data/."""
        self.stopwordDir = {}
        # 'with' fixes the original's unclosed-handle pattern.
        with open("data/stop_word.txt", "r") as rfile:
            for line in rfile:
                word = line.strip()
                if word:  # skip blank lines instead of registering "" as a stop word
                    self.stopwordDir[word] = 1

        self.synDir = self.library_load('data/synonym')
        self.nearDir = self.library_load('data/apposition')

    def mmseg(self, question):
        """Segment *question* into a list of tokens, dropping stop words.

        NOTE(review): depends on the third-party ``mmseg`` module, which this
        file never imports -- confirm the missing ``import mmseg`` upstream.
        FIXME: reloading the default dictionaries on every call is wasteful;
        hoist ``dict_load_defaults()`` into ``load()`` once confirmed safe.
        """
        mmseg.dict_load_defaults()
        algor = mmseg.Algorithm(question)
        return [tok.text for tok in algor if tok.text not in self.stopwordDir]

    def questions_mmseg(self):
        """Segment every question in data/Q.txt and write the tokens,
        '/'-joined, one question per line, to data/Q_split.txt."""
        with open("data/Q.txt", "r") as rfile, \
             open("data/Q_split.txt", "w+") as wfile:
            for line in rfile:
                # str.join replaces the original manual last-separator loop.
                wfile.write("/".join(self.mmseg(line.strip())) + "\n")

    # library load
    def library_load(self, inputfile):
        """Read a word-group library file (one space-separated group per
        line) and return {word: 1-based group id}."""
        libraryDir = {}
        with open(inputfile, "r") as rfile:
            for depth, line in enumerate(rfile, start=1):
                for word in line.strip().split(" "):
                    libraryDir[word] = depth
        return libraryDir

    # words sim
    def words_sim(self, word1, word2):
        """Similarity of two words in [0, 1].

        1 for identical words; alfa/(alfa+dist) when both words belong to the
        same synonym (dist=1.5) or apposition (dist=3) group; else 0.0.
        """
        alfa = 4.8
        neardist = 3
        syndist = 1.5
        if word1 == word2:
            return 1
        if (word1 in self.synDir and word2 in self.synDir
                and self.synDir[word1] == self.synDir[word2]):
            return alfa / (alfa + syndist)
        if (word1 in self.nearDir and word2 in self.nearDir
                and self.nearDir[word1] == self.nearDir[word2]):
            return alfa / (alfa + neardist)
        return 0.0

    def sentences_sim(self, questionList1, questionList2):
        """Similarity of two segmented questions: sum of all pairwise word
        similarities, normalised by len1 + floor(|len1 - len2| / 2)."""
        simsum = 0.0
        for word1 in questionList1:
            for word2 in questionList2:
                simsum += self.words_sim(word1, word2)
        len1 = len(questionList1)
        len2 = len(questionList2)
        # '//' keeps the original Python 2 integer-division semantics.
        questionLen = float(len1) + abs(len1 - len2) // 2
        if questionLen == 0:
            # BUG FIX: the original raised ZeroDivisionError for an empty
            # first question; define the similarity as 0 instead.
            return 0.0
        return simsum / questionLen

    def question_sim(self, question):
        """Match *question* against data/Q_split.txt.

        Prints every candidate scoring above 0.2, then the best-scoring
        candidate and its score.
        """
        questionList1 = self.mmseg(question)
        MAX = -1
        maxmatch = ""
        with open('data/Q_split.txt', 'r') as rfile:
            for line in rfile:
                line = line.strip()
                questionList2 = line.split("/")
                q_sim = self.sentences_sim(questionList1, questionList2)
                if q_sim > 0.2:
                    print(q_sim, "  ", line)
                # BUG FIX: the original never updated MAX/maxmatch, so the
                # final line always printed '-1  ""'.
                if q_sim > MAX:
                    MAX = q_sim
                    maxmatch = line
        print(MAX, "  ", maxmatch)

if __name__ == "__main__":
    t = wechatFAQ()

    # BUG FIX: time.clock() was removed in Python 3.8;
    # time.perf_counter() is the recommended replacement for timing.
    starttime = time.perf_counter()
    t.load()
    #t.questions_mmseg()  # uncomment to regenerate data/Q_split.txt
    endtime = time.perf_counter()
    print(endtime - starttime)

    starttime = time.perf_counter()
    t.question_sim('使用手机刷卡器如何保证安全')
    endtime = time.perf_counter()
    print(endtime - starttime)
    ##t.load()
    ##profile.run("t.question_sim('交强险是什么啊？')")
