#!/usr/bin/env python
#encoding=utf-8

import json
import jieba
import mysql.connector
import os
from sys import argv
import time

import sys
sys.path.append('../')
from xapian_index import Xapian_Index
from create_corpus import MyGensim
reload(sys)
sys.setdefaultencoding('utf-8')

#This class detects duplicate pages using the simhash algorithm.
#simhash maps page content, via feature extraction + hashing, to a fixed-width binary fingerprint (64 bits in this implementation).
#Its advantage: the more similar two pages are, the more bit positions their fingerprints share.
#Main functions: 1. simhash     returns the page's simhash value so it can be stored in the database.
#                2. isDuplicate checks whether a given simhash value is close enough to this page's simhash.
class Simhash():

    #self.duplicateBits : hamming-distance threshold below which two fingerprints count as duplicates
    #self.hashbits : number of bits in the generated simhash fingerprint
    #self.hash : the simhash fingerprint of this document's content
    def __init__(self, content, hashbits=64):
        self.duplicateBits = 3
        self.hashbits = hashbits
        self.hash = self.simhash(content)

    def __str__(self):
        return str(self.hash)

    #NOTE: `long` is the Python 2 builtin; under Python 3 use int(obj) instead.
    def __long__(self):
        return long(self.hash)

    def __float__(self):
        return float(self.hash)

    #func : compute the simhash fingerprint of a document
    #param :content : the page body text
    #return : the simhash fingerprint of content (self.hashbits bits wide)
    def simhash(self, content):
        #Tokenize the text and use each token's frequency as its weight.
        #(dict.get replaces the deprecated dict.has_key, which was removed in py3)
        word_dic = {}
        for token in jieba.cut(content):
            word_dic[token] = word_dic.get(token, 0) + 1

        #v[i] accumulates the weighted vote for bit i of the fingerprint.
        v = [0] * self.hashbits

        for token, weight in word_dic.items():
            raw_hash = self._string_hash(token)
            for i in range(self.hashbits):
                #Bit set in the token hash -> vote +weight, otherwise -weight.
                if raw_hash & (1 << i):
                    v[i] += weight
                else:
                    v[i] -= weight

        #A non-negative vote makes the fingerprint bit 1, a negative one makes it 0.
        fingerprint = 0
        for i in range(self.hashbits):
            if v[i] >= 0:
                fingerprint += 1 << i

        return fingerprint

    #func : variable-width variant of Python's builtin string hash,
    #       masked down to self.hashbits bits
    #param :v : the token to hash
    #return : a non-negative int in [0, 2**self.hashbits)
    def _string_hash(self, v):
        if v == "":
            return 0
        else:
            x = ord(v[0]) << 7
            m = 1000003
            mask = 2 ** self.hashbits - 1
            for c in v:
                x = ((x * m) ^ ord(c)) & mask
            x ^= len(v)
            #Kept from CPython's hash convention (-1 is reserved); after masking
            #x can never be negative, so this is only a harmless safeguard.
            if x == -1:
                x = -2
            return x

    #func : hamming distance between other_hash and this document's simhash
    #       (i.e. the number of differing bit positions)
    #param :other_hash : the fingerprint to compare against
    #return : the hamming distance as an int
    def hamming_distance(self, other_hash):
        x = (self.hash ^ other_hash) & ((1 << self.hashbits) - 1)
        hamming_distance = 0
        while x:
            hamming_distance += 1
            x &= x - 1  #clears the lowest set bit
        return hamming_distance

    #func : similarity between other_hash and this document's simhash,
    #       computed as min/max of the two integer values
    #param :other_hash : the fingerprint to compare against
    #return : a float ratio in [0, 1]
    def similarity(self, other_hash):
        a = float(self.hash)
        b = float(other_hash)
        if a > b:
            return b / a
        return a / b

    #func : decide whether other_hash is close enough to this document's
    #       simhash to count as a duplicate
    #param :other_hash : the fingerprint to compare against
    #return : True when the hamming distance is below self.duplicateBits
    def isDuplicate(self, other_hash):
        return self.hamming_distance(other_hash) < self.duplicateBits


class DB_controlter:
    #Thin base class: opens the MySQL connection and exposes a shared cursor.
    def __init__(self):
        #Connection settings for the local intelligence-analysis database.
        conn_args = dict(
            user='root',
            passwd='12345',
            db='Intelligence_analysis_system_db',
            host='127.0.0.1',
            charset="utf8",
            use_unicode=True,
            raise_on_warnings=False,
        )
        self.db = mysql.connector.connect(**conn_args)
        self.cursor = self.db.cursor()

class ImportToDataBase(DB_controlter):
    #Persists one crawled web page (plus its simhash fingerprint) into MySQL.
    def __init__(self):
        DB_controlter.__init__(self)

    #func : insert one page row; rolls back on any database error
    #param :title, link, text : page metadata and body
    #param :dow_time : download timestamp string
    #param :hash_value : simhash fingerprint (float; truncated to int as before)
    def insert(self, title, link, text, dow_time, hash_value):
        try:
            #Parameterized query: the driver escapes the values, fixing the
            #SQL-injection and quoting bugs of the old string interpolation.
            self.cursor.execute(
                "INSERT INTO WebPage_webpage(title, link, text, dow_time, hash_value) "
                "VALUES (%s, %s, %s, %s, %s)",
                (title, link, text, dow_time, int(hash_value)),
            )
            self.db.commit()
        except mysql.connector.Error:
            #Keep the original best-effort contract, but only swallow DB errors.
            self.db.rollback()
            
class ImportLDATopics(DB_controlter):
    #Loads an LDA topic dump (one topic's word distribution per line)
    #into the WebPage_lda_topics table.
    def __init__(self):
        DB_controlter.__init__(self)

    #func : read the dump file and insert one row per topic line
    #param :path : path of the .lda dump file
    def import_topics(self, path='./2015-03-10:15-44.lda'):
        #with-statement closes the file even if an insert raises
        with open(path, "r") as f:
            topics = f.read()
        for topic_no, words_distribution in enumerate(topics.split('\n')):
            self.insert(topic_no, words_distribution)

    #func : insert one (topic_no, words) row; rolls back on database errors
    #param :topic_no : zero-based topic index
    #param :words : the topic's word-distribution string
    def insert(self, topic_no, words):
        try:
            #Parameterized query instead of string interpolation
            #(SQL-injection safe, and no quoting bugs on words content).
            self.cursor.execute(
                "INSERT INTO WebPage_lda_topics(topic_no, words) VALUES (%s, %s)",
                (topic_no, words),
            )
            self.db.commit()
        except mysql.connector.Error:
            #Keep the original best-effort contract, but only swallow DB errors.
            self.db.rollback()

if __name__ == '__main__':

    script, dirname = argv
    if os.path.isdir(dirname):
        db = ImportToDataBase()
        
        #初始化xapian
        xapian_index = Xapian_Index()
        
        #导入LDA模型
        mygensim = MyGensim()
        lda = mygensim.get_ldamodel()

        for filename in os.listdir(dirname):
            filename = dirname + filename
            f = open(filename, "r")
            content = f.read()
            f.close()

            first_line = content.find('\n')
            second_line = content.find('\n', first_line+1)

            link = content[0:first_line]
            title = content[first_line+1:second_line]
            text = content[second_line+1:].decode("utf-8")
            hash_value = Simhash(text)
            dow_time = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime())

            print link
            print title
            #print type(text)
            #print type(hash_value.__float__())
            print dow_time
    
            db.insert(title, link, text, dow_time, hash_value.__float__())
            
            #将通过LDA模型计算出的主题分布放入索引部分
            doc_vec = mygensim.doc2bow(text)
            xapian_index.index_topic(item=link, topic_distribution=lda[doc_vec])
