# -*- coding:utf-8 -*-
import jieba
jieba.load_userdict("data/userdict.txt")
import jieba.posseg as pseg
import pandas as pd
import numpy as np
import codecs
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
from gensim.models import word2vec
import math


def square(x):
    """Return x squared (used when computing vector norms)."""
    return x * x

# Stop-word list (one entry per line), used by segment_row to filter tokens.
# `with` ensures the file handle is closed; the original opened the file
# via codecs.open(...).readlines() and leaked the handle.
with codecs.open('data/stopwords', 'r', encoding='utf8') as stopword_file:
    stopwords = [w.strip() for w in stopword_file]

def readFile(filename, col, number):
    """Read the first `number` values of column `col` from a CSV file.

    Returns a list of single-element lists, e.g. [[v1], [v2], ...],
    matching the shape produced by converting the one-column frame
    to a nested list.
    """
    frame = pd.read_csv(filename)
    head = frame[[col]].head(number)
    return head.values.tolist()

def segment_text(filename, col, number):
    """Segment the first `number` rows of column `col` in `filename`.

    Returns a list of token lists, one inner list per row:
    [[tok, tok, ...], [tok, ...], ...].
    """
    return [segment_row(record) for record in readFile(filename, col, number)]

def segment_row(row):
    """Tokenize one record with jieba POS segmentation, dropping stop words.

    `row` is a list of string fragments: they are joined into one string
    and all spaces removed before segmentation.
    """
    text = ''.join(row).replace(' ', '')
    return [word for word, _flag in pseg.cut(text) if word not in stopwords]

# Cosine similarity between two equal-length numeric vectors.
def cosin_similarity(sentence_a, sentence_b):
    """Return dot(a, b) / (|a| * |b|) for two numeric sequences.

    Parameters are iterated in lockstep (extra trailing elements in the
    longer sequence are ignored, as with zip).

    Returns 0.0 when either vector has zero norm (empty or all-zero
    input); the original raised ZeroDivisionError in that case.
    """
    dot = sum(a * b for a, b in zip(sentence_a, sentence_b))
    norm_a = math.sqrt(sum(a * a for a in sentence_a))
    norm_b = math.sqrt(sum(b * b for b in sentence_b))
    denom = norm_a * norm_b
    if denom == 0:
        # Guard: degenerate vectors previously crashed with a
        # ZeroDivisionError here.
        return 0.0
    return float(dot) / denom

if __name__ == '__main__':
    # Train word2vec on the segmented 'address' columns of both hotel
    # CSVs (ctrip + elong), 2000 rows each.
    corpus_ctrip = segment_text('data/ctrip.csv', 'address', 2000)
    corpus_elong = segment_text('data/elong.csv', 'address', 2000)
    corpus = corpus_ctrip+corpus_elong
    # min_count=1 keeps every token, so the model[word] lookups below
    # cannot miss a word seen during segmentation.
    model = word2vec.Word2Vec(corpus, min_count=1)
    print model
    print model.similarity(u"奉贤区",u"北京")

    df_ctrip = pd.read_csv('data/ctrip.csv')
    df_elong = pd.read_csv('data/elong.csv')
    # data_ctrip = readFile('data/ctrip.csv', 'address', 10)
    # data_elong = readFile('data/elong.csv', 'address', 10)
    for row_ctrip in np.array(df_ctrip[['title','address']].head(2000)).tolist():
        #row[0]:title row[1]:address
        print (''.join(row_ctrip[1])).replace(' ','')
        word2vec_row_ctrip = [] # sentence vector as a list of per-word vectors
        sentence_ctrip = []
        # jieba POS segmentation
        words = pseg.cut((''.join(row_ctrip[1])).replace(' ',''))  # row is a list: join to one string, then drop spaces
        for word, flag in words:
            if word not in stopwords:
                # print word
                sentence_ctrip.append(word)
                # NOTE(review): model[word] is removed in gensim>=4; there
                # it must be model.wv[word].
                word2vec_row_ctrip.append(model[word])
        # Sentence vector = element-wise SUM of the word vectors (the
        # original comment said "word average", but no division by the
        # word count is performed).
        word2vec_sentence_ctrip = map(sum,zip(*word2vec_row_ctrip))
        print word2vec_sentence_ctrip
        # Compare against elong rows (only the first pair is actually
        # compared — see the two break statements below).
        for row_elong in np.array(df_elong[['title','address']].head(2000)).tolist():
            word2vec_row_elong = []
            sentence_elong = []
            # jieba POS segmentation
            words = pseg.cut((''.join(row_elong[1])).replace(' ',''))  # row is a list: join to one string, then drop spaces
            for word, flag in words:
                if word not in stopwords:
                    #print word,model[word]
                    sentence_elong.append(word)
                    # NOTE(review): model[word] is removed in gensim>=4; there
                    # it must be model.wv[word].
                    word2vec_row_elong.append(model[word])
            # print word2vec_row_elong
            # Element-wise sum of word vectors, as for the ctrip sentence.
            word2vec_sentence_elong = map(sum, zip(*word2vec_row_elong))
            print word2vec_sentence_elong

            '''
            #向量求相似度
            ctrip_mor = map(square,word2vec_sentence_ctrip)
            elong_mor = map(square,word2vec_sentence_elong)
            print sum(ctrip_mor)
            s = 0
            for i,j in zip(word2vec_sentence_ctrip, word2vec_sentence_elong):
                s += i*j
            sim = s/math.sqrt(sum(ctrip_mor)*sum(elong_mor))
            '''
            # Cosine similarity between the two sentence vectors.
            sim = cosin_similarity(word2vec_sentence_ctrip, word2vec_sentence_elong)
            print ''.join(row_ctrip)+'~'*10+''.join(row_elong)+'~'*10+'sim'
            print sim

            # #词语求相似度(列表与列表直接求相似度)
            # print ''.join(row_ctrip)+'~'*10+''.join(row_elong)+'~'*10+'相似度为'
            # print model.n_similarity(sentence_ctrip, sentence_elong)
            # Debug run: compare only the first ctrip row with the first
            # elong row, then stop.
            break
        break

