# coding=UTF-8
import sys

import numpy as np
import pandas as pd

sys.path.append(".")
sys.path.append("..")
INPUT = 'data/rokid_fenci_train.csv'
OUTPUT = "feature_data/all_word2vec_feature.csv"

from distance_mayi import *
from gensim.models.doc2vec import Doc2Vec
from gensim.models import doc2vec

def cosine(v1, v2):
    """Return the cosine similarity between two vectors.

    Both inputs are converted to numpy arrays; the result is the dot
    product divided by the product of the Euclidean norms (NaN if either
    vector has zero norm).
    """
    a, b = np.asarray(v1), np.asarray(v2)
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))


def concatenate(data):
    """Stack the two question columns of *data* into one long Series.

    Rows of ``question1`` come first, followed by all rows of
    ``question2``; the index is reset (merge by rows).

    Args:
        data: DataFrame with ``question1`` and ``question2`` columns.

    Returns:
        A single pandas Series of length ``2 * len(data)``.
    """
    # Series.append was removed in pandas 2.0; pd.concat is the
    # supported replacement and behaves identically here.
    return pd.concat([data['question1'], data['question2']],
                     ignore_index=True)


class LabeledLineSentence(object):
    """Iterable of gensim ``TaggedDocument`` objects.

    Pairs each document in ``doc_list`` with the tag at the same
    position in ``labels_list``, yielding one TaggedDocument per
    document on every iteration pass (re-iterable, as gensim requires).
    """

    def __init__(self, doc_list, labels_list):
        self.labels_list = labels_list
        self.doc_list = doc_list

    def __iter__(self):
        for position, words in enumerate(self.doc_list):
            tag = self.labels_list[position]
            yield doc2vec.TaggedDocument(words, [tag])


def get_dists_doc2vec(data):
    """Infer a Doc2Vec vector for each question in every row of *data*.

    Relies on the module-level globals ``model`` (trained Doc2Vec),
    ``start_alpha`` and ``infer_epoch``.

    Args:
        data: DataFrame whose last two columns hold the question1 /
            question2 documents.
            NOTE(review): ``infer_vector`` expects a token list — confirm
            these columns are tokenised, not raw strings.

    Returns:
        Tuple ``(docvec1s, docvec2s)`` of lists of inferred vectors,
        aligned by row.
    """
    docvec1s = []
    docvec2s = []
    for i in range(data.shape[0]):
        doc1 = data.iloc[i, -2]
        doc2 = data.iloc[i, -1]
        # gensim 4.x renamed infer_vector's `steps` parameter to `epochs`;
        # this file already uses 4.x-style constructor kwargs (vector_size=...).
        docvec1s.append(model.infer_vector(doc1, alpha=start_alpha,
                                           epochs=infer_epoch))
        docvec2s.append(model.infer_vector(doc2, alpha=start_alpha,
                                           epochs=infer_epoch))
    return docvec1s, docvec2s


# ---------------------------------------------------------------------------
# Script body: train a Doc2Vec model on the question pairs, infer one vector
# per question, and persist pairwise vector-distance features to CSV.
# ---------------------------------------------------------------------------

# src_test = 'df_test_spacylemmat_fullclean.csv'

# Training file is tab-separated, four columns, no header row.
df_train = pd.read_csv(INPUT, names=["id", "question1", "question2", "score"],
                       sep="\t", encoding="utf-8")
df_train = df_train[['id', 'question1', 'question2']]

data = df_train
X_train = data[['id', 'question1', 'question2']]
# Stack question1 rows on top of question2 rows: one document per question.
X = concatenate(X_train)

# One tag per document; the _1/_2 suffix distinguishes question1 from
# question2 for the same pair id.
labels = ['SENT_%s_1' % label for label in X_train['id'].tolist()]
labels += ['SENT_%s_2' % label for label in X_train['id'].tolist()]

docs = LabeledLineSentence(X.tolist(), labels)

model = Doc2Vec(vector_size=100, window=10, min_count=2, sample=1e-5,
                workers=8, epochs=20)
model.build_vocab(docs)
print('Model built.')
# `model.iter` was removed in gensim 4.x; `model.epochs` is the supported
# attribute and matches the epochs=20 passed to the constructor.
model.train(docs, total_examples=model.corpus_count, epochs=model.epochs)
print('Model trained.')

# Inference hyper-parameters read as globals by get_dists_doc2vec.
start_alpha = 0.01
infer_epoch = 10

docvec1s, docvec2s = get_dists_doc2vec(data)

docvec1s = np.array(docvec1s)  # shape: (n_pairs, 100)
docvec2s = np.array(docvec2s)  # shape: (n_pairs, 100)
data = np.stack([docvec1s, docvec2s], axis=1)  # shape: (n_pairs, 2, 100)

from kg import jobs

# Compute distance features for each (vector1, vector2) pair in parallel.
features = jobs.map_batch_parallel(
    data,
    item_mapper=get_one_pair_vector_distance,
    batch_size=1000,
)

# from sklearn import preprocessing
# features = preprocessing.scale(np.array(features))

# NOTE(review): `temp_columns` is presumably defined by the `distance_mayi`
# star import at the top of the file — confirm, otherwise this raises
# NameError. Also, the OUTPUT constant above names a different path than the
# one written here; confirm which file downstream code reads.
features_df = pd.DataFrame(features,
                           columns=["doc2vec_" + c for c in temp_columns])
features_df.to_csv("feature_data/doc2vec_feature.csv",
                   encoding="utf-8", index=False)




