from collections import Counter, defaultdict

import jieba
import numpy as np
import pandas as pd
from elasticsearch import Elasticsearch, helpers
from gensim import utils
from gensim.models import Word2Vec
from milvus import Milvus
from sklearn.manifold import TSNE


def segment():
    """Tokenize the raw search log with jieba (paddle mode).

    Reads data/search.txt line by line and writes space-joined tokens
    to data/search.segment.txt, one output line per input line.
    """
    jieba.enable_paddle()
    with open("data/search.txt", "r", encoding="utf8") as src, \
            open("data/search.segment.txt", "w", encoding="utf8") as dst:
        for raw_line in src:
            tokens = jieba.cut(raw_line, cut_all=False, use_paddle=True)
            dst.write(' '.join(tokens) + '\n')

class MyCorpus:
    """Streaming corpus for gensim: one preprocessed token list per line.

    Iterating the instance re-opens data/search.segment.txt each time,
    so Word2Vec can make multiple passes without holding the corpus in
    memory.
    """

    def __iter__(self):
        # Use a context manager so the file handle is closed when the
        # iteration finishes (the original leaked it to the GC).
        with open("data/search.segment.txt", "r", encoding="utf8") as f:
            for line in f:
                yield utils.simple_preprocess(line)

def train():
    """Train a Word2Vec model on the segmented corpus and persist it.

    Saves the trained model to data/search.model.
    """
    corpus = MyCorpus()
    # Build the model (min_count=1 keeps every token, 8 worker threads).
    w2v = Word2Vec(corpus, min_count=1, workers=8)
    w2v.save('data/search.model')

def get_word_count_df():
    """Count token frequencies in the segmented corpus and pickle them.

    Reads data/search.segment.txt, preprocesses each line with gensim's
    simple_preprocess, and writes data/word_count.pkl — a DataFrame with
    columns ["word", "count"].
    """
    # Counter.update folds a whole token list in one C-level call,
    # replacing the per-word defaultdict increment loop.
    counts = Counter()
    with open("data/search.segment.txt", "r", encoding="utf8") as f:
        for line in f:
            counts.update(utils.simple_preprocess(line))
    df = pd.DataFrame(data=counts.items(), columns=["word", "count"])
    df.to_pickle("data/word_count.pkl")

def get_vectors_df():
    """Project word vectors to 2-D with t-SNE and pickle word/coord pairs.

    Loads data/search.model and writes data/vectors.pkl, a DataFrame with
    columns ["vectors", "word"] where "vectors" is an "x,y" string.
    """
    model = Word2Vec.load("data/search.model")

    embeddings = np.asarray(model.wv.vectors)
    vocab = np.asarray(model.wv.index_to_key)

    # Reduce to 2 components; fixed random_state keeps the layout stable.
    reduced = TSNE(n_components=2, random_state=0).fit_transform(embeddings)
    coords = pd.DataFrame(reduced, columns=["x", "y"])
    # Serialize each coordinate pair as a comma-separated string.
    as_text = coords.apply(lambda row: str(row.x) + ',' + str(row.y), axis=1)

    out = pd.DataFrame({"vectors": as_text, "word": vocab})
    out.to_pickle("data/vectors.pkl")

def merge_dataframes():
    """Join word counts with their 2-D vectors and pickle the result.

    Inner-joins data/word_count.pkl and data/vectors.pkl on "word" and
    writes the combined table to data/word_database.pkl.
    """
    counts = pd.read_pickle("data/word_count.pkl")
    vectors = pd.read_pickle("data/vectors.pkl")
    counts.merge(vectors, on="word").to_pickle("data/word_database.pkl")

def insert_es():
    """Recreate the search_word_analyse index and bulk-load the word table.

    Reads data/word_database.pkl and indexes one document per row with
    fields id (row index), word, count and vectors.
    """
    es = Elasticsearch(["http://localhost:9200"])

    # ignore=[404]: the original raised NotFoundError on a fresh cluster
    # where the index does not exist yet; treat "already gone" as success.
    print(es.indices.delete("search_word_analyse", ignore=[404]))

    es.indices.create('search_word_analyse', body={
        "settings": {
            "index": {
                # Single local node: one shard, no replicas.
                "number_of_shards": 1,
                "number_of_replicas": 0,
            }
        },
        "mappings": {
            "_doc": {
                "properties": {
                    "word": {
                        "type": "text"
                    },
                    "id": {
                        "type": "keyword"
                    },
                    "count": {
                        "type": "keyword"
                    },
                    "vectors": {
                        "type": "keyword"
                    }
                }
            }
        }
    })

    actions = []
    word_database_df = pd.read_pickle("data/word_database.pkl")
    for i, row in word_database_df.iterrows():
        actions.append({
            '_op_type': 'index',
            '_index': 'search_word_analyse',
            '_type': '_doc',
            '_source': {
                'id': i,
                'word': row["word"],
                'count': row["count"],
                'vectors': row["vectors"],
            },
        })
    helpers.bulk(es, actions=actions)

def insert_milvus():
    """Recreate the search_word_analyse collection and load 2-D vectors.

    Reads data/word_database.pkl; each row's "x,y" vector string is
    parsed into a float pair and inserted with the row index as its id.
    """
    collection_name = 'search_word_analyse'
    milvus = Milvus(host='localhost', port='19530')

    print(milvus.drop_collection(collection_name))

    milvus.create_collection({
        "collection_name": collection_name,
        "dimension": 2,
    })

    word_database_df = pd.read_pickle("data/word_database.pkl")
    ids = []
    records = []
    for row_id, row in word_database_df.iterrows():
        ids.append(row_id)
        records.append([float(part) for part in row["vectors"].split(",")])
    print(milvus.insert(collection_name, records=records, ids=ids))
    

if __name__ == '__main__':
    # Pipeline stages — uncomment and run in this order to (re)build the
    # word database and load it into Elasticsearch / Milvus:
    # segment()
    # train()
    # get_word_count_df()
    # get_vectors_df()
    # merge_dataframes()
    # insert_es()
    # insert_milvus()
    pass
