from elasticsearch import Elasticsearch
from transformers import BertTokenizer, BertModel
import torch
import pandas as pd


# 获取嵌入
def embeddings_doc(doc, tokenizer, model, max_length=300):
    """Embed *doc* with a BERT-style model and return its [CLS] vector.

    The text is tokenized to a fixed length (padded/truncated to
    *max_length*), run through *model* without gradient tracking, and the
    hidden state of the [CLS] token from the last layer is returned as a
    1-D tensor representing the whole text.
    """
    encoded = tokenizer.encode_plus(
        doc,
        add_special_tokens=True,
        max_length=max_length,
        padding="max_length",
        truncation=True,
        return_attention_mask=True,
        return_tensors="pt",
    )

    # Inference only — no gradients needed.
    with torch.no_grad():
        outputs = model(
            encoded["input_ids"], attention_mask=encoded["attention_mask"]
        )

    # [CLS] sits at position 0 of the sequence; take it from the last
    # hidden layer and drop the batch dimension (batch size is 1 here).
    return outputs.last_hidden_state[:, 0, :][0]


def add_doc(index_name, id, embedding_filed, filed, exeID, es):
    """Create one document in Elasticsearch with its embedding vector.

    Args:
        index_name: target ES index.
        id: ES document _id (here: the CSV row index).
        embedding_filed: torch tensor embedding; stored as a plain list.
        filed: the original text of the ``field`` column.
        exeID: exercise id stored alongside the document.
        es: connected Elasticsearch client.

    Returns:
        The ES client response for the create call.
    """
    # NOTE: parameter names (``filed``/``embedding_filed``) are kept as-is
    # for caller compatibility, despite the typos.
    body = {"embedding": embedding_filed.tolist(), "field": filed, "id": exeID}
    # ``doc_type`` was deprecated in ES 7 and removed in ES 8 /
    # elasticsearch-py 8 (mapping types are gone) — pass only index and id.
    result = es.create(index=index_name, id=id, body=body)
    return result


def search_similar(index_name, query_text, tokenizer, model, es, top_k=3):
    """Return the *top_k* documents most similar to *query_text*.

    Embeds the query with ``embeddings_doc`` and runs a ``script_score``
    query that ranks every document by cosine similarity against the
    stored ``embedding`` dense vector (+1.0 to keep scores non-negative,
    as ES forbids negative scores).

    Returns:
        List of the ``_source`` dicts of the top hits.
    """
    query_embedding = embeddings_doc(query_text, tokenizer, model)
    query = {
        "query": {
            "script_score": {
                "query": {"match_all": {}},
                "script": {
                    "source": "cosineSimilarity(params.queryVector, 'embedding') + 1.0",
                    "lang": "painless",
                    "params": {"queryVector": query_embedding.tolist()},
                },
            }
        },
        "size": top_k,
    }
    res = es.search(index=index_name, body=query)
    # Fixed: removed a leftover debug print that dumped the full query
    # vector on every search.
    return [hit["_source"] for hit in res["hits"]["hits"]]


def main():
    """Index the first 500 CSV rows as embedding documents, then run a demo search."""
    # Local path to the chinese-roberta-wwm-ext-large checkpoint.
    # model_name = "E:\\study\\AIstudy\\python-api\\chinese-roberta-wwm-ext-large"
    model_name = "E:\\study\\AIstudyNode\\chinese-roberta-wwm-ext-large"
    # Elasticsearch connection settings.
    es_host = "http://127.0.0.1"
    es_port = 9200
    # NOTE(review): credentials are unused below because the original code
    # connected without auth — pass them (http_auth/basic_auth depending on
    # client version) if cluster security is enabled.
    es_user = "elastic"
    es_password = "elastic"
    index_name = "exercise"

    # CSV data file; expected columns: exeID, field.
    path = "E:\\study\\AIstudy\\python-api\\exerciseData2.csv"

    # Tokenizer and model.
    tokenizer = BertTokenizer.from_pretrained(model_name)
    model = BertModel.from_pretrained(model_name)

    # Fixed: build the client from the configured host/port instead of a
    # hard-coded URL that ignored the settings above.
    es = Elasticsearch(f"{es_host}:{es_port}")

    # Read the CSV and write documents into ES.
    # NOTE: "ANSI" is a Windows-only encoding alias — TODO confirm the
    # file's real encoding (likely GBK) for portability.
    data = pd.read_csv(path, encoding="ANSI")
    for index, row in data.iterrows():
        # Index only the first 500 rows for testing.
        if index >= 500:
            break
        exeID = row["exeID"]
        field = row["field"]
        # Text -> embedding vector.
        embedding_field = embeddings_doc(field, tokenizer, model)
        result = add_doc(index_name, index, embedding_field, field, exeID, es)
        print(result)

    query_text = "社会学,经济学,语言学,政治学,计算机科学,生物学,化学,物理学"

    similar_documents = search_similar(index_name, query_text, tokenizer, model, es)
    for item in similar_documents:
        print("================================")
        print("field", item["field"])
        print("id", item["id"])


if __name__ == "__main__":
    #     main()
    similar_res = [1, 2, 3]
    for i in range(3):
        print(similar_res[i])
