# 实现对选择题，阅读题，翻译题，作文题的添加 原题库错题库
import json

import numpy as np
from elasticsearch import Elasticsearch
import os
import uuid
from openai import OpenAI

# Required configuration: route HTTP(S) traffic through the local proxy and
# pick up the OpenAI API key from the environment.
os.environ["http_proxy"] = "http://127.0.0.1:10808"
os.environ["https_proxy"] = "http://127.0.0.1:10808"
# SECURITY: an API key was previously hard-coded here. A key committed to
# source control is compromised and must be rotated; read it from the
# environment instead (empty string if unset).
openai_key = os.environ.get("OPENAI_API_KEY", "")


# Add a multiple-choice question to the database.
def add_choice_data_es(
    index_name, question_type, original_vector, keyword_vector, keyword_text
):
    """
    Index a multiple-choice question document in Elasticsearch.

    Fields that do not apply to choice questions (reading type, subject
    field, genre) are filled with neutral placeholders so all question
    types share one mapping.

    :param index_name: target index, e.g. "wrong_question" or "original_question"
    :param question_type: question category tag, used for filtering on search
    :param original_vector: embedding of the full question text (1536 floats)
    :param keyword_vector: embedding of the extracted keywords
    :param keyword_text: plain-text keywords
    :return: the Elasticsearch create response serialized as a JSON string
    """
    # Use a string id: uuid.UUID objects are not JSON-serializable with the
    # stdlib encoder, and the old local name `id` shadowed the builtin.
    doc_id = str(uuid.uuid4())
    es = Elasticsearch(["http://localhost:9200"])
    body = {
        "id": doc_id,
        "question_type": question_type,
        "original_vector": original_vector,
        "keyword_vector": keyword_vector,
        "keyword_text": keyword_text,
        # Placeholder vectors keep the mapping consistent across question types.
        "reading_type_vector": np.ones(1536).tolist(),
        "field_vector": np.ones(1536).tolist(),
        "field": "无",
        "genre": "无",
        "genre_vector": np.ones(1536).tolist(),
    }
    # NOTE(review): json.dumps assumes the client returns a plain dict
    # (elasticsearch-py 7.x behavior) — confirm against the installed client.
    res = es.create(index=index_name, id=doc_id, body=body)
    return json.dumps(res)


def get_gpt_embedding(api_key, text):
    """
    Embed *text* with OpenAI's ``text-embedding-ada-002`` model.

    :param api_key: OpenAI API key
    :param text: text to embed
    :return: numpy array of shape (1536,) holding the embedding
    """
    response = OpenAI(api_key=api_key).embeddings.create(
        input=[text], model="text-embedding-ada-002"
    )
    return np.array(response.data[0].embedding)


# Add a reading-comprehension question to the database.
def add_reading_data_es(
    index_name,
    question_type,
    original_vector,
    keyword_vector,
    keyword_text,
    reading_type_vector,
    field_vector,
    field,
):
    """
    Index a reading-comprehension question document in Elasticsearch.

    The genre fields do not apply to reading questions and are filled with
    neutral placeholders so all question types share one mapping.

    :param index_name: target index, e.g. "wrong_question" or "original_question"
    :param question_type: question category tag, used for filtering on search
    :param original_vector: embedding of the full question text (1536 floats)
    :param keyword_vector: embedding of the extracted keywords
    :param keyword_text: plain-text keywords
    :param reading_type_vector: embedding describing the reading sub-type
    :param field_vector: embedding describing the subject field
    :param field: subject-field label
    :return: the Elasticsearch create response serialized as a JSON string
    """
    # Use a string id: uuid.UUID objects are not JSON-serializable with the
    # stdlib encoder, and the old local name `id` shadowed the builtin.
    doc_id = str(uuid.uuid4())
    es = Elasticsearch(["http://localhost:9200"])
    body = {
        "id": doc_id,
        "question_type": question_type,
        "original_vector": original_vector,
        "keyword_vector": keyword_vector,
        "keyword_text": keyword_text,
        "reading_type_vector": reading_type_vector,
        "field_vector": field_vector,
        "field": field,
        # Placeholder genre keeps the mapping consistent across question types.
        "genre": "无",
        "genre_vector": np.ones(1536).tolist(),
    }
    res = es.create(index=index_name, id=doc_id, body=body)
    return json.dumps(res)


# Add a translation question to the database.
def add_translation_data_es(
    index_name,
    question_type,
    original_vector,
    keyword_vector,
    keyword_text,
    field_vector,
    field,
):
    """
    Index a translation question document in Elasticsearch.

    Reading-type and genre fields do not apply to translation questions and
    are filled with neutral placeholders so all question types share one
    mapping.

    :param index_name: target index, e.g. "wrong_question" or "original_question"
    :param question_type: question category tag, used for filtering on search
    :param original_vector: embedding of the full question text (1536 floats)
    :param keyword_vector: embedding of the extracted keywords
    :param keyword_text: plain-text keywords
    :param field_vector: embedding describing the subject field
    :param field: subject-field label
    :return: the Elasticsearch create response serialized as a JSON string
    """
    # Use a string id: uuid.UUID objects are not JSON-serializable with the
    # stdlib encoder, and the old local name `id` shadowed the builtin.
    doc_id = str(uuid.uuid4())
    es = Elasticsearch(["http://localhost:9200"])
    body = {
        "id": doc_id,
        "question_type": question_type,
        "original_vector": original_vector,
        "keyword_vector": keyword_vector,
        "keyword_text": keyword_text,
        # Placeholder vectors keep the mapping consistent across question types.
        "reading_type_vector": np.ones(1536).tolist(),
        "field_vector": field_vector,
        "field": field,
        "genre": "无",
        "genre_vector": np.ones(1536).tolist(),
    }
    res = es.create(index=index_name, id=doc_id, body=body)
    return json.dumps(res)


# Add a writing (essay) question to the database.
def add_writing_data_es(
    index_name,
    question_type,
    original_vector,
    keyword_vector,
    keyword_text,
    field_vector,
    field,
    genre,
    genre_vector,
):
    """
    Index a writing (essay) question document in Elasticsearch.

    The reading-type field does not apply to writing questions and is
    filled with a neutral placeholder so all question types share one
    mapping.

    :param index_name: target index, e.g. "wrong_question" or "original_question"
    :param question_type: question category tag, used for filtering on search
    :param original_vector: embedding of the full question text (1536 floats)
    :param keyword_vector: embedding of the extracted keywords
    :param keyword_text: plain-text keywords
    :param field_vector: embedding describing the subject field
    :param field: subject-field label
    :param genre: genre label of the essay
    :param genre_vector: embedding describing the genre
    :return: the Elasticsearch create response serialized as a JSON string
    """
    # Use a string id: uuid.UUID objects are not JSON-serializable with the
    # stdlib encoder, and the old local name `id` shadowed the builtin.
    doc_id = str(uuid.uuid4())
    es = Elasticsearch(["http://localhost:9200"])
    body = {
        "id": doc_id,
        "question_type": question_type,
        "original_vector": original_vector,
        "keyword_vector": keyword_vector,
        "keyword_text": keyword_text,
        # Placeholder vector keeps the mapping consistent across question types.
        "reading_type_vector": np.ones(1536).tolist(),
        "field_vector": field_vector,
        "field": field,
        "genre": genre,
        "genre_vector": genre_vector,
    }
    res = es.create(index=index_name, id=doc_id, body=body)
    return json.dumps(res)


# Find the questions whose full text is most similar to the query.
def search_similar_orginal_vector(index_name, query_vector, question_type, top_k):
    """
    Return the ids of the *top_k* documents whose ``original_vector`` is
    closest (cosine similarity) to *query_vector*, restricted to the given
    question type.

    :param index_name: index to search
    :param query_vector: query embedding (list of floats)
    :param question_type: question category to match
    :param top_k: number of hits to return
    :return: list of document ids, best match first
    """
    search_body = {
        "query": {
            "script_score": {
                "query": {"match": {"question_type": question_type}},
                "script": {
                    "source": "cosineSimilarity(params.queryVector, 'original_vector') + 1.0",
                    "lang": "painless",
                    "params": {"queryVector": query_vector},
                },
            }
        },
        "size": top_k,
    }
    client = Elasticsearch(["http://localhost:9200"])
    response = client.search(index=index_name, body=search_body)
    return [hit["_id"] for hit in response["hits"]["hits"]]


# Find the questions whose keywords are most similar to the query.
def search_similar_keyword_vector(index_name, query_vector, question_type, top_k=20):
    """
    Return the documents whose ``keyword_vector`` is closest (cosine
    similarity) to *query_vector*, restricted to the given question type.

    :param index_name: index to search
    :param query_vector: query embedding (list of floats)
    :param question_type: question category to match
    :param top_k: number of hits to return (default 20, matching the old
        hard-coded size)
    :return: list of ``_source`` documents, best match first
    """
    search_body = {
        "query": {
            "script_score": {
                "query": {"match": {"question_type": question_type}},
                "script": {
                    "source": "cosineSimilarity(params.queryVector, 'keyword_vector') + 1.0",
                    "lang": "painless",
                    "params": {"queryVector": query_vector},
                },
            }
        },
        "size": top_k,
    }
    es = Elasticsearch(["http://localhost:9200"])
    res = es.search(index=index_name, body=search_body)
    return [hit["_source"] for hit in res["hits"]["hits"]]


# Find the questions whose subject field is most similar to the query.
def search_similar_field_vector(index_name, query_vector, question_type, top_k=20):
    """
    Return the documents whose ``field_vector`` is closest (cosine
    similarity) to *query_vector*, restricted to the given question type.

    :param index_name: index to search
    :param query_vector: query embedding (list of floats)
    :param question_type: question category to match
    :param top_k: number of hits to return (default 20, matching the old
        hard-coded size)
    :return: list of ``_source`` documents, best match first
    """
    search_body = {
        "query": {
            "script_score": {
                "query": {"match": {"question_type": question_type}},
                "script": {
                    "source": "cosineSimilarity(params.queryVector, 'field_vector') + 1.0",
                    "lang": "painless",
                    "params": {"queryVector": query_vector},
                },
            }
        },
        "size": top_k,
    }
    es = Elasticsearch(["http://localhost:9200"])
    res = es.search(index=index_name, body=search_body)
    return [hit["_source"] for hit in res["hits"]["hits"]]


# Find the questions whose reading sub-type is most similar to the query.
def search_similar_reading_type_vector(
    index_name, query_vector, question_type, top_k=20
):
    """
    Return the documents whose ``reading_type_vector`` is closest (cosine
    similarity) to *query_vector*, restricted to the given question type.

    :param index_name: index to search
    :param query_vector: query embedding (list of floats)
    :param question_type: question category to match
    :param top_k: number of hits to return (default 20, matching the old
        hard-coded size)
    :return: list of ``_source`` documents, best match first
    """
    search_body = {
        "query": {
            "script_score": {
                "query": {"match": {"question_type": question_type}},
                "script": {
                    "source": "cosineSimilarity(params.queryVector, 'reading_type_vector') + 1.0",
                    "lang": "painless",
                    "params": {"queryVector": query_vector},
                },
            }
        },
        "size": top_k,
    }
    es = Elasticsearch(["http://localhost:9200"])
    res = es.search(index=index_name, body=search_body)
    return [hit["_source"] for hit in res["hits"]["hits"]]


#
# index_name = "original_question"
# question_type = "reading"
# original_vector = (np.ones(1536) * 3).tolist()
# keyword_vector = (np.ones(1536) * 3).tolist()
# keyword_text = "这是一个阅读题"
# reading_type_vector=(np.ones(1536) * 3).tolist()
# field_vector = (np.ones(1536) * 3).tolist()
# field = "科技"
# # add_choice_data_es(index_name=index_name,question_type=question_type,original_vector=original_vector,keyword_vector=keyword_vector,keyword_text=keyword_text)
# res = search_similar_orginal_vector(index_name,original_vector,question_type,3)
# print(res)
# # add_reading_data_es(index_name,question_type,original_vector,keyword_vector,keyword_text,reading_type_vector,field_vector,field)
