from elasticsearch import Elasticsearch
from elasticsearch_dsl import connections, Index, Document, Text, Integer, Boolean, Keyword, Search, Q, DenseVector
import numpy as np
from sparkai.llm.llm import ChatSparkLLM, ChunkPrintHandler
from sparkai.core.messages import ChatMessage
import os
import gpt.gpt_key as gpy_key

# Route all HTTP(S) traffic through a local proxy (e.g. a tunnel on port 7890).
os.environ["http_proxy"] = "http://127.0.0.1:7890"
os.environ["https_proxy"] = "http://127.0.0.1:7890"
spark_key = gpy_key.gpt_key

# Connect to the local Elasticsearch cluster. create_connection() registers a
# default connection in elasticsearch_dsl's global registry (which is what
# Document.save() uses) and also returns the client handle. Bind that handle
# to its own name instead of rebinding -- and thereby shadowing -- the
# imported `connections` module, as the original code did.
es_client = connections.create_connection(hosts=["http://localhost:9200"], timeout=20)

# iFlytek Spark LLM configuration.
# NOTE(review): credentials are hard-coded in source -- move them to
# environment variables or a secrets store before this file is shared
# or committed; the values below should be considered leaked.
SPARKAI_URL = 'wss://spark-api.xf-yun.com/v3.1/chat'
SPARKAI_APP_ID = '1d38b17a'
SPARKAI_API_SECRET = 'MTJjOGQ5YzE1ZWZjYjJkOWUxZTM4Mzcx'
SPARKAI_API_KEY = 'b33d686049c006fd12a7e9b345ec0409'
SPARKAI_DOMAIN = 'generalv3'

# Module-level, non-streaming chat client for the Spark v3.1 endpoint,
# shared by get_gpt_embedding() below.
spark = ChatSparkLLM(
    spark_api_url=SPARKAI_URL,
    spark_app_id=SPARKAI_APP_ID,
    spark_api_key=SPARKAI_API_KEY,
    spark_api_secret=SPARKAI_API_SECRET,
    spark_llm_domain=SPARKAI_DOMAIN,
    streaming=False,
)

class Questions(Document):
    """elasticsearch-dsl mapping for the "questions" index.

    Each document stores a subject/category label plus two dense
    embeddings (1536 dims -- presumably chosen to match the embedding
    model's output size; TODO confirm against the model actually used).
    """

    id = Integer()                       # application-level id (distinct from the ES _id)
    field = Text()                       # subject/category text
    field_vector = DenseVector(1536)     # embedding of `field`
    original_vector = DenseVector(1536)  # embedding of the original question text

    class Index:
        # Target Elasticsearch index name.
        name = "questions"

def get_gpt_embedding(api_key, text):
    """Compute an embedding for *text* via the module-level Spark client.

    :param api_key: API key. NOTE(review): currently unused -- the
        module-level `spark` client already carries its own credentials;
        keep or drop this parameter deliberately.
    :param text: text to embed.
    :return: numpy array, expected to be a 1536-dim vector (to match the
        DenseVector(1536) fields of `Questions`).
    """
    messages = [ChatMessage(role="user", content=text)]
    handler = ChunkPrintHandler()
    response = spark.generate([messages], callbacks=[handler])
    # NOTE(review): spark.generate() performs a *chat* completion; its
    # LLMResult-style return value is typically not subscriptable, and
    # chat responses do not normally carry embeddings. The original
    # comment here read "assumes the Spark model returns the embedding
    # in the 'embedding' field" -- i.e. an unverified assumption. Confirm
    # against the sparkai SDK, or switch to a dedicated embedding API;
    # as written this line likely raises at runtime.
    emb = response['embedding']
    return np.array(emb)

def _demo() -> None:
    """Embed a sample question plus its subject label and persist them.

    Side effects: two Spark API calls (via get_gpt_embedding) and one
    write to the "questions" Elasticsearch index.
    """
    sample_text = "你好呀"
    sample_field = "人与自然"
    text_vec = get_gpt_embedding(api_key=spark_key, text=sample_text)
    field_vec = get_gpt_embedding(api_key=spark_key, text=sample_field)
    # NOTE(review): DenseVector fields may require plain Python lists for
    # JSON serialization rather than numpy arrays -- confirm, or pass
    # `.tolist()` here.
    question = Questions(id=1, field=sample_field,
                         field_vector=field_vec, original_vector=text_vec)
    question.save()


# Guard the demo so importing this module no longer fires network calls
# and an Elasticsearch write as an import-time side effect.
if __name__ == "__main__":
    _demo()
