import json

from llama_index.core import StorageContext, VectorStoreIndex
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.core.indices.service_context import ServiceContext
from llama_index.vector_stores.elasticsearch import ElasticsearchStore

from llama_index.core import Document
from elasticsearch import Elasticsearch

def prepare_record(record):
    """Build the embedding text and metadata dict for one JSONL record.

    Args:
        record: parsed JSON object with at least the keys ``@id``,
            ``chsTitle``, ``chtTitle``, ``classification`` (a dict with
            ``@id`` and ``fullname``), ``contribution`` (possibly empty
            list) and ``abstractCht``.

    Returns:
        ``(doc_text, metadata)`` — ``doc_text`` concatenates title, first
        author, classification and the traditional-Chinese abstract;
        ``metadata`` is the field subset stored alongside the vector.
    """
    # Fall back to a placeholder author when the record lists no contributors
    # (mirrors the original in-place default: [{"chsAgent": "无", ...}]).
    contribution = record["contribution"] or [{"chsAgent": "无", "chtAgent": "无"}]
    # Title/author prefix, then classification and the traditional-Chinese
    # (古文) abstract.  A simplified-Chinese variant would use
    # record["abstractChs"] instead of record["abstractCht"].
    doc_text = (
        f'书名：{record["chtTitle"]}，作者：{contribution[0]["chsAgent"]}，'
        f'分类：{record["classification"]["fullname"]}，摘要：{record["abstractCht"]}'
    )
    metadata = {
        "@id": record["@id"],
        "chsTitle": record["chsTitle"],
        "chtTitle": record["chtTitle"],
        "classification": {
            "@id": record["classification"]["@id"],
            "fullname": record["classification"]["fullname"],
        },
        "contribution": contribution,
        "abstractCht": record["abstractCht"],
    }
    return doc_text, metadata


if __name__ == '__main__':
    # Index names known in this cluster (kept for reference when switching
    # embedding models): local_llm_index, ada_002_index, abstract_index,
    # ancient_small_index, ancient_bge_index.
    #
    # SECURITY NOTE(review): credentials are hard-coded below; move them to
    # environment variables or a secrets store before sharing this script.
    es = ElasticsearchStore(
        index_name="ancient_bge_index",
        es_url="http://10.6.26.37:9200",
        es_user="elastic",
        es_password="fortune@123",
    )
    # Plain ES client, used (when the commented code in the loop below is
    # re-enabled) to store traditional-Chinese abstracts as key/value docs.
    index_name = "abstract_index"
    kv_es = Elasticsearch(hosts="http://10.6.26.37:9200", basic_auth=('elastic', 'fortune@123'))

    # Read the corpus and wrap each record as a llama-index Document.
    docs = []
    # These metadata keys are stored with each node but excluded from both
    # the embedding text and the LLM prompt.
    excluded_keys = ["@id", "chsTitle", "chtTitle", "classification", "contribution", "abstractCht"]
    with open("./data/siku-rel-1000.jsonl", "r", encoding="utf-8") as f:
        for raw_line in f:
            # Skip malformed records instead of aborting the whole load:
            # the original try/except wrapped the loop itself, so a single
            # bad line silently discarded every record after it.
            try:
                record = json.loads(raw_line)
                doc_text, metadata = prepare_record(record)
            except Exception as e:
                print(raw_line)
                print(e)
                continue
            docs.append(Document(
                text=doc_text,
                metadata=metadata,
                excluded_embed_metadata_keys=excluded_keys,
                excluded_llm_metadata_keys=excluded_keys,
                metadata_seperator="::",  # sic: llama-index spells the kwarg this way
                metadata_template="{key}=>{value}",
            ))
            # Optionally mirror the traditional abstract into the plain ES index:
            # doc_cht = {"key": record["@id"], "value": record["abstractCht"]}
            # kv_es.index(index=index_name, document=doc_cht)
            # kv_es.indices.refresh(index=index_name)

    # Local BGE embedding model (512-token window).
    embed_model = HuggingFaceEmbedding(model_name="./bge-large-zh-v1.5", max_length=512)
    # Alternative: Azure OpenAI embedding (key redacted — load from env).
    # embed_model = AzureOpenAIEmbedding(
    #     model="text-embedding-ada-002",
    #     azure_endpoint="https://fortunegptapp.openai.azure.com/",
    #     api_key="<AZURE-OPENAI-KEY-from-env>",
    #     embed_batch_size=1536,
    #     api_version="2023-07-01-preview",
    #     azure_deployment="embed",
    # )
    # Alternative: OpenAI embedding (key redacted — load from env).
    # embed_model = OpenAIEmbedding(
    #     api_key="<OPENAI-KEY-from-env>",
    #     api_base="http://45.79.30.81:8088/v1",
    #     model="text-embedding-3-small",
    #     dimension=1536
    # )

    # ServiceContext bundles the pipeline settings; llm=None because this
    # script only embeds and indexes, it never queries an LLM.
    service_context = ServiceContext.from_defaults(
        llm=None,
        embed_model=embed_model,
        chunk_size=512,
    )

    # Chunk the documents into nodes, register them in the docstore, and
    # build the vector index backed by Elasticsearch.
    nodes = service_context.node_parser.get_nodes_from_documents(docs)
    storage_context = StorageContext.from_defaults(vector_store=es)
    storage_context.docstore.add_documents(nodes)

    index = VectorStoreIndex(
        nodes=nodes,
        storage_context=storage_context,
        service_context=service_context,
    )

