# https://developer.aliyun.com/article/1501662
import os
import sys
import uuid

import torch
from llama_index.legacy import VectorStoreIndex, ServiceContext, PromptTemplate
from llama_index.legacy.embeddings import HuggingFaceEmbedding
from llama_index.legacy.llms import HuggingFaceLLM
from llama_index.legacy.readers import MilvusReader
from transformers import AutoTokenizer, AutoModelForCausalLM

import logging

from utils import project_setting

# from llama_index.embeddings import HuggingFaceEmbedding
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
# from llama_index.llms import HuggingFaceLLM
# from llama_index.prompts import PromptTemplate


from llama_index.core import Settings
from llama_index.core.callbacks import CallbackManager, LlamaDebugHandler

# Tracing/observability setup: a local llama-index debug handler plus a
# Langfuse callback that reports traces to the hosted Langfuse service.
print('初始化 llama_debug')
llama_debug = LlamaDebugHandler(print_trace_on_end=True)
from langfuse.llama_index import LlamaIndexCallbackHandler

print('初始化 langfuse')
# Credentials come from the environment; os.getenv returns None when unset,
# in which case the handler will fail to authenticate — TODO confirm env vars
# LANGFUSE_PUBLIC_KEY / LANGFUSE_SECRET_KEY are exported before running.
langfuse_callback_handler = LlamaIndexCallbackHandler(
    trace_name='demo',
    user_id='sks',
    session_id=str(uuid.uuid1()),  # fresh session id per process run
    public_key=os.getenv('LANGFUSE_PUBLIC_KEY'),
    secret_key=os.getenv('LANGFUSE_SECRET_KEY'),
    host="https://us.cloud.langfuse.com"
)

# Only the Langfuse handler is active; the debug handler can be re-enabled
# by using the commented variant below.
# Settings.callback_manager = CallbackManager([langfuse_callback_handler,llama_debug])
Settings.callback_manager = CallbackManager([langfuse_callback_handler])

# The demo question sent through the RAG pipeline at the bottom of this file.
query = "介绍一下广州大学"


# https://modelscope.cn/headlines/article/373?spm=a2c6h.12873639.article-detail.14.6f8ff2f3yWclUr

def initLlm1():
    """Load the local Yuan2-2B model and tokenizer, wrapped as a HuggingFaceLLM.

    The model is moved to the current CUDA device when one is available,
    otherwise to CPU. Returns a llama-index ``HuggingFaceLLM`` instance.
    """
    model_dir = "G:\\encode_repository\\code"

    print("Yuan2-2B-Februa Creat tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained(
        model_dir, add_eos_token=False, add_bos_token=False, eos_token='<eod>'
    )
    # Yuan2-specific special tokens (FIM / commit / jupyter markers).
    extra_tokens = [
        '<sep>', '<pad>', '<mask>', '<predict>', '<FIM_SUFFIX>', '<FIM_PREFIX>',
        '<FIM_MIDDLE>', '<commit_before>', '<commit_msg>', '<commit_after>',
        '<jupyter_start>', '<jupyter_text>', '<jupyter_code>',
        '<jupyter_output>', '<empty_output>',
    ]
    tokenizer.add_tokens(extra_tokens, special_tokens=True)

    print("Yuan2-2B-Februa Creat model...")
    model = AutoModelForCausalLM.from_pretrained(
        model_dir, torch_dtype=torch.bfloat16, trust_remote_code=True
    )
    target_device = (
        torch.cuda.current_device() if torch.cuda.is_available()
        else torch.device('cpu')
    )
    model = model.to(target_device)

    return HuggingFaceLLM(
        max_new_tokens=1024,
        generate_kwargs={
            "temperature": 0.25,
            "do_sample": False,
            "repetition_penalty": 1.2,
            "max_length": 2048,
        },
        tokenizer=tokenizer,
        model=model,
        device_map="auto",
        # reduces memory usage when running on CUDA
        model_kwargs={"torch_dtype": torch.float16, "trust_remote_code": True},
    )


def initLlm2():
    """Return an OpenAI-compatible client pointed at a local Ollama endpoint."""
    from llama_index.llms.openai import OpenAI
    return OpenAI(
        model="qwen2.5-coder:7b",
        api_base="http://192.168.56.1:11434/v1",
        api_key="aa",  # Ollama ignores the key, but the client requires one
        temperature=0.7,
    )


print("Creat llm model...")
# The local-HF path (initLlm1) is disabled; the Ollama/OpenAI client is used.
# llm=initLlm1()
llm = initLlm2()

print("Creat embedding model...")
# embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-zh-v1.5", trust_remote_code=True)
from pymilvus import model as pyModel
# pymilvus' built-in default embedding function; its output dimension must
# match the collection's vector dim (768 below) — TODO confirm, see the
# len(embeddings) print in getMilvusData().
embed_model = pyModel.DefaultEmbeddingFunction()
# embed_model = pyModel.DefaultEmbeddingFunction(model_name="BAAI/bge-small-zh-v1.5")

def text_embedding(text: str):
    """Embed a single string with the module-level DefaultEmbeddingFunction.

    NOTE(review): `_to_embedding` is a private method of the pymilvus
    embedding function; the public `encode_documents`/`encode_queries`
    API may be the supported route — confirm before upgrading pymilvus.
    """
    return embed_model._to_embedding(text)

# If `collection_name` does not exist in Milvus it is created below.
# Connection settings for the local Milvus server.
milvus_collection_name = 'demo'
milvus_username = 'root'
milvus_password = 'Milvus'
from pymilvus import MilvusClient, FieldSchema, DataType, CollectionSchema, Collection, connections

class MilvusClientExtended(MilvusClient):
    """MilvusClient with an explicit helper to tear down the shared connection."""

    def drop_connection(self):
        """Disconnect the module-level "default" Milvus connection alias."""
        connections.disconnect("default")

print(" Create MilvusClient")
# Connect to the local Milvus instance; token is "user:password".
client = MilvusClientExtended(
    uri="http://localhost:19530",
    token=f"{milvus_username}:{milvus_password}",
)

def getMilvusData():
    """Read knowledge.txt line by line and build Milvus insert rows.

    Returns:
        list[dict]: one row per line with keys "vector" (embedding of the raw
        line, trailing newline included), "text" (the raw line) and "subject"
        (fixed tag "历史").
    """
    chunk_list = []
    with open(f'{project_setting.project_path}/knowledge.txt', 'r', encoding='utf-8') as file:
        # Iterating the file object is equivalent to the original
        # readline()/while loop: it yields every line and stops at EOF.
        for line in file:
            # Generate embeddings using encoder from HuggingFace.
            embeddings = text_embedding(line)
            print(f'len(embeddings)={len(embeddings)}')
            chunk_list.append(
                {"vector": embeddings, "text": line, "subject": "历史"}
            )
    return chunk_list


def initMilvusCollection():
    """Create the demo collection via the ORM API, insert rows, build an index.

    NOTE: unused in the current flow (initMilvusCollection3 is called instead).
    """
    fmt = "\n=== {:30} ===\n"
    # 1. connect to Milvus
    connections.connect("default", host="localhost", port="19530")
    # 2. define collection
    # NOTE(review): dim=768 must match the embedding model's output size —
    # confirm against the len(embeddings) printed by getMilvusData().
    fields = [
        FieldSchema("pk", DataType.INT64, is_primary=True, auto_id=True),
        FieldSchema("vector", DataType.FLOAT_VECTOR, dim=768),
    ]
    # enable_dynamic_field so the extra "text"/"subject" keys produced by
    # getMilvusData() are accepted; without it insert() rejects rows that
    # contain fields not declared in the schema.
    schema = CollectionSchema(
        fields,
        f"{milvus_collection_name} is the simplest demo to introduce the APIs",
        enable_dynamic_field=True,
    )
    print(fmt.format(f"Create collection `{milvus_collection_name}`"))
    hello_milvus = Collection(milvus_collection_name, schema, consistency_level="Strong")
    # 3. insert data
    chunk_list = getMilvusData()
    hello_milvus.insert(chunk_list)
    hello_milvus.flush()
    # 4. create index
    index = {
        "index_type": "AUTOINDEX",
        "metric_type": "COSINE",
    }
    hello_milvus.create_index("vector", index)
def initMilvusCollection2():
    """Prepare AUTOINDEX/COSINE index params for the "vector" field.

    The create_collection call (and the explicit schema) were left disabled
    in the original flow, so this currently only builds the index parameters.
    """
    params = client.prepare_index_params()
    params.add_index(
        field_name="vector",
        index_type="AUTOINDEX",
        metric_type="COSINE"
    )

def initMilvusCollection3():
    """Create the demo collection (quick-start schema) and load knowledge rows."""
    client.create_collection(
        collection_name=milvus_collection_name,
        dimension=768,  # vector dimension used by this demo
    )
    rows = getMilvusData()
    client.insert(collection_name=milvus_collection_name, data=rows)



# Always rebuild the demo collection: for testing, an existing collection is
# dropped first, then created fresh and populated. Both branches of the
# original if/else ended in initMilvusCollection3(), so the call is hoisted.
if client.has_collection(milvus_collection_name):
    client.drop_collection(milvus_collection_name)
initMilvusCollection3()

# load documents: pull the nearest neighbours for the demo query straight
# out of Milvus and feed them to llama-index as Documents.
reader = MilvusReader(
    host="localhost", port=19530, user=milvus_username, password=milvus_password, use_secure=False
)

# Example query vector: embed the demo question and fetch the top-5 matches.
documents = reader.load_data(
    query_vector= text_embedding(query),
    collection_name=milvus_collection_name,
    limit=5
)
# Build an in-memory index over the retrieved documents using the configured
# LLM and embedding model (legacy ServiceContext API).
service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
index = VectorStoreIndex.from_documents(
    documents, service_context=service_context, show_progress=True
)
# define prompts that are used in llama-index, {query_str} is user's question,{context_str} is content queried by milvus
# The template labels are Chinese: "背景" = background/context, "问题" = question.
query_engine = index.as_query_engine(**{"text_qa_template": PromptTemplate(
    ("背景：{context_str}"
     "问题: {query_str}\n")
)})
response = query_engine.query(query)
print(response)

