# -*- coding: utf-8 -*-
import json
import logging
import os
import sys

import openai
from IPython.display import Markdown, display
from llama_index.core import Settings, SQLDatabase
from llama_index.core.postprocessor import SimilarityPostprocessor
from llama_index.core.query_engine import NLSQLTableQueryEngine, RetrieverQueryEngine
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.embeddings.fastembed import FastEmbedEmbedding
from llama_index.llms.ollama import Ollama
from sqlalchemy import create_engine

# --- LLM and embedding configuration ---------------------------------------
# Read OpenAI credentials from the environment instead of hard-coding them:
# an empty-string API key always fails authentication, and assigning an
# empty string to openai.base_url produces malformed request URLs.
openai.api_key = os.environ.get("OPENAI_API_KEY", "")
_openai_base_url = os.environ.get("OPENAI_BASE_URL")
if _openai_base_url:
    # Only override the default endpoint when one is explicitly configured.
    openai.base_url = _openai_base_url

from llama_index.llms.openai import OpenAI

# Chat model used for query answering; low temperature keeps answers focused.
llm = OpenAI(temperature=0.19, model="gpt-3.5-turbo")
# Alternative local backend, kept for reference:
# llm = Ollama(model="codeqwen:7b-chat", base_url="http://localhost:11434", request_timeout=60000.0)

# Small Chinese-optimized embedding model, computed locally via FastEmbed.
embed_model = FastEmbedEmbedding(model_name="BAAI/bge-small-zh-v1.5")

# Register both as process-wide defaults for all llama_index components.
Settings.llm = llm
Settings.embed_model = embed_model

# an example prompt (not used further down in this script)
prompt = "What animals are llamas related to?"
import chromadb
from llama_index.core import SimpleDirectoryReader, StorageContext, VectorStoreIndex
from llama_index.vector_stores.chroma import ChromaVectorStore

# Load every document found under the UI docs folder.
documents = SimpleDirectoryReader(r"E:\100rag\data\ui").load_data()

# Persistent Chroma client: embeddings written here survive across runs.
db = chromadb.PersistentClient(path=r"E:\100rag\chroma")
chroma_collection = db.get_or_create_collection("doc-ui")

# Plug the Chroma collection in as llama_index's vector-store backend.
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)

# Embed the documents (using Settings.embed_model) and persist them
# into the collection.
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)

print('load ok')