import os

# ModelScope's OpenAI-compatible inference endpoint.
baseURL = 'https://api-inference.modelscope.cn/v1'

# SECURITY: a live API key was hard-coded here and committed to source —
# rotate the leaked credential. Prefer supplying it via the MODELSCOPE_API_KEY
# environment variable; the literal fallback is kept only for backward
# compatibility until the env var is set everywhere.
apiKey = os.environ.get('MODELSCOPE_API_KEY', 'ms-6158e511-4efb-46bc-a903-3892fa40fae4')

# langchain_openai's ChatOpenAI picks up its credential from OPENAI_API_KEY.
os.environ["OPENAI_API_KEY"] = apiKey

from langchain_community.vectorstores import Chroma
from langchain_community.embeddings.modelscope_hub import ModelScopeEmbeddings
from langchain_community.document_loaders import TextLoader
from langchain_openai import ChatOpenAI
from langchain_text_splitters import CharacterTextSplitter
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser

# Embedding model served through ModelScope.
modelName = 'Qwen/Qwen3-Embedding-0.6B'

# Load the short story and split it into ~1000-character chunks with no overlap.
loader = TextLoader(file_path='./我的遥远的清平湾.txt', encoding='utf-8')
splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = splitter.split_documents(documents=loader.load())

# Index the chunks in an in-memory Chroma store; retrieval returns the
# single best-matching chunk (k=1).
embeddings = ModelScopeEmbeddings(model_id=modelName)
vectorstore = Chroma.from_documents(documents=documents, embedding=embeddings)
retriever = vectorstore.as_retriever(search_kwargs={"k": 1})

# Prompt template: the retrieved context is stuffed into the system message,
# the user's question becomes the human turn.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "根据以下上下文回答问题：\n{context}"),
        ("human", "{question}"),
    ]
)

# Deterministic (temperature=0) chat model behind ModelScope's
# OpenAI-compatible endpoint.
llm = ChatOpenAI(model='ZhipuAI/GLM-4.6', temperature=0, base_url=baseURL)

# RAG pipeline: fetch context and pass the raw question through in parallel,
# fill the prompt, call the model, then strip the reply down to plain text.
inputs = RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
chain = inputs | prompt | llm | StrOutputParser()
# Simple REPL: read one question per line and answer it via the RAG chain;
# entering 'q' quits.
print('\n')

while (question := input()) != 'q':
    print(chain.invoke(question))
