import os

# ModelScope's OpenAI-compatible inference endpoint.
baseURL = 'https://api-inference.modelscope.cn/v1'

# SECURITY: the API key used to be hard-coded here, which leaks the secret
# into version control. Read it from the environment instead; the fallback
# is empty so the script still imports, but requests will fail without it.
apiKey = os.environ.get('MODELSCOPE_API_KEY', '')

# langchain_openai's ChatOpenAI picks up its credential from OPENAI_API_KEY,
# so bridge the ModelScope key into that variable.
os.environ["OPENAI_API_KEY"] = apiKey

from langchain_community.vectorstores import Chroma
from langchain_community.embeddings.modelscope_hub import ModelScopeEmbeddings
from langchain_community.document_loaders import TextLoader
from langchain_openai import ChatOpenAI
from langchain_text_splitters import CharacterTextSplitter
from langchain_classic.chains import ConversationalRetrievalChain
from langchain_classic.memory import ConversationBufferMemory

# Embedding model used to vectorize the document chunks.
modelName = 'Qwen/Qwen3-Embedding-0.6B'


# Load the source text and split it into ~1000-character chunks with no
# overlap; each chunk becomes one entry in the vector store.
documents = TextLoader(file_path='./我的遥远的清平湾.txt', encoding='utf-8').load()
documents = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0).split_documents(documents=documents)

embeddings = ModelScopeEmbeddings(model_id=modelName)
vectorstore = Chroma.from_documents(documents=documents, embedding=embeddings)

# Conversational RAG chain: a retriever over the Chroma index plus a buffer
# memory so follow-up questions keep conversational context.
memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
qa = ConversationalRetrievalChain.from_llm(
    llm=ChatOpenAI(
        model='ZhipuAI/GLM-4.6',
        temperature=0,
        base_url=baseURL,
    ),
    memory=memory,
    retriever=vectorstore.as_retriever(),
)

print('\n')
# Interactive loop: each line typed is a question; enter 'q' to quit.
while True:
    query = input()
    if query == 'q':
        break
    # Chain.__call__ (qa({...})) is deprecated in modern LangChain;
    # .invoke() is the supported entry point and returns the same dict.
    result = qa.invoke({"question": query})
    print(result["answer"])
