import asyncio
import os

import chromadb
from llama_index.core import (
    GPTVectorStoreIndex,
    Settings,
    SimpleDirectoryReader,
    StorageContext,
    VectorStoreIndex,
    load_index_from_storage,
)
from llama_index.core.node_parser import SimpleNodeParser
from llama_index.core.workflow import (
    Event,
    StartEvent,
    StopEvent,
    Workflow,
    step,
)
from llama_index.embeddings.dashscope import DashScopeEmbedding
from llama_index.embeddings.fastembed import FastEmbedEmbedding
from llama_index.embeddings.huggingface import HuggingFaceEmbedding  # text -> embedding vectors
from llama_index.llms import openai_like
from llama_index.llms.deepseek import DeepSeek
from llama_index.llms.huggingface import HuggingFaceLLM  # runs Hugging Face pretrained LLMs
from llama_index.llms.ollama import Ollama
from llama_index.llms.openai import OpenAI
from llama_index.vector_stores.chroma import ChromaVectorStore

# Initialize the LLM used by all llama_index components via global Settings.
# SECURITY: the API key must come from the environment — a key hard-coded in
# source ends up in version control and is effectively leaked. (The key that
# was previously committed here should be revoked.)
llm = DeepSeek(
    model="deepseek-chat",
    api_key=os.environ.get("DEEPSEEK_API_KEY", ""),
)
Settings.llm = llm

class PsEvent(Event):
    """Intermediate workflow event passed from load_documents to generate_answer."""
    # Status payload; load_documents emits "ok" after indexing completes.
    name: str

class DocumentQAWorkflow(Workflow):
    """Two-step document-QA workflow: index ./data, then answer a query over it.

    Step 1 (load_documents) builds a vector index; step 2 (generate_answer)
    queries it. Because PsEvent only carries a status string, the index and
    query are stashed on the instance between steps.
    """

    @step
    async def load_documents(self, ev: StartEvent) -> PsEvent:
        """Load documents from ./data, build the vector index, capture the query.

        Fixes: the original built the index and immediately discarded it, and
        never read the user's query from the StartEvent.
        """
        documents = SimpleDirectoryReader("data").load_data()
        # Keep the index so generate_answer can retrieve against it.
        self._index = VectorStoreIndex.from_documents(documents)
        # run(query=...) surfaces the query on the StartEvent; fall back to a
        # default so callers that pass nothing still get a sensible run.
        self._query = getattr(ev, "query", None) or "Summarize the documents."
        return PsEvent(name="ok")

    @step
    async def generate_answer(self, ev: PsEvent) -> StopEvent:
        """Run the stored query against the index and return the final answer.

        The original interpolated the raw event object into the answer string;
        now the answer comes from actual retrieval + generation.
        """
        engine = self._index.as_query_engine()
        response = await engine.aquery(self._query)
        return StopEvent(result=f"基于检索结果生成的答案：{response}")

async def _main() -> None:
    """Run the QA workflow once and print the final answer."""
    # Allow time for document loading/indexing; the default workflow timeout
    # is short.
    wf = DocumentQAWorkflow(timeout=120)
    # Workflow.run forwards keyword arguments onto the StartEvent — the old
    # positional call run("hello") bound "hello" to the wrong parameter.
    # The handler returned by run() must be awaited; printing it directly
    # prints the handler object, not the workflow result.
    result = await wf.run(query="hello")
    print(result)


if __name__ == "__main__":
    asyncio.run(_main())