import asyncio

from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.postprocessor.llm_rerank import LLMRerank
from llama_index.core.response_synthesizers import CompactAndRefine
from llama_index.core.schema import NodeWithScore
from llama_index.core.workflow import (
    Context,
    Event,
    StartEvent,
    StopEvent,
    Workflow,
    step,
)
from llama_index.embeddings.openai import OpenAIEmbedding
# Define the two custom events used by the workflow.
class M001Event(Event):
    """Workflow event carrying a list of scored nodes (first branch)."""

    # Nodes forwarded to the step that consumes this event.
    nodes: list[NodeWithScore]


class M002Event(Event):
    """Workflow event carrying a list of scored nodes (second branch)."""

    # Nodes forwarded to the step that consumes this event.
    nodes: list[NodeWithScore]

class Wf(Workflow):
    """Demo workflow whose start step fans out into two branches.

    ``startHandler`` returns an ``M001Event`` and also pushes an
    ``M002Event`` through the context, so both handlers below run;
    only ``M002EventHandler`` returns the ``StopEvent`` that actually
    ends the run.

    NOTE(review): the return annotations double as the workflow's event
    wiring in llama_index, so they are kept exactly as declared even
    where a branch never returns that type (e.g. ``startHandler`` never
    returns ``StopEvent``).
    """

    @step
    async def startHandler(
        self, ctx: Context, ev: StartEvent
    ) -> StopEvent | M001Event | None:
        """Entry step: send an M002Event via the context, return an M001Event."""
        print('startHandler')
        await asyncio.sleep(1)
        # Fan out: this event is dispatched alongside the returned one.
        ctx.send_event(M002Event(nodes=[]))
        return M001Event(nodes=[])

    @step
    async def M001EventHandler(
        self, ctx: Context, ev: M001Event
    ) -> M001Event | StopEvent | None:
        """Consume an M001Event; returns None, so this branch simply ends."""
        print('M001EventHandler')
        await asyncio.sleep(1)
        return None

    @step
    async def M002EventHandler(self, ctx: Context, ev: M002Event) -> StopEvent:
        """Consume an M002Event and finish the workflow with a StopEvent."""
        collected = ctx.collect_events(ev, [M002Event])
        if collected is None:
            # collect_events has not yet gathered everything it expects.
            print('M002EventHandler')
            return StopEvent()
        print('M002EventHandler end ')
        await asyncio.sleep(1)
        return StopEvent(nodes=[])


# Module-level workflow instance shared by main().
wf = Wf()


async def main():
    """Build a context for the workflow and run it once.

    Extra keyword arguments to ``run`` (here ``name='ok'``) are passed
    through to the workflow start — presumably surfaced on the
    StartEvent; verify against the llama_index version in use.
    """
    context = Context(wf)
    await wf.run(context, name='ok')


if __name__ == '__main__':
    asyncio.run(main())




