import asyncio
from typing import List

from llama_index.core.agent.workflow import  FunctionAgent
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.storage.chat_store.sql import SQLAlchemyChatStore
from llama_index.core.vector_stores import SimpleVectorStore
from llama_index.core.schema import  TextNode
from llama_index.core import Settings, SimpleKeywordTableIndex, SummaryIndex, get_response_synthesizer
from llama_index.embeddings.zhipuai import ZhipuAIEmbedding
from llama_index.core.graph_stores import SimplePropertyGraphStore
from llama_index.core.schema import Document
from pydantic import BaseModel

import os

# --- Embedding / LLM configuration ---
# SECURITY: real API keys were previously hard-coded below. Treat them as
# leaked and rotate them. Environment variables are now preferred; the old
# literal values remain only as a backward-compatible fallback so existing
# runs keep working.
embed_model = ZhipuAIEmbedding(
    model="embedding-2",
    api_key=os.environ.get(
        "ZHIPUAI_API_KEY",
        "f387f5e4837d4e4bba6d267682a957c9.PmPiTw8qVlsI2Oi5",
    ),
    # With the `embedding-3` class of models, you can specify the size
    # of the embeddings you want returned, e.g. dimensions=1024.
)
# Register the embedding model as the global default for llama_index.
Settings.embed_model = embed_model

from llama_index.llms.deepseek import DeepSeek

llm = DeepSeek(
    model="deepseek-chat",
    api_key=os.environ.get(
        "DEEPSEEK_API_KEY",
        "sk-605e60a1301040759a821b6b677556fb",
    ),
)
# Register the chat LLM as the global default for llama_index.
Settings.llm = llm
from llama_index.core.extractors.metadata_extractors import (
    KeywordExtractor,
    PydanticProgramExtractor,
    QuestionsAnsweredExtractor,
    SummaryExtractor,
    TitleExtractor,
)
from llama_index.core.extractors.document_context import DocumentContextExtractor


# Sample document content (Chinese news caption) wrapped in a single TextNode.
text = """2022年6月14日，江苏南京大报恩塔与“超级月亮”相映成景。 """
node = TextNode(text=text)
from llama_index.core.memory.chat_memory_buffer import ChatMemoryBuffer
from llama_index.core.memory.chat_summary_memory_buffer import ChatSummaryMemoryBuffer
from llama_index.core.memory.types import BaseMemory
from llama_index.core.memory.vector_memory import VectorMemory
from llama_index.core.memory.simple_composable_memory import SimpleComposableMemory
from llama_index.core.memory.memory import Memory, BaseMemoryBlock, InsertMethod
from llama_index.core.memory.memory_blocks import (
    StaticMemoryBlock,
    VectorMemoryBlock,
    FactExtractionMemoryBlock,
)
# Session-scoped short-term memory with a generous token budget; no long-term
# memory blocks are configured here.
memory = Memory.from_defaults(session_id="my_session", token_limit=40000)

# Seed the session with one user/assistant exchange.
seed_messages = [
    ChatMessage(role="user", content="Hello, world!"),
    ChatMessage(role="assistant", content="Hello, world to you too!"),
]
memory.put_messages(seed_messages)

# Read back the current chat history for this session.
chat_history = memory.get()


async def main():
    """Demo SQLAlchemyChatStore: store two messages, then print the count.

    Uses the store's default (in-process SQLite) backend; the table name and
    message key are fixed demo values.
    """
    chat_store = SQLAlchemyChatStore(table_name='chat_history')

    # Persist the same user message twice under key '002'.
    await chat_store.add_message(
        key='002', message=ChatMessage(role="user", content="Hello, world!")
    )
    await chat_store.add_message(
        key='002', message=ChatMessage(role="user", content="Hello, world!")
    )

    # Expect 2 for a fresh table (more if the table persisted earlier runs).
    message_count = await chat_store.count_messages(key='002')
    print(message_count)


if __name__ == '__main__':

    # Run the async chat-store demo to completion on a fresh event loop.
    asyncio.run(main())





