from langchain.memory import ConversationSummaryBufferMemory
from typing import Any, Dict, List

from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.pydantic_v1 import root_validator

from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.summary import SummarizerMixin


class ConversationSummaryRedisMemory(ConversationSummaryBufferMemory):
    """Redis-backed conversation memory with a rolling summary.

    Unlike the base class, pruning never deletes messages from the
    underlying ``chat_memory`` store (e.g. Redis). Instead, ``buffer_begin``
    is advanced past the pruned prefix, and the pruned messages are folded
    into ``moving_summary_buffer`` via the summarizer LLM.
    """

    # Index of the first message still considered part of the live buffer.
    # Everything before this offset has already been summarized.
    buffer_begin: int = 0

    @property
    def buffer(self) -> List[BaseMessage]:
        """Return the unsummarized tail of the stored message history.

        Note: this is a slice, i.e. a fresh list — mutating it does not
        affect the backing store.
        """
        return self.chat_memory.messages[self.buffer_begin:]

    def prune(self) -> None:
        """Summarize the oldest messages until the buffer fits the token limit.

        Advances ``buffer_begin`` past each pruned message and replaces
        ``moving_summary_buffer`` with a new summary that incorporates them.
        """
        buffer = self.buffer
        curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
        if curr_buffer_length <= self.max_token_limit:
            return
        pruned_memory: List[BaseMessage] = []
        # Guard on `buffer` so that a token count that stays above the limit
        # even for an empty buffer (e.g. fixed tokenizer overhead with a tiny
        # max_token_limit) cannot raise IndexError from pop() on an empty list.
        while curr_buffer_length > self.max_token_limit and buffer:
            pruned_memory.append(buffer.pop(0))
            self.buffer_begin += 1
            curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
        self.moving_summary_buffer = self.predict_new_summary(
            pruned_memory, self.moving_summary_buffer
        )
