import json
import logging
import random
from typing import Any, List

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema, BlockType
from backend.data.model import SchemaField
from backend.blocks.llm import AIConversationBlock, AIStructuredResponseGeneratorBlock, LlmModel, APIKeyCredentials, AICredentials, AICredentialsField, TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT

from .my_game_helpers import *


# Module-level logger named after this module, per the stdlib logging convention.
logger = logging.getLogger(__name__)


class DealCardsBlock(AIConversationBlock):
    """Deal four random card values (1-13) by asking an LLM.

    Builds a prompt that includes previously dealt arrays (read from the
    helper cache) so the model avoids repeating them, sends it via
    ``llm_call``, caches the response with ``save_cards_cache``, and yields
    it on the ``cards_posted`` output pin.
    """

    # Prompt sent to the LLM; {old_arrays} is filled with prior deals so the
    # model is asked not to repeat them.
    PROMPT_TEMPLATE: str = """
    Generate 4 random natural numbers between 1 and 13, include 1 and 13. Just return 4 numbers in an array, don't include other content. The returned array should not be repeated with the following arrays:
    {old_arrays}
    """

    class Input(BlockSchema):
        # Trigger pin for the block; its value is never read by run().
        deal_cards_input: str = SchemaField(
            description="deal cards input"
        )
        # Which LLM to use for the deal.
        model: LlmModel = SchemaField(
            title="LLM Model",
            default=LlmModel.GPT4_TURBO,
            description="The language model to use for the conversation.",
        )
        # API credentials reference resolved by the framework.
        credentials: AICredentials = AICredentialsField()
        # Optional cap on completion length; None lets the provider decide.
        max_tokens: int | None = SchemaField(
            advanced=True,
            default=None,
            description="The maximum number of tokens to generate in the chat completion.",
        )

    class Output(BlockSchema):
        # The raw LLM response for the new deal (also written to the cache).
        cards_posted: str = SchemaField(
            description="Check result to the expression."
        )
        error: str = SchemaField(description="Error message if the API call failed.")

    def __init__(self):
        # NOTE(review): calls Block.__init__ directly instead of
        # super().__init__(), presumably to skip AIConversationBlock's own
        # schema/metadata setup and substitute this block's schemas — confirm.
        Block.__init__(
            self,
            id="7c99272d-1275-4f7a-9b98-363d02570174",
            description="Advanced LLM call that takes a prompt and sends them to the language model.",
            categories={BlockCategory.AI},
            input_schema=DealCardsBlock.Input,
            output_schema=DealCardsBlock.Output
        )

    def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        """Ask the LLM for a fresh 4-card deal and yield it on cards_posted."""
        # Previously dealt arrays, used to steer the model away from repeats.
        old_cards_arrays = get_old_cards_arrays()
        logger.info(f"used old_arrays is :{old_cards_arrays}")

        prompt = self.PROMPT_TEMPLATE.format(old_arrays=old_cards_arrays)
        # The prompt is passed as conversation history with an empty `prompt`
        # field; expected_format={} requests an unconstrained response.
        msg_list = [ {"role": "user", "content": prompt} ]
        rsp = self.llm_call(
            AIStructuredResponseGeneratorBlock.Input(
                prompt="",
                credentials=input_data.credentials,
                model=input_data.model,
                conversation_history=msg_list,
                max_tokens=input_data.max_tokens,
                expected_format={},
            ),
            credentials=credentials,
        )

        # rsp is forwarded as-is; presumably a string/array-like payload the
        # helpers can parse — TODO confirm llm_call's return shape.
        cards_posted = rsp
        save_cards_cache(cards_posted)
        yield "cards_posted", cards_posted


class MachineGiveExpressionBlock(Block):
    """Produce a machine-generated 24-point expression for the last deal.

    Reads the most recently dealt cards from the helper cache, looks up the
    cached solution expressions for those point values, and yields one of
    them at random. Yields the sentinel string "expression not found" when
    no cards have been dealt or the deal has no cached solution.
    """

    class Input(BlockSchema):
        # Trigger pin; its value is never read by run().
        help_input: str = SchemaField(
            description="help input",
        )

    class Output(BlockSchema):
        # Either a solution expression string or "expression not found".
        expression: Any = SchemaField(description="The expression of 24 point game.")

    def __init__(self):
        super().__init__(
            id="51c434dc-0abf-4d87-a3ee-c1ae4373fa3b",
            input_schema=MachineGiveExpressionBlock.Input,
            output_schema=MachineGiveExpressionBlock.Output,
            description="give expression by machine.",
            # Fixed: the original set literal listed BlockCategory.LOGIC twice.
            categories={BlockCategory.LOGIC},
            block_type=BlockType.STANDARD,
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        """Yield one cached solution for the last dealt cards, if any."""
        last_cards_posted = get_last_cards_posted()

        if last_cards_posted == "":
            # Nothing has been dealt yet.
            yield "expression", "expression not found"
            return

        point_list = json.loads(last_cards_posted)
        expressions = get_cached_expressions(point_list)

        # random.choice replaces the original manual randint indexing; the
        # sentinel is kept for deals with no cached solutions.
        result = random.choice(expressions) if expressions else "expression not found"
        yield "expression", result


class CheckExpressionBlock(AIConversationBlock):
    """Check via an LLM whether an expression evaluates to 24 (within 0.1).

    Formats the expression into a True/False prompt, sends it through
    ``llm_call``, and yields the extracted verdict together with the last
    dealt cards. The sentinel input "expression not found" is treated as a
    pass (yields "Correct") without calling the LLM.
    """

    # Prompt asking the model to evaluate the closeness-to-24 predicate and
    # answer strictly "True" or "False".
    PROMPT_TEMPLATE: str = """
    Calculate the result of this conditional expression 'abs(({expression}) - 24) < 0.1'.
    Just return "True" or "False", don't return other content.
    """

    class Input(BlockSchema):
        # The candidate arithmetic expression to verify.
        expression: str = SchemaField(
            description="The expression of 24 points game."
        )
        # Which LLM to use for the check.
        model: LlmModel = SchemaField(
            title="LLM Model",
            default=LlmModel.GPT4_TURBO,
            description="The language model to use for the conversation.",
        )
        # API credentials reference resolved by the framework.
        credentials: AICredentials = AICredentialsField()
        # Optional cap on completion length; None lets the provider decide.
        max_tokens: int | None = SchemaField(
            advanced=True,
            default=None,
            description="The maximum number of tokens to generate in the chat completion.",
        )

    class Output(BlockSchema):
        # The verdict extracted from the LLM response (or "Correct" for the
        # sentinel input).
        check_result: str = SchemaField(
            description="Check result to the expression."
        )
        # The cards the expression was checked against, passed through.
        last_cards_posted: str = SchemaField(
            description="last cards posted."
        )
        error: str = SchemaField(description="Error message if the API call failed.")

    def __init__(self):
        # Fixed: removed a dead `prompt = self.PROMPT_TEMPLATE.format(...)`
        # local that was computed here and never used.
        # NOTE(review): calls Block.__init__ directly instead of
        # super().__init__(), presumably to skip AIConversationBlock's own
        # schema/metadata setup and substitute this block's schemas — confirm.
        Block.__init__(
            self,
            id="50a4446b-71b6-4a17-bb72-82ead2be1c0e",
            description="Advanced LLM call that takes a prompt and sends them to the language model.",
            categories={BlockCategory.AI},
            input_schema=CheckExpressionBlock.Input,
            output_schema=CheckExpressionBlock.Output,
        )

    def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        """Yield the LLM's True/False verdict plus the last dealt cards."""
        last_cards_posted = get_last_cards_posted()

        expression = input_data.expression
        if expression == "expression not found":
            # Sentinel from MachineGiveExpressionBlock: nothing to check.
            yield "check_result", "Correct"
            yield "last_cards_posted", last_cards_posted
            return

        prompt = self.PROMPT_TEMPLATE.format(expression=expression)

        # The prompt travels as conversation history with an empty `prompt`
        # field; expected_format={} requests an unconstrained response.
        msg_list = [{"role": "user", "content": prompt}]
        rsp = self.llm_call(
            AIStructuredResponseGeneratorBlock.Input(
                prompt="",
                credentials=input_data.credentials,
                model=input_data.model,
                conversation_history=msg_list,
                max_tokens=input_data.max_tokens,
                expected_format={},
            ),
            credentials=credentials,
        )

        check_result = extract_result(rsp)
        yield "check_result", check_result
        yield "last_cards_posted", last_cards_posted

class GetHumanReplyBlock(Block):
    """Simulate a human player's reply in the 24-point game.

    Logs the currently posted cards (when any) and yields one of a fixed set
    of canned replies — game commands or sample arithmetic answers — chosen
    uniformly at random.
    """

    class Input(BlockSchema):
        # JSON-encoded point list for the current deal; "" means no deal.
        cards_posted: str = SchemaField(
            description="The cards posted.",
            advanced=False
        )

    class Output(BlockSchema):
        # The simulated user's reply string.
        user_input: Any = SchemaField(
            description="user input"
        )

    def __init__(self):
        super().__init__(
            id="c9fd89db-2d2b-46e8-81fa-6e59ff91a349",
            input_schema=GetHumanReplyBlock.Input,
            output_schema=GetHumanReplyBlock.Output,
            description="Get Human Reply.",
            categories={BlockCategory.INPUT, BlockCategory.BASIC},
            block_type=BlockType.STANDARD,
        )

    def run(self, input_data: Input, **kwargs) -> BlockOutput:
        """Log the posted cards and yield a random canned user reply."""
        cards_posted = input_data.cards_posted
        if cards_posted != "":
            point_list = json.loads(cards_posted)
            card_list = get_random_card_list(point_list)
            # Pair face cards with their point values for display in the log.
            cards_posted_content = f"{{'card_list': {card_list}, 'point_list': {point_list}}}"
            logger.info(f"cards posted: {cards_posted_content}")

        # Canned replies standing in for real user input: three commands plus
        # two sample arithmetic answers.
        input_list = [
            "deal",
            "help",
            "exit",
            "12 + 2 * (1 + 5)",
            "5 * 3 - 7 + 11",
        ]
        # random.choice replaces the original hard-coded randint(0, 4) bound,
        # which would silently skip entries if the list ever grew.
        yield "user_input", random.choice(input_list)
