import logging
import random
from typing import Any, List

from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema, BlockType
from backend.data.model import SchemaField
from backend.blocks.llm import AIConversationBlock, AIStructuredResponseGeneratorBlock, LlmModel, APIKeyCredentials, AICredentials, AICredentialsField, TEST_CREDENTIALS, TEST_CREDENTIALS_INPUT


logger = logging.getLogger(__name__)


class CollectionFacadeBlock(AIConversationBlock):
    """Classify a piece of text into one of three categories via an LLM.

    The prompt (written in Chinese) instructs the model to label the input
    text as ``idea``, ``action``, or ``checklist`` based on its opening
    phrase, and to return only the English category name. The raw model
    response is yielded as ``collection_output``.
    """

    # Classification prompt (Chinese). It enumerates the three categories,
    # keyed off the text's leading phrase, and asks for the English category
    # name only. {collection_input} is filled in at run time.
    PROMPT_TEMPLATE: str = """
    你是我的工作助手，我给你一段文本，你需要把这段文本内容做分类。我给你的文本可以分为以下三类：
    1. idea：以“我的想法”开头。
    2. action：以"我要做"开头。
    3. checklist：以"检查列表"开头。
    请你对我给你的以下文本做分类，只返回给我分类的英文名称即可。
    {collection_input}
    """

    class Input(BlockSchema):
        # The text to classify.
        collection_input: str = SchemaField(
            description="collection input"
        )
        # Which LLM performs the classification.
        model: LlmModel = SchemaField(
            title="LLM Model",
            default=LlmModel.GPT4_TURBO,
            description="The language model to use for the conversation.",
        )
        credentials: AICredentials = AICredentialsField()
        # None lets the provider use its own default limit.
        max_tokens: int | None = SchemaField(
            advanced=True,
            default=None,
            description="The maximum number of tokens to generate in the chat completion.",
        )

    class Output(BlockSchema):
        # The category name ("idea" / "action" / "checklist") as returned
        # by the model. No post-processing is applied to the response.
        collection_output: str = SchemaField(
            description="collection output"
        )
        error: str = SchemaField(description="Error message if the API call failed.")

    def __init__(self):
        # NOTE: Block.__init__ is called directly (bypassing
        # AIConversationBlock.__init__) so this block registers its own
        # id, description, and schemas instead of inheriting the parent's.
        Block.__init__(
            self,
            id="e7f458ff-dd1d-4962-ac0b-0b12479f5bc6",
            description="Classifies input text into idea/action/checklist categories using an LLM.",
            categories={BlockCategory.AI},
            input_schema=CollectionFacadeBlock.Input,
            output_schema=CollectionFacadeBlock.Output,
        )

    def run(
        self, input_data: Input, *, credentials: APIKeyCredentials, **kwargs
    ) -> BlockOutput:
        """Format the classification prompt, call the LLM, and yield its answer.

        Yields:
            ("collection_output", response): the model's classification text.
        """
        prompt = self.PROMPT_TEMPLATE.format(
            collection_input=input_data.collection_input
        )
        # Replaced a stray debug print with proper lazy-arg logging.
        logger.debug("CollectionFacadeBlock prompt: %s", prompt)

        # The prompt travels as a single user message in conversation_history;
        # the inner Input's `prompt` field stays empty on purpose.
        response = self.llm_call(
            AIStructuredResponseGeneratorBlock.Input(
                prompt="",
                credentials=input_data.credentials,
                model=input_data.model,
                conversation_history=[{"role": "user", "content": prompt}],
                max_tokens=input_data.max_tokens,
                # Empty expected_format => free-text response, not structured JSON.
                expected_format={},
            ),
            credentials=credentials,
        )

        yield "collection_output", response