import logging
from collections import namedtuple
from pathlib import Path

import coloredlogs
import gradio as gr
import requests
import torch
import yaml
from transformers import AutoModelForCausalLM, AutoTokenizer

from models import ModelManager

# Module-level logger for this file; handlers/formatting are configured elsewhere.
log = logging.getLogger(__name__)


def format_message_history(messages):
    """Convert OpenAI-style message dicts into gradio chatbot row pairs.

    System messages become leading ``[content, None]`` rows; the remaining
    messages are grouped two-at-a-time (user/assistant) into pairs, in order.
    """
    system_rows = []
    turn_contents = []
    for message in messages:
        if message["role"] == "system":
            system_rows.append([message["content"], None])
        else:
            turn_contents.append(message["content"])
    # Group consecutive non-system messages into [user, assistant] pairs.
    paired_turns = [
        turn_contents[start : start + 2] for start in range(0, len(turn_contents), 2)
    ]
    return system_rows + paired_turns


def dummy_generate_text(sys_prompt, custom_sys_prompt, prompt, *args, **kwargs):
    """Return a placeholder completion for UI testing.

    Echoes the prompt and the effective system prompt (the custom one wins
    when non-empty), followed by 1-9 lines of dummy output. Extra positional
    and keyword arguments are accepted and ignored so this can stand in for
    a real generation callback.
    """
    repeats = int(torch.randint(1, 10, (1,)))
    effective_sys = custom_sys_prompt or sys_prompt
    header = f"Input:\n{prompt}\n\nSys:\n{effective_sys}\n\n"
    return header + "Dummy output\n" * repeats


def reset_dialog(messages: list):
    """Reset the conversation state.

    Returns a pair of (empty message history, chatbot rows containing one
    blank [None, None] row). The ``messages`` argument is ignored; it exists
    only so this function matches the expected callback signature.
    """
    cleared_history: list = []
    blank_chat_rows = [[None, None]]
    return cleared_history, blank_chat_rows


def check_balance(config):
    """Query the billing endpoint and return the available balance.

    Args:
        config: object exposing ``online_key`` (bearer token) and
            ``balance_url`` (billing endpoint URL).

    Returns:
        The available amount as a float, or ``None`` on any failure
        (network error, timeout, non-2xx response, unexpected payload).
    """
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/json",
        "Authorization": f"Bearer {config.online_key}",
    }
    try:
        # timeout prevents the UI from hanging forever on a stalled request
        resp = requests.post(config.balance_url, headers=headers, timeout=10)
        # surface HTTP errors explicitly instead of a confusing KeyError
        # from parsing an error body below
        resp.raise_for_status()
        payload = resp.json()
        return float(payload["grants"]["available_amount"])
    except Exception as e:
        # best-effort check: log and fall back to None rather than crash the UI
        log.error("Error checking balance: %s", e)
        return None
