#!/usr/bin/env python
# @Author  : Bright (brt2@qq.com)
# @Create  : 2025-09-05
# @Update  : 2025-09-18 / 0.1.0

import os
import time
import base64
import re
import requests
import json
# import google.generativeai as genai

from .putils import make_logger

class GeminiModelApi:
    """Client wrapper for the Google Gemini ``generateContent`` REST API.

    Supports one-shot generation (:meth:`generate`), multi-turn chat with
    cached history (:meth:`chat`) and image generation
    (:meth:`generate_image`). Call :meth:`connect` before any request.
    """

    # Timeout (seconds) for every HTTP request, so a stalled connection
    # cannot hang the caller forever.
    REQUEST_TIMEOUT = 60

    # ---------- Initialization ----------
    def __init__(self, logger_level=20, logger_path=None):
        """
        :param logger_level: logging level (20 == logging.INFO)
        :param logger_path: optional path for the logger output file
        """
        self.logger = make_logger("GeminiModelApi", logger_path, level=logger_level)
        self._init_prompt()

    def connect(self, api_key=None, model_tag=None, **kwargs):
        """
        Configure the connection to the Gemini API.

        :param api_key: API key; if None, read from env var GEMINI_API_KEY
        :param model_tag: model name; if None, read from env var GEMINI_MODEL_NAME
        :param kwargs: extra generationConfig options, merged over the defaults
        """
        self.api_key = api_key or os.environ.get("GEMINI_API_KEY", "")
        if not self.api_key:
            # Don't fail yet — generate_image() raises explicitly, and the
            # server will reject unauthenticated text requests anyway.
            self.logger.warning("No API key provided and GEMINI_API_KEY is not set.")
        self.model_name = model_tag or os.environ.get("GEMINI_MODEL_NAME", "gemini-2.5-flash-lite")

        # Base URL of the Gemini REST API
        self.base_url = "https://generativelanguage.googleapis.com/v1beta/models"
        self.logger.info(f"Connect to Gemini API with model【{self.model_name}】")

        # Query-string parameters sent with every request (API key auth).
        self.params = {
            "key": self.api_key,
        }
        # Default generation options; caller kwargs override the defaults.
        self.option = {
            "temperature": 0.15,
            "topP": 0.9,
            **kwargs
        }

    def _init_prompt(self):
        """Initialize default prompts and the chat-history template."""
        self.system_input = "You are a helpful assistant."
        self.user_input = ""
        self.assistant_input = ""
        # BUGFIX: the Gemini API expects `"parts": [{"text": ...}]`; the old
        # template used an OpenAI-style "content" key, which the endpoint
        # rejects on the first chat() call.
        self.message_tmpl = [
            {"role": "user", "parts": [{"text": self.system_input}]},
        ]
        self.reset_chat_history()

    # Backward-compatible alias for the old (typo-named) method.
    _init_prompt__ = _init_prompt

    def reset_chat_history(self):
        """Drop accumulated chat turns, keeping only the template messages."""
        # A new list is enough: chat() only appends fresh message dicts and
        # never mutates the template entries in place.
        self.message_history = list(self.message_tmpl)

    # ---------- Utilities ----------
    @staticmethod
    def load_img(image_path: str) -> str:
        """Read an image file and return its base64-encoded contents."""
        with open(image_path, 'rb') as image_file:
            binary_data = image_file.read()
        return base64.b64encode(binary_data).decode('utf-8')

    def _build_input(self, question=None, img_data: list = None):
        """Build a Gemini "parts" list from optional text and base64 JPEG images."""
        content = []
        if question:
            content.append({"text": question})
        for img in img_data or []:
            content.append({
                "inlineData": {
                    "mimeType": "image/jpeg",
                    "data": img
                }
            })
        return content

    def _post_generate(self, payload):
        """POST *payload* to the model's generateContent endpoint.

        :return: parsed JSON response body
        :raises Exception: on any non-200 HTTP status
        """
        url = f"{self.base_url}/{self.model_name}:generateContent"
        response = requests.post(url, params=self.params, json=payload,
                                 timeout=self.REQUEST_TIMEOUT)
        if response.status_code != 200:
            self.logger.error(f"Gemini API request failed with status {response.status_code}: {response.text}")
            raise Exception(f"Gemini API request failed: {response.text}")
        return response.json()

    @staticmethod
    def _extract_text(response):
        """Return the text of the first part of the first candidate, or ""."""
        candidates = response.get("candidates", [])
        if candidates:
            parts = candidates[0].get("content", {}).get("parts", [])
            if parts:
                return parts[0].get("text", "")
        return ""

    # ---------- Generate ----------
    def generate(self, question=None, img_data: list = None, ret_str=True):
        """One-shot generation with no conversation memory.

        :param question: text prompt
        :param img_data: optional list of base64-encoded JPEG images
        :param ret_str: if True return the generated text, else the raw JSON
        :raises Exception: on HTTP failure
        """
        contents = []

        # Prepend the system prompt as a first user message — this v1beta
        # request shape carries no dedicated system role.
        if self.system_input:
            contents.append({
                "role": "user",
                "parts": [{"text": self.system_input}]
            })

        contents.append({
            "role": "user",
            "parts": self._build_input(question, img_data)
        })
        self.logger.debug(f">>> Prompt: {contents}")

        payload = {
            "contents": contents,
            "generationConfig": self.option
        }
        data = self._post_generate(payload)
        return self.parse_generate(data) if ret_str else data

    def parse_generate(self, response):
        """Extract the generated text from a generateContent response dict."""
        self.logger.debug(f">>> Response: {response}")
        try:
            return self._extract_text(response)
        except Exception as e:
            # A malformed (non-dict) response degrades to empty text.
            self.logger.error(f"Failed to parse response: {e}")
            return ""

    def generate_until_finish(self, question=None, img_data: list = None, ret_str=True, wait_time=5, max_try=10):
        """Retry :meth:`generate` until it succeeds (the API may rate-limit).

        :param wait_time: seconds to sleep between attempts
        :param max_try: maximum number of attempts before giving up
        :raises Exception: when all attempts have failed
        """
        for attempt in range(max_try):
            try:
                return self.generate(question, img_data, ret_str)
            except Exception as e:
                self.logger.warning(f"Gemini API Error: {str(e)}\tWill try again.")
                if attempt < max_try - 1:  # no point sleeping after the final try
                    time.sleep(wait_time)

        raise Exception(f"Failed to complete chat after max_try={max_try} times.")

    # ---------- Chat ----------
    def chat(self, question=None, img_data: list = None, ret_str=True):
        """Multi-turn chat; the conversation context is cached on the instance.

        :param question: text prompt for this turn
        :param img_data: optional list of base64-encoded JPEG images
        :param ret_str: if True return the reply text, else the raw JSON
        :raises Exception: on HTTP failure (the user turn stays in history)
        """
        current_message = {
            "role": "user",
            "parts": self._build_input(question, img_data)
        }
        self.message_history.append(current_message)
        self.logger.debug(f">>> Prompt: {self.message_history}")

        payload = {
            "contents": self.message_history,
            "generationConfig": self.option
        }
        data = self._post_generate(payload)
        msg = self.parse_chat(data)

        # Record the model reply so the next turn sees the full context.
        self.message_history.append({"role": "model", "parts": [{"text": msg}]})
        return msg if ret_str else data

    def parse_chat(self, response):
        """Extract the reply text from a chat response (same layout as generate)."""
        try:
            return self._extract_text(response)
        except Exception as e:
            self.logger.error(f"Failed to parse response: {e}")
            return ""

    def parse_as_json(self, json_markdown_str):
        """Extract the JSON payload from a ```json fenced Markdown block.

        :return: the inner string (NOT parsed into objects), or None when
                 no fenced block is found
        """
        self.logger.debug(f">>> Input: {json_markdown_str}")
        pattern = r'```json\s*([^`]+)\s*```'  # match the fenced json block
        match = re.search(pattern, json_markdown_str, re.DOTALL)
        if match:
            # group() cannot raise here, so no try/except is needed.
            return match.group(1)
        self.logger.warning(f"Failed to parse JSON from LLM response")
        return None

    # ---------- Image generation ----------
    def generate_image(self, prompt, size="1024x1024", seed=None, reference_images=None):
        """Generate an image, optionally conditioned on reference images.

        :param prompt: text description of the desired image
        :param size: "WxH" string; validated for format but not sent — the
                     generateContent endpoint has no size parameter
        :param seed: optional deterministic seed
        :param reference_images: a base64 string or a list of base64 strings
        :return: dict echoing the request plus ``image_data`` (first inline
                 image part, or None) and ``full_response``
        :raises ValueError: when no API key is configured or size is malformed
        :raises Exception: on HTTP failure
        """
        if not self.api_key:
            raise ValueError("GEMINI_API_KEY environment variable not set")

        # Parse early so a malformed size raises ValueError before any
        # network traffic; the values themselves are intentionally unused.
        _width, _height = map(int, size.split('x'))

        contents = []
        if reference_images:
            content_parts = [{"text": prompt}]
            # Accept a single base64 string or a list of them; any other
            # truthy type is silently ignored (matching prior behavior).
            refs = [reference_images] if isinstance(reference_images, str) else reference_images
            if isinstance(refs, list):
                for img_data in refs:
                    assert isinstance(img_data, str), f"无法识别的img-data内容，type(img_data): {type(img_data)}"
                    content_parts.append({
                        "inlineData": {
                            "mimeType": "image/jpeg",
                            "data": img_data
                        }
                    })
            contents.append({"parts": content_parts})
        else:
            contents.append({
                "parts": [{
                    "text": prompt
                }]
            })

        request_data = {
            "contents": contents,
            "generationConfig": {
                "temperature": 0.4,
                "maxOutputTokens": 2048
            }
        }
        if seed is not None:
            request_data["generationConfig"]["seed"] = seed

        url = f"{self.base_url}/{self.model_name}:generateContent"
        response = requests.post(url, params=self.params, json=request_data,
                                 timeout=self.REQUEST_TIMEOUT)

        if response.status_code != 200:
            error_msg = f"Image generation failed with status {response.status_code}: {response.text}"
            self.logger.error(error_msg)
            raise Exception(error_msg)

        response_data = response.json()

        # Find the first inline image part of the first candidate, if any.
        image_data = None
        candidates = response_data.get("candidates", [])
        if candidates:
            parts = candidates[0].get("content", {}).get("parts", [])
            for part in parts:
                if "inlineData" in part:
                    image_data = part["inlineData"]
                    break

        result = {
            "prompt": prompt,
            "model": self.model_name,
            "size": size,
            "seed": seed,
            "reference_images": reference_images,
            "image_data": image_data,
            "full_response": response_data
        }
        self.logger.info("Image generation successful")
        return result

def test_chat():
    """Manual smoke test: one-shot text generation with the default model."""
    client = GeminiModelApi()
    client.connect()

    answer = client.generate("做个简短的自我介绍")
    print("Generate 输出:\n", answer, "\n")

def test_image():
    """Manual smoke test: image generation with the image-preview model."""
    client = GeminiModelApi()
    client.connect(model_tag="gemini-2.5-flash-image-preview")

    result = client.generate_image("一只可爱的猫咪", "1024x1024", seed=12345)
    print("图像生成输出:\n", result)

    # Reference-image variant (untested here): pass base64 data via
    # reference_images=[...] to condition the generation on existing images.


if __name__ == "__main__":
    # Manual smoke tests — require GEMINI_API_KEY to be set in the environment.
    # test_chat()
    test_image()
