import json
from typing import List, Dict, Optional
import logging
import uuid
from openai import OpenAI

from label_studio_ml.model import LabelStudioMLBase
from label_studio_ml.response import ModelResponse
import os
from dotenv import load_dotenv

# Load environment variables from a local .env file (e.g. LABEL_STUDIO_URL, DEEPSEEK_API_KEY)
load_dotenv()
logger = logging.getLogger(__name__)
# Sanity check that the .env file was actually picked up
logger.info(f"读取的 LS 地址是：{os.environ.get('LABEL_STUDIO_URL')}")
# Initialize the OpenAI client pointed at DeepSeek (an OpenAI-API-compatible service).
# SECURITY: the API key must come from the environment, never be hardcoded in source.
client = OpenAI(
    api_key=os.environ.get("DEEPSEEK_API_KEY"),
    base_url="https://api.deepseek.com",
)

class DeepSeekTextClassifier(LabelStudioMLBase):
    """Label Studio ML backend that performs single-label text classification
    by prompting the DeepSeek chat API and returning predictions in the
    standard Label Studio format.
    """

    def get_labels(self) -> List[str]:
        """Return the choice labels declared in the project's labeling config."""
        li = self.label_interface
        from_name, _, _ = li.get_first_tag_occurence('Choices', 'Text')
        tag = li.get_tag(from_name)
        return tag.labels

    def setup(self):
        """Configure any parameters of your model here."""
        self.set("model_version", "0.0.1")

    def _load_task_text(self, task: Dict, value: str) -> Optional[str]:
        """Resolve a task's data field into plain text.

        String values starting with '/data/' are treated as Label Studio file
        URLs: they are resolved to a local (possibly cached) path and read as
        UTF-8 text. Any other value is handed to ``preload_task_data``.

        :param task: the Label Studio task dict
        :param value: the data key to read (from the labeling config)
        :return: the text content, or ``None`` when the field is missing or
                 cannot be read (the failure is logged).
        """
        data_value = task['data'].get(value)
        logger.debug("任务数据: %s", data_value)
        if not data_value:
            logger.warning(f"任务 {task['id']} 缺少字段 '{value}'")
            return None

        try:
            if isinstance(data_value, str) and data_value.startswith('/data/'):
                # File URL: download it (or reuse the cache) and read the text.
                try:
                    local_file = self.get_local_path(data_value, task_id=task['id'])
                    logger.debug("本地文件路径: %s", local_file)
                    with open(local_file, 'r', encoding='utf-8') as f:
                        return f.read()
                except Exception as e:
                    logger.error(f"读取任务文件失败：{data_value}, 错误信息: {e}")
                    return None
            # Inline text value: let the SDK resolve any $-variable references.
            return self.preload_task_data(task, data_value)
        except Exception as e:
            logger.error(f"任务 {task['id']} 的数据处理失败：{e}")
            return None

    def _classify(self, text: str, labels: List[str]):
        """Ask DeepSeek to pick exactly one label for *text*.

        :param text: the text to classify
        :param labels: the set of legal labels from the labeling config
        :return: a ``(label, score)`` tuple, or ``None`` when the API call
                 fails, the reply is not valid JSON, or the predicted label
                 is not one of *labels* (each failure is logged).
        """
        # System prompt instructing the model to answer with strict JSON only.
        system_prompt = f"""你是一名文本分类助手。请根据输入文本进行分类，并仅从以下标签中选择一个最合适的：{labels}。
            请严格返回 JSON 格式结果，例如如：{{"label": "Positive", "score": 0.92}}。不要包含多余解释。"""

        try:
            response = client.chat.completions.create(
                model="deepseek-chat",
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": text}
                ],
                # Force a JSON object so the reply is machine-parseable.
                response_format={
                    'type': 'json_object'
                },
                temperature=0  # deterministic output for reproducible predictions
            )
            result_text = response.choices[0].message.content
        except Exception as e:
            logger.error(f"DeepSeek 推理失败：{e}")
            return None

        try:
            result = json.loads(result_text)
        except json.JSONDecodeError:
            logger.error(f"DeepSeek 返回的内容无法解析为 JSON: {result_text}")
            return None

        predicted_label = result.get("label")
        # Reject labels outside the configured label set.
        if predicted_label not in labels:
            logger.warning(f"非法标签: {predicted_label} 不在 {labels} 中")
            return None

        # The model may omit the score or return a non-numeric value;
        # coerce to float and fall back to 0.0 rather than propagating junk.
        try:
            score = float(result.get("score"))
        except (TypeError, ValueError):
            score = 0.0
        return predicted_label, score

    def predict(self, tasks: List[Dict], context: Optional[Dict] = None, **kwargs) -> ModelResponse:
        """Classify each task's text with the DeepSeek API and return the
        results in the standard Label Studio prediction format.

        Tasks whose data cannot be read, or whose model reply is invalid,
        are skipped (no prediction is emitted for them).
        """
        # Field mapping from the labeling config: control tag, object tag, data key.
        li = self.label_interface
        from_name, to_name, value = li.get_first_tag_occurence('Choices', 'Text')
        labels = self.get_labels()
        predictions = []

        for task in tasks:
            text = self._load_task_text(task, value)
            if text is None:
                continue

            classified = self._classify(text, labels)
            if classified is None:
                continue
            predicted_label, score = classified

            # Build one prediction per task in Label Studio's result schema.
            predictions.append({
                "model_version": self.get("model_version"),
                "score": score,
                "result": [{
                    "id": str(uuid.uuid4()),  # unique region id
                    "from_name": from_name,
                    "to_name": to_name,
                    "type": "choices",
                    "value": {
                        "choices": [predicted_label]
                    }
                }]
            })

        return ModelResponse(predictions=predictions)

    def fit(self, event, data, **kwargs):
        """
        This method is called each time an annotation is created or updated
        You can run your logic here to update the model and persist it to the cache
        It is not recommended to perform long-running operations here, as it will block the main thread
        Instead, consider running a separate process or a thread (like RQ worker) to perform the training
        :param event: event type can be ('ANNOTATION_CREATED', 'ANNOTATION_UPDATED', 'START_TRAINING')
        :param data: the payload received from the event (check [Webhook event reference](https://labelstud.io/guide/webhook_reference.html))
        """

        # use cache to retrieve the data from the previous fit() runs
        old_data = self.get('my_data')
        old_model_version = self.get('model_version')
        logger.info('Old data: %s', old_data)
        logger.info('Old model version: %s', old_model_version)

        # store new data to the cache
        self.set('my_data', 'my_new_data_value')
        self.set('model_version', 'my_new_model_version')
        logger.info('New data: %s', self.get("my_data"))
        logger.info('New model version: %s', self.get("model_version"))

        logger.info('fit() completed successfully.')

