# Import the base Model class from Flask-AppBuilder.
from flask_appbuilder import Model
# Import column types and Index construct from SQLAlchemy.
from sqlalchemy import Column, Index, Integer, String, Text

# Import a custom audit mixin for tracking creation/modification times and users.
from myapp.models.helpers import AuditMixinNullable
# Import a utility module for string operations, like MD5 hashing.
from myapp.utils import strings


# Expose the shared MetaData object from the Flask-AppBuilder declarative base.
# All models declared against `Model` register their tables here, so this is
# the single schema registry used by migrations and `create_all()`-style setup.
metadata = Model.metadata


# Define the EvaluationTask class, which maps to the 'evaluation_task' table.
# This model represents a single task for evaluating one or more machine learning models.
class EvaluationTask(Model, AuditMixinNullable):
    """A single task for evaluating one or more machine-learning models.

    Mapped to the ``evaluation_task`` table.  Several columns (``models``,
    ``evaluation_dimension``, ``result``, ``method``) hold JSON serialized as
    text; callers are responsible for encoding/decoding.  ``AuditMixinNullable``
    contributes the audit columns, including ``created_on`` (indexed below).
    """

    __tablename__ = 'evaluation_task'

    # Auto-incrementing surrogate primary key.
    id = Column(Integer, primary_key=True, autoincrement=True, comment='id')
    # Human-readable task name.
    name = Column(String(200), nullable=True, comment='评测任务名称')
    # Kind of evaluation being performed.  NOTE: attribute name shadows the
    # `type` builtin, but it is part of the mapped interface and must stay.
    type = Column(String(200), nullable=True, comment='评测类型')
    # Category of the model under evaluation (e.g. large language model).
    model_type = Column(String(200), nullable=True, comment='模型类型 (语言大模型)')
    # JSON text: model service IDs plus per-model parameter configuration.
    models = Column(Text, nullable=True, comment='评测模型信息，包含模型服务 ID 和参数配置')
    # ID of the evaluation dataset used by this task (no FK constraint here;
    # presumably references EvaluationDataset rows via task_id — see below).
    dataset_id = Column(Integer, nullable=True, comment='评测数据集 ID')
    # JSON text: scoring rules — evaluation dimensions and grading criteria.
    evaluation_dimension = Column(Text, nullable=True, comment='评分规则，包含评测维度和评分标准')
    # Free-form description of the task.
    desc = Column(String(200), nullable=True, comment='评测任务描述')
    # Lifecycle status of the task (e.g. pending/running/completed —
    # exact value set not visible here).
    status = Column(String(100), nullable=True, comment='QA对状态' if False else '评测任务状态')
    # Total number of QA pairs in the dataset.  NOT NULL with no default:
    # callers must supply a value on insert.
    total_cnt = Column(Integer, nullable=False, comment='QA对数量')
    # Number of QA pairs actually used (valid) in the evaluation.
    valid_cnt = Column(Integer, nullable=False, comment='有效QA对数量')
    # JSON text: final evaluation results.
    result = Column(Text, nullable=True, comment='评测结果')
    # Region where the task runs; defaulted both ORM-side and in DDL.
    region = Column(
        String(100), nullable=False, default='default', server_default='default', comment='地区'
    )
    # Error message when the task fails; empty string when healthy.
    # FIX: added server_default='' to match the region/mode/method pattern so
    # that non-ORM inserts (raw SQL, bulk loads) also satisfy NOT NULL.
    # NOTE(review): this is a DDL change — requires a migration on existing DBs.
    err_msg = Column(
        String(200), nullable=False, default='', server_default='', comment='错误消息'
    )
    # Evaluation mode (e.g. manual vs. automatic).
    mode = Column(
        String(100), nullable=False, default='manual', server_default='manual', comment='评测模式'
    )
    # Evaluation methods, stored as a JSON array string (default: empty list).
    method = Column(
        String(100), nullable=False, default='[]', server_default='[]', comment='评测方法'
    )

    __table_args__ = (
        # Index on the AuditMixinNullable-provided created_on column to speed
        # up queries that sort or filter by creation time.
        Index('ix_created_on', 'created_on'),
    )


# Define the EvaluationDataset class, which maps to the 'evaluation_dataset' table.
# This model represents a single data point (e.g., a question-answer pair) within an evaluation task.
class EvaluationDataset(Model, AuditMixinNullable):
    """One data point (a question/answer pair) belonging to an evaluation task.

    Mapped to the ``evaluation_dataset`` table.  Rows are deduplicated per
    task via a unique index on ``(task_id, hash)``.
    """

    __tablename__ = 'evaluation_dataset'

    # Auto-incrementing surrogate primary key.
    id = Column(Integer, primary_key=True, autoincrement=True, comment='id')
    # Owning evaluation task (by convention; no FK constraint declared here).
    task_id = Column(Integer, nullable=True, comment='测评任务ID')
    # Question / prompt text.  NOTE: attribute name shadows the `input`
    # builtin, but it is part of the mapped interface and must stay.
    input = Column(Text, nullable=True, comment='输入')
    # Ground-truth answer text.
    output = Column(Text, nullable=True, comment='输出')
    # Digest of input+output (see calc_hash) identifying the pair.
    hash = Column(String(100), nullable=False, comment='QA对hash')
    # Per-item details: model inference outputs and scores (JSON text).
    detail = Column(Text, nullable=True, comment='推理输出和打分')
    # Scoring status for this data point.
    status = Column(String(100), nullable=True, comment='打分状态')
    # Region; defaulted both ORM-side and in DDL.
    region = Column(
        String(100), nullable=False, default='default', server_default='default', comment='地区'
    )

    __table_args__ = (
        # Unique composite index: the same QA pair appears at most once
        # within a given task.
        Index('ix_task_id_hash', 'task_id', 'hash', unique=True),
    )

    @classmethod
    def calc_hash(cls, input: str, output: str):
        """Return the MD5 digest that identifies an (input, output) pair.

        ``None`` values are coalesced to empty strings so the digest is
        always well defined.
        """
        # `x or ''` maps None to '' (and leaves '' unchanged) before
        # concatenating the two halves for hashing.
        return strings.md5((input or '') + (output or ''))
