import datetime
import functools
import json
import math
import os
import time
from collections import defaultdict
from contextlib import contextmanager
from typing import Union

from chromadb import Documents, EmbeddingFunction, Embeddings
import chromadb
from pytz import timezone
from sqlalchemy import create_engine, Column, Integer, JSON, DateTime, asc, String, Text, and_, func, or_, cast, event, Boolean, text
from sqlalchemy.orm import sessionmaker, aliased
from sqlalchemy.ext.declarative import declarative_base
from odps import ODPS, options
import one_runtime
import util
from script import EvalInfo, EvalData

# Requires the environment variables ALIBABA_CLOUD_ACCESS_KEY_ID /
# ALIBABA_CLOUD_ACCESS_KEY_SECRET to hold the user's Access Key ID / Secret.

# Read-side ODPS account (used when importing data); a personal dev account is
# fine here since it only needs read access to dev tables.
options.tunnel.use_instance_tunnel = True
options.tunnel.limit_instance_tunnel = False  # disable the row limit so full result sets are read
read_odps = None
if one_runtime.get_config('READ_ODPS_ACCESS_ID'):
    read_odps = ODPS(one_runtime.get_config('READ_ODPS_ACCESS_ID'), one_runtime.get_config('READ_ODPS_ACCESS_KEY'), project=one_runtime.get_config('READ_ODPS_PROJECT'), endpoint=one_runtime.get_config('READ_ODPS_ENDPOINT', 'http://service-corp.odps.aliyun-inc.com/api'), )

# Write-side ODPS application account with permanent permissions (used for export).
# Manually create the table [alsc_merchant_agi_script_node_data] in the project
# and grant this account write access to it.
write_odps = None
if one_runtime.get_config('WRITE_ODPS_ACCESS_ID'):
    write_odps = ODPS(one_runtime.get_config('WRITE_ODPS_ACCESS_ID'), one_runtime.get_config('WRITE_ODPS_ACCESS_KEY'), project=one_runtime.get_config('WRITE_ODPS_PROJECT'), endpoint=one_runtime.get_config('WRITE_ODPS_ENDPOINT', 'http://service-corp.odps.aliyun-inc.com/api'), )
    if not one_runtime.get_config('WRITE_ODPS_TABLE'):
        raise ValueError('please set config WRITE_ODPS_TABLE')

Base = declarative_base()
Session = None
if one_runtime.get_config('SCRIPT_MYSQL_USERNAME'):
    # MySQL connection settings are read from one_runtime config.
    mysql_username = one_runtime.get_config('SCRIPT_MYSQL_USERNAME')
    mysql_password = one_runtime.get_config('SCRIPT_MYSQL_PASSWORD')
    mysql_host = one_runtime.get_config('SCRIPT_MYSQL_HOST')
    mysql_database = one_runtime.get_config('SCRIPT_MYSQL_DATABASE')
    # NOTE(review): removed commented-out hard-coded fallback credentials that
    # used to live here — never keep real passwords in source, even as comments.

    # Create the MySQL engine (pure-python connector, utf8mb4).
    engine = create_engine(f'mysql+mysqlconnector://{mysql_username}:{mysql_password}@{mysql_host}/{mysql_database}?use_pure=True&ssl_disabled=True&charset=utf8mb4&collation=utf8mb4_unicode_ci')
    Session = sessionmaker(bind=engine)


def sql_monitor(detail=False, original_result=False):
    '''
    Decorator that logs every call to a DB-access function: function name,
    wall-clock cost in milliseconds, and the exception class name (if any).
    The log entry is always emitted, whether the call succeeds or raises.
    :param detail: also log kwargs and a short description of the result
    :param original_result: when detail is on, log str(result) instead of a summary
    :return: the decorator
    '''

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            start_time = time.time()
            error = None
            result = None
            try:
                result = func(*args, **kwargs)
                return result
            except Exception as e:
                error = type(e).__name__  # record the exception class name as the status
                raise
            finally:
                end_time = time.time()
                if not detail:
                    log_obj = {
                        'func': func.__name__,
                        'cost': int((end_time - start_time) * 1000),
                        'error': error
                    }
                else:
                    # Summarise the return value without dumping large payloads.
                    if result is None:
                        result_desc = 'null'
                    elif original_result:
                        result_desc = str(result)
                    elif isinstance(result, int):
                        result_desc = str(result)
                    elif isinstance(result, (dict, list, set, tuple)):
                        result_desc = 'len=' + str(len(result))
                    else:
                        result_desc = 'unknown'
                    log_obj = {
                        'func': func.__qualname__,
                        'cost': int((end_time - start_time) * 1000),
                        'error': error,
                        'kwargs': kwargs,
                        'result_desc': result_desc
                    }
                util.log('sql_monitor', log_obj)

        return wrapper

    return decorator


class BaseDbStorage:
    """Shared DB plumbing: session lifecycle management and raw-SQL execution."""

    @contextmanager
    def session_scope(self):
        """Provide a transactional scope around a series of operations.

        Commits on success, rolls back on any exception, and always closes the
        session. Raises ValueError when the MySQL connection is not configured.
        """
        if not Session:
            raise ValueError('please check sys config SCRIPT_MYSQL_USERNAME,SCRIPT_MYSQL_PASSWORD,SCRIPT_MYSQL_HOST,SCRIPT_MYSQL_DATABASE')
        session = Session()
        try:
            yield session
            session.commit()
        except Exception:
            session.rollback()
            raise
        finally:
            session.close()

    @sql_monitor(detail=True)
    def execute_sql(self, sql):
        """Execute a raw SQL string.

        Returns (rows, keys) for row-returning statements, otherwise
        ([[rowcount]], ['rowcount']).

        NOTE(review): callers build these SQL strings by interpolation — they
        are not parameterized, so never pass untrusted input through here.
        """
        with self.session_scope() as session:
            result = session.execute(sql)
            # Explicit commit so DML takes effect before results are inspected;
            # session_scope commits again after the block, which is then a no-op.
            session.commit()
            if result.returns_rows:
                rows = result.fetchall()
                keys = result.keys()
                return rows, keys
            else:
                return [[result.rowcount]], ['rowcount']


# ORM model mapped to the `script_node_data` table: one row of data produced or
# imported for a script node, optionally linked to the source row it derives from.
class ScriptNodeData(Base):
    __tablename__ = 'script_node_data'

    id = Column(Integer, primary_key=True, autoincrement=True)
    # Row creation time, defaulted by the DB server (Asia/Shanghai timezone).
    gmt_create = Column(DateTime, server_default=func.now(tz=timezone('Asia/Shanghai')))
    script_name = Column(String(128), nullable=False)
    node = Column(String(128))
    data_group = Column(String(128), nullable=False)
    content_type = Column(String(128))
    content_key = Column(String(512))
    source_node = Column(String(128))
    source_id = Column(Integer)
    req_id = Column(String(128))
    source_nodes = Column(JSON)
    content = Column(Text)
    ext_info = Column(JSON)

    def content_as_json(self):
        """Best-effort parse of `content` as JSON; returns None when empty or unparseable."""
        # Defensive: content read back from the DB should already be a dict when it
        # was stored as JSON, but tolerate the raw string form as well.
        if isinstance(self.content, dict):
            return self.content
        if not self.content:
            return None
        try:
            return json.loads(self.content)
        except (TypeError, ValueError):
            # TypeError: non-str content; ValueError covers JSONDecodeError.
            return None

    def check_content_as_json(self) -> dict:
        """Parse `content` as JSON, raising ValueError when it is empty or malformed."""
        if isinstance(self.content, dict):
            return self.content
        if not self.content:
            raise ValueError('样本不规范,content为空')
        try:
            return json.loads(self.content)
        except (TypeError, ValueError):
            raise ValueError('样本不规范,content必须为json格式')

    def copy(self):
        """Return a detached duplicate of this row (content JSON-decoded when possible)."""
        new_obj = ScriptNodeData()
        new_obj.id = self.id
        new_obj.script_name = self.script_name
        new_obj.node = self.node
        new_obj.data_group = self.data_group
        new_obj.content_type = self.content_type
        new_obj.content_key = self.content_key
        new_obj.source_node = self.source_node
        new_obj.req_id = self.req_id
        new_obj.source_id = self.source_id
        new_obj.source_nodes = self.source_nodes if self.source_nodes else {}
        content = self.content
        try:
            content = json.loads(self.content)
        except (TypeError, ValueError):
            pass
        new_obj.content = content
        new_obj.ext_info = self.ext_info if self.ext_info else {}
        new_obj.gmt_create = self.gmt_create
        return new_obj

    def to_json(self):
        """Serialize this row to a JSON string (non-JSON types fall back to str())."""
        return json.dumps(self.to_dict(), ensure_ascii=False, default=str)

    @classmethod
    def from_json(cls, obj: str = None):
        """Build a ScriptNodeData from a JSON string or an already-parsed dict.

        :raises ValueError: when obj does not decode to a JSON object.
        """
        if not obj:
            return ScriptNodeData()
        if isinstance(obj, str):
            obj = json.loads(obj)
        if not isinstance(obj, dict):
            raise ValueError('obj 必须为json对象')
        new_obj = ScriptNodeData()
        new_obj.id = obj.get('id')
        new_obj.script_name = obj.get('script_name')
        new_obj.node = obj.get('node')
        new_obj.data_group = obj.get('data_group')
        new_obj.content_type = obj.get('content_type')
        new_obj.content_key = obj.get('content_key')
        new_obj.source_node = obj.get('source_node')
        new_obj.req_id = obj.get('req_id')
        new_obj.source_id = obj.get('source_id')
        new_obj.source_nodes = obj.get('source_nodes') if obj.get('source_nodes') is not None else {}
        content = obj.get('content')
        try:
            if isinstance(content, str):
                content = json.loads(content)
        except (TypeError, ValueError):
            pass
        new_obj.content = content
        new_obj.ext_info = obj.get('ext_info') if obj.get('ext_info') else {}
        # gmt_create may arrive as a formatted string or a (possibly float) timestamp.
        new_obj.gmt_create = obj.get('gmt_create')
        if isinstance(new_obj.gmt_create, str):
            new_obj.gmt_create = datetime.datetime.strptime(new_obj.gmt_create, "%Y-%m-%d %H:%M:%S")
        elif isinstance(new_obj.gmt_create, (int, float)):
            # Truncate floats to whole seconds before conversion.
            if isinstance(new_obj.gmt_create, float):
                new_obj.gmt_create = int(new_obj.gmt_create)
            new_obj.gmt_create = datetime.datetime.fromtimestamp(new_obj.gmt_create)
        return new_obj

    @classmethod
    def from_id(cls, id: int = None):
        """Load a row by primary key via the module-level storage; empty object when id is falsy."""
        if not id:
            return ScriptNodeData()
        return node_data_storage.select_data_by_id(id)

    def __repr__(self):
        return self.to_json()

    def to_dict(self):
        """Plain-dict view of this row; `content` is JSON-decoded when possible."""
        content = self.content
        try:
            content = json.loads(self.content)
        except (TypeError, ValueError):
            pass
        return {
            "id": self.id,
            "script_name": self.script_name,
            "node": self.node,
            "data_group": self.data_group,
            "content_type": self.content_type,
            "content_key": self.content_key,
            "source_node": self.source_node,
            "source_id": self.source_id,
            "req_id": self.req_id,
            "source_nodes": self.source_nodes if self.source_nodes else {},
            "content": content,
            "ext_info": self.ext_info if self.ext_info else {},
            "gmt_create": self.gmt_create
        }


# Alias of ScriptNodeData used for self-joins (joining a row to its source row).
SourceScriptNodeData = aliased(ScriptNodeData)


def is_nan(value):
    """Return True when *value* represents NaN: a NaN number, or the string 'nan'
    in any letter case. Everything else (including None) is False."""
    if isinstance(value, str):
        return value.lower() == 'nan'
    return isinstance(value, (float, int)) and math.isnan(value)


# 定义封装操作 script_node_data 数据表的 ScriptNodeDataStorage 类
class ScriptNodeDataStorage(BaseDbStorage):

    @sql_monitor()
    def add_data(self, script_node_data):
        """Insert one ScriptNodeData row, normalising optional fields beforehand.

        No-op when *script_node_data* is falsy.
        """
        if not script_node_data:
            return
        # Default the columns that should never be stored as SQL NULL.
        if script_node_data.content_type is None:
            script_node_data.content_type = 'default'
        if script_node_data.ext_info is None:
            script_node_data.ext_info = {}
        if script_node_data.source_nodes is None:
            script_node_data.source_nodes = {}
        # NaN content (e.g. from pandas imports) is treated as missing.
        if is_nan(script_node_data.content):
            script_node_data.content = None
        # Serialize structured content to a JSON string for the Text column.
        if script_node_data.content and isinstance(script_node_data.content, (dict, list)):
            script_node_data.content = json.dumps(script_node_data.content, ensure_ascii=False, default=str)
        with self.session_scope() as session:
            session.add(script_node_data)

    @sql_monitor()
    def add_datas(self, script_node_datas):
        """Bulk-insert ScriptNodeData rows, applying the same normalisation as add_data.

        No-op when *script_node_datas* is empty or falsy.
        """
        if not script_node_datas:
            return
        for item in script_node_datas:
            # Default the columns that should never be stored as SQL NULL.
            if item.content_type is None:
                item.content_type = 'default'
            if item.ext_info is None:
                item.ext_info = {}
            if item.source_nodes is None:
                item.source_nodes = {}
            # NaN content (e.g. from pandas imports) is treated as missing.
            if is_nan(item.content):
                item.content = None
            # Serialize structured content to a JSON string for the Text column.
            if item.content and isinstance(item.content, (dict, list)):
                item.content = json.dumps(item.content, ensure_ascii=False, default=str)
        with self.session_scope() as session:
            session.add_all(script_node_datas)

    @sql_monitor(detail=True)
    def clean_repeats(self):
        """Remove duplicate derived rows: for each (node, data_group, source_id)
        group only the row with the largest req_id is kept. Rows without a
        source_id are never touched."""
        self.execute_sql('''delete from script_node_data
where req_id not in (
    select req_id
    from (
             select node, data_group, source_id, max(req_id) req_id
             from script_node_data
             where source_id is not null
             group by node, data_group, source_id
         ) a
)
and  source_id is not null''')

    @sql_monitor(detail=True)
    def select_task_datas(self, script_name, node, data_group, source_node, source_group, source_type, begin_time, end_time, search, offset, size):
        """Select source rows not yet processed into (node, data_group).

        Returns rows from (script_name, source_node, source_group-list) that have
        non-empty content and for which no derived row exists whose source_id
        points at them. Optional narrowing by content_type list, time window,
        free-text search, and offset/size paging (applied to the inner query).

        NOTE(review): the SQL is built by f-string interpolation without
        parameterization — all arguments (especially `search`) must come from
        trusted callers; consider switching to bound parameters.
        """
        source_groups = '\',\''.join(source_group.split(','))
        source_type_str = 'and content_type in (\'' + '\',\''.join(source_type.split(',')) + '\')' if source_type else ''
        begin_time_str = 'and gmt_create >= \'' + begin_time + '\' ' if begin_time else ''
        end_time_str = 'and gmt_create <= \'' + end_time + '\' ' if end_time else ''
        search_str = f'''and (content like '%{search}%' or ext_info like '%{search}%' or content_key like '%{search}%' or
                          id = '{search}' or source_nodes like '%{search}%')''' if search else ''
        limit_str = ''
        if size is not None:
            # Default offset to 0 when not supplied.
            if offset is None:
                offset = 0
            limit_str = 'limit ' + str(offset) + ',' + str(size)
        sql = f"""
        select * from (select *
                   from script_node_data
                   where script_name = '{script_name}'
                     and node = '{source_node}'
                     and data_group in ('{source_groups}')
                     and content is not null
                     and content != ''
                     {source_type_str}
                     {begin_time_str}
                     {end_time_str}
                     {search_str}
                     {limit_str}
                     ) a
    WHERE NOT EXISTS (
        SELECT 1
        FROM script_node_data b
        WHERE b.source_id = a.id
        AND b.script_name = '{script_name}'
        AND b.node = '{node}'
        AND b.data_group = '{data_group}'
    )
        """
        rows, keys = node_data_storage.execute_sql(sql)
        # Materialise each returned row into a detached ScriptNodeData object.
        datas = []
        for row in rows:
            node_data = ScriptNodeData()
            # Map values to the ORM object based on column
            for column, val in zip(keys, row):
                setattr(node_data, column, val)
            node_data.source_nodes = json.loads(node_data.source_nodes) if node_data.source_nodes else {}
            node_data.ext_info = json.loads(node_data.ext_info) if node_data.ext_info else {}
            try:
                # Best-effort: content stays a raw string when it is not JSON.
                node_data.content = json.loads(node_data.content)
            except:
                pass
            datas.append(node_data)
        return datas

    @sql_monitor(detail=True)
    def select_task_data_by_id(self, id, script_name, node, data_group):
        """Fetch row *id* only if it has not yet been processed into
        (script_name, node, data_group); returns a detached ScriptNodeData with
        JSON columns decoded, or None when no such unprocessed row exists.

        NOTE(review): SQL is built via f-string interpolation — only call with
        trusted values; consider parameterized queries.
        """
        sql = f"""
        select * from script_node_data s1
where id={id}
AND NOT EXISTS (
    SELECT 1 FROM script_node_data s2
    WHERE s2.source_id = s1.id
    AND s2.node = '{node}'
    AND s2.script_name = '{script_name}'
    AND s2.data_group = '{data_group}'
)
        """
        rows, keys = node_data_storage.execute_sql(sql)
        if not rows:
            return None
        row = rows[0]
        node_data = ScriptNodeData()
        # Map values to the ORM object based on column
        for column, val in zip(keys, row):
            setattr(node_data, column, val)
        node_data.source_nodes = json.loads(node_data.source_nodes) if node_data.source_nodes else {}
        node_data.ext_info = json.loads(node_data.ext_info) if node_data.ext_info else {}
        try:
            # Best-effort: content stays a raw string when it is not JSON.
            node_data.content = json.loads(node_data.content)
        except:
            pass
        return node_data

    @sql_monitor()
    def import_datas(self, data_group, data_list, pre_check, remark, script_name):
        """Import externally supplied rows into (script_name, script_name, data_group).

        Each dict in *data_list* may carry 'content_key' / 'content_type' /
        'ext_info' (JSON string) / 'content'; when 'content' is absent the whole
        remaining dict is serialized as the row content. With pre_check=True
        nothing is written — a summary (existing count, incoming count, first
        row) is returned instead.

        :raises ValueError: when data_group or script_name is missing.
        """
        if not data_group or not script_name:
            raise ValueError('基本信息不全')
        data_list = data_list if data_list else []
        # One shared remark for the whole batch; default to an import timestamp.
        remark = remark if remark else str(math.ceil(time.time()))
        # Create ScriptNodeData objects from the data_list
        script_node_datas = []
        for data in data_list:
            script_node_data = ScriptNodeData()
            script_node_data.script_name = script_name
            script_node_data.data_group = data_group
            script_node_data.node = script_name
            if 'content_key' in data:
                script_node_data.content_key = data.pop('content_key')
            if 'content_type' in data:
                script_node_data.content_type = data.pop('content_type')
            else:
                script_node_data.content_type = 'default'
            # Imported rows have no upstream source.
            script_node_data.req_id = None
            script_node_data.source_node = None
            script_node_data.source_id = None
            script_node_data.source_nodes = None
            # BUG FIX: a present-but-falsy 'ext_info' value used to leave ext_info
            # as None and the subsequent update() raised AttributeError; default
            # to {} and only merge when the parsed value is actually a dict.
            ext_info = {}
            if 'ext_info' in data:
                ext_str = data.pop('ext_info')
                if ext_str:
                    parsed = json.loads(ext_str)
                    if isinstance(parsed, dict):
                        ext_info = parsed
            ext_info['remark'] = remark
            script_node_data.ext_info = ext_info

            if 'content' in data:
                script_node_data.content = data.get('content')
            else:
                script_node_data.content = json.dumps(data, ensure_ascii=False, default=str)
            script_node_data.gmt_create = datetime.datetime.now()
            script_node_datas.append(script_node_data)
        # Pre-check: report what would be imported without writing anything.
        if pre_check:
            # Only a simple existence hint for now: count rows already in the group.
            count = node_data_storage.count_datas(filters={
                'script_name': script_name,
                'node': script_name,
                'data_group': data_group
            })
            first = script_node_datas[0] if script_node_datas else None
            return {
                'old_count': count,
                'count': len(script_node_datas),
                'first_data': json.loads(first.to_json()) if first else {}
            }
        # Insert the data into the database
        self.add_datas(script_node_datas)
        return {
            'count': len(script_node_datas)
        }

    @sql_monitor()
    def delete_data_by_ids(self, script_node_data_ids):
        """Bulk-delete rows whose primary key is in *script_node_data_ids*; no-op when empty."""
        if not script_node_data_ids:
            return
        with self.session_scope() as session:
            query = session.query(ScriptNodeData).filter(ScriptNodeData.id.in_(script_node_data_ids))
            # synchronize_session=False: no in-memory objects to reconcile here.
            query.delete(synchronize_session=False)

    @sql_monitor(detail=True)
    def update_data_by_id(self, script_node_data_id, update_dict):
        """Apply *update_dict* to the row with the given id.

        Keys that are not attributes of ScriptNodeData are silently ignored;
        no-op when the id or the dict is falsy, or when the row does not exist.
        """
        if not script_node_data_id or not update_dict:
            return

        with self.session_scope() as session:
            row = session.query(ScriptNodeData).filter_by(id=script_node_data_id).first()
            if row is None:
                return
            for attr, value in update_dict.items():
                if hasattr(row, attr):
                    setattr(row, attr, value)

    @sql_monitor(detail=True)
    def select_data_by_id(self, script_node_data_id) -> ScriptNodeData:
        """Fetch one row by primary key; returns a detached copy, or None when
        the id is falsy or no row matches."""
        if not script_node_data_id:
            return None
        with self.session_scope() as session:
            row = session.query(ScriptNodeData).filter_by(id=script_node_data_id).first()
            if row is None:
                return None
            # copy() detaches the data so callers can use it after the session closes.
            return row.copy()

    @sql_monitor(detail=True)
    def select_datas(self, filters=None, offset=None, size=None) -> list[ScriptNodeData]:
        """Query script_node_data rows, returning detached copies.

        Supported filter keys: node / source_node / script_name (scalar or
        collection), content_type / data_group (comma-separated string),
        source_ids, ids, root_key+root_val (match inside the source_nodes JSON
        or the row's own id), begin_time / end_time ('%Y-%m-%d %H:%M'),
        content_not_null (bool), search (substring across several columns).
        Paging applies only when *size* is given; *offset* defaults to 0.
        """
        if filters is None:
            filters = {}

        with self.session_scope() as session:
            query = session.query(ScriptNodeData)

            # Apply the provided filters.
            if 'node' in filters:
                if isinstance(filters['node'], (list, set, tuple)):
                    query = query.filter(ScriptNodeData.node.in_(filters['node']))
                else:
                    query = query.filter(ScriptNodeData.node == filters['node'])

            if 'source_node' in filters:
                if isinstance(filters['source_node'], (list, set, tuple)):
                    query = query.filter(ScriptNodeData.source_node.in_(filters['source_node']))
                else:
                    query = query.filter(ScriptNodeData.source_node == filters['source_node'])

            if 'content_type' in filters:
                content_type_ = filters['content_type']
                # A comma-separated value selects multiple content types.
                if not content_type_:
                    pass
                elif ',' in content_type_:
                    query = query.filter(ScriptNodeData.content_type.in_(content_type_.split(',')))
                else:
                    query = query.filter(ScriptNodeData.content_type == content_type_)

            if 'script_name' in filters:
                if isinstance(filters['script_name'], (list, set, tuple)):
                    query = query.filter(ScriptNodeData.script_name.in_(filters['script_name']))
                else:
                    query = query.filter(ScriptNodeData.script_name == filters['script_name'])

            if 'data_group' in filters:
                group_ = filters['data_group']
                # A comma-separated value selects multiple data groups.
                if not group_:
                    pass
                elif ',' in group_:
                    query = query.filter(ScriptNodeData.data_group.in_(group_.split(',')))
                else:
                    query = query.filter(ScriptNodeData.data_group == group_)

            # Filter source_id by the source_ids list.
            if 'source_ids' in filters:
                query = query.filter(ScriptNodeData.source_id.in_(filters['source_ids']))
            # Filter id by the ids list.
            ids = filters.get('ids')
            if ids:
                query = query.filter(ScriptNodeData.id.in_(filters['ids']))

            # Root filter: rows whose source_nodes JSON maps root_key to root_val,
            # or whose own id equals root_val.
            if 'root_key' in filters and 'root_val' in filters:
                root_key = filters.get('root_key')
                root_val = filters.get('root_val')
                query = query.filter(or_(cast(ScriptNodeData.source_nodes[root_key], String) == str(root_val), ScriptNodeData.id == root_val))

            begin_time = filters.get('begin_time')
            if begin_time:
                begin_time = datetime.datetime.strptime(begin_time, '%Y-%m-%d %H:%M')
                query = query.filter(ScriptNodeData.gmt_create >= begin_time)

            end_time = filters.get('end_time')
            if end_time:
                end_time = datetime.datetime.strptime(end_time, '%Y-%m-%d %H:%M')
                query = query.filter(ScriptNodeData.gmt_create <= end_time)

            fil = filters.get('content_not_null')

            if fil is not None:
                # Constrain on content emptiness: truthy -> non-empty, falsy -> NULL/empty.
                if fil:
                    query = query.filter(and_(ScriptNodeData.content.isnot(None), ScriptNodeData.content != ''))
                else:
                    query = query.filter(or_(ScriptNodeData.content == None, ScriptNodeData.content == ''))

            search = filters.get('search')
            if search:
                query = query.filter(or_(ScriptNodeData.content.like(f"%{search}%"), ScriptNodeData.ext_info.like(f"%{search}%"), ScriptNodeData.content_key.like(f"%{search}%"), ScriptNodeData.id == search, ScriptNodeData.req_id.like(f"%{search}%"), ScriptNodeData.source_nodes.like(f"%:{search}%")))

            # Ascending id sort (currently disabled).
            # query = query.order_by(asc(ScriptNodeData.id))
            # Apply paging when size is given.
            if size is not None:
                # Default offset to 0 when not supplied.
                if offset is None:
                    offset = 0
                query = query.limit(size).offset(offset)

            # Fetch the rows and return detached copies.
            script_node_data_list = query.all()
            return [data.copy() for data in script_node_data_list]

    @sql_monitor(detail=True)
    def select_datas_4_export(self, filters=None, offset=None, size=None) -> list[dict]:
        """Query rows left-joined with their source row and flatten both into export dicts.

        Accepts the same filter keys as select_datas (filters apply to the main
        row only). Each result dict carries the row's own columns plus
        source_* columns taken from the joined source row (None when the row
        has no source). Paging applies only when *size* is given.
        """
        if filters is None:
            filters = {}

        with self.session_scope() as session:
            query = session.query(ScriptNodeData, SourceScriptNodeData).outerjoin(SourceScriptNodeData, ScriptNodeData.source_id == SourceScriptNodeData.id)

            # Apply the provided filters.
            if 'node' in filters:
                if isinstance(filters['node'], (list, set, tuple)):
                    query = query.filter(ScriptNodeData.node.in_(filters['node']))
                else:
                    query = query.filter(ScriptNodeData.node == filters['node'])

            if 'source_node' in filters:
                if isinstance(filters['source_node'], (list, set, tuple)):
                    query = query.filter(ScriptNodeData.source_node.in_(filters['source_node']))
                else:
                    query = query.filter(ScriptNodeData.source_node == filters['source_node'])

            if 'content_type' in filters:
                content_type_ = filters['content_type']
                # A comma-separated value selects multiple content types.
                if not content_type_:
                    pass
                elif ',' in content_type_:
                    query = query.filter(ScriptNodeData.content_type.in_(content_type_.split(',')))
                else:
                    query = query.filter(ScriptNodeData.content_type == content_type_)

            if 'script_name' in filters:
                if isinstance(filters['script_name'], (list, set, tuple)):
                    query = query.filter(ScriptNodeData.script_name.in_(filters['script_name']))
                else:
                    query = query.filter(ScriptNodeData.script_name == filters['script_name'])

            if 'data_group' in filters:
                group_ = filters['data_group']
                # A comma-separated value selects multiple data groups.
                if not group_:
                    pass
                elif ',' in group_:
                    query = query.filter(ScriptNodeData.data_group.in_(group_.split(',')))
                else:
                    query = query.filter(ScriptNodeData.data_group == group_)

            # Filter source_id by the source_ids list.
            if 'source_ids' in filters:
                query = query.filter(ScriptNodeData.source_id.in_(filters['source_ids']))
            # Filter id by the ids list.
            ids = filters.get('ids')
            if ids:
                query = query.filter(ScriptNodeData.id.in_(filters['ids']))

            # Root filter: rows whose source_nodes JSON maps root_key to root_val,
            # or whose own id equals root_val.
            if 'root_key' in filters and 'root_val' in filters:
                root_key = filters.get('root_key')
                root_val = filters.get('root_val')
                query = query.filter(or_(cast(ScriptNodeData.source_nodes[root_key], String) == str(root_val), ScriptNodeData.id == root_val))

            begin_time = filters.get('begin_time')
            if begin_time:
                begin_time = datetime.datetime.strptime(begin_time, '%Y-%m-%d %H:%M')
                query = query.filter(ScriptNodeData.gmt_create >= begin_time)

            end_time = filters.get('end_time')
            if end_time:
                end_time = datetime.datetime.strptime(end_time, '%Y-%m-%d %H:%M')
                query = query.filter(ScriptNodeData.gmt_create <= end_time)

            fil = filters.get('content_not_null')

            if fil is not None:
                # Constrain on content emptiness: truthy -> non-empty, falsy -> NULL/empty.
                if fil:
                    query = query.filter(and_(ScriptNodeData.content.isnot(None), ScriptNodeData.content != ''))
                else:
                    query = query.filter(or_(ScriptNodeData.content == None, ScriptNodeData.content == ''))

            search = filters.get('search')
            if search:
                query = query.filter(or_(ScriptNodeData.content.like(f"%{search}%"), ScriptNodeData.ext_info.like(f"%{search}%"), ScriptNodeData.content_key.like(f"%{search}%"), ScriptNodeData.id == search, ScriptNodeData.req_id.like(f"%{search}%"), ScriptNodeData.source_nodes.like(f"%:{search}%")))

            # Ascending id sort (currently disabled).
            # query = query.order_by(asc(ScriptNodeData.id))
            # Apply paging when size is given.
            if size is not None:
                # Default offset to 0 when not supplied.
                if offset is None:
                    offset = 0
                query = query.limit(size).offset(offset)

            # Fetch the joined (row, source_row) pairs.
            node_data_aggregation_list = query.all()

            # Flatten each pair into a single export dict.
            result_list = []
            for record in node_data_aggregation_list:
                script_node_data, source_script_node_data = record
                content = script_node_data.content
                try:
                    # Best-effort: content stays a raw string when it is not JSON.
                    content = json.loads(script_node_data.content)
                except:
                    pass

                source_content = None
                if source_script_node_data:
                    try:
                        source_content = json.loads(source_script_node_data.content)
                    except:
                        pass

                result = {
                    'id': script_node_data.id,
                    'script_name': script_node_data.script_name,
                    'node': script_node_data.node,
                    'data_group': script_node_data.data_group,
                    'content_type': script_node_data.content_type,
                    'content_key': script_node_data.content_key,
                    'content': content,
                    'ext_info': script_node_data.ext_info if script_node_data.ext_info else {},
                    'req_id': script_node_data.req_id,
                    'gmt_create': script_node_data.gmt_create,
                    'source_id': script_node_data.source_id,
                    'source_node': script_node_data.source_node,
                    'source_nodes': script_node_data.source_nodes if script_node_data.source_nodes else {},
                    'source_data_group': source_script_node_data.data_group if source_script_node_data else None,
                    'source_content_type': source_script_node_data.content_type if source_script_node_data else None,
                    'source_content_key': source_script_node_data.content_key if source_script_node_data else None,
                    'source_content': source_content,
                    'source_ext_info': source_script_node_data.ext_info if source_script_node_data else None
                }
                result_list.append(result)
            return result_list

    @sql_monitor(detail=True)
    def delete_datas(self, filters=None) -> int:
        """Bulk-delete ScriptNodeData rows matching ``filters``.

        Builds the same AND-combined filter chain as the select/count variants,
        then issues a single bulk DELETE (synchronize_session=False, so no
        ORM-level session sync happens). Returns the number of rows deleted.
        """
        if filters is None:
            filters = {}

        with self.session_scope() as session:
            query = session.query(ScriptNodeData)

            # Narrow the query with the provided filter conditions (ANDed).
            if 'node' in filters:
                # Each of these filters accepts a scalar or a collection.
                if isinstance(filters['node'], (list, set, tuple)):
                    query = query.filter(ScriptNodeData.node.in_(filters['node']))
                else:
                    query = query.filter(ScriptNodeData.node == filters['node'])

            if 'source_node' in filters:
                if isinstance(filters['source_node'], (list, set, tuple)):
                    query = query.filter(ScriptNodeData.source_node.in_(filters['source_node']))
                else:
                    query = query.filter(ScriptNodeData.source_node == filters['source_node'])

            if 'content_type' in filters:
                content_type_ = filters['content_type']
                # A comma-separated string selects several content types at once;
                # an empty value means "no filter".
                if not content_type_:
                    pass
                elif ',' in content_type_:
                    query = query.filter(ScriptNodeData.content_type.in_(content_type_.split(',')))
                else:
                    query = query.filter(ScriptNodeData.content_type == content_type_)

            if 'script_name' in filters:
                if isinstance(filters['script_name'], (list, set, tuple)):
                    query = query.filter(ScriptNodeData.script_name.in_(filters['script_name']))
                else:
                    query = query.filter(ScriptNodeData.script_name == filters['script_name'])

            if 'data_group' in filters:
                group_ = filters['data_group']
                # Same comma-separated convention as content_type.
                if not group_:
                    pass
                elif ',' in group_:
                    query = query.filter(ScriptNodeData.data_group.in_(group_.split(',')))
                else:
                    query = query.filter(ScriptNodeData.data_group == group_)

            # Restrict source_id to the provided id list.
            if 'source_ids' in filters:
                query = query.filter(ScriptNodeData.source_id.in_(filters['source_ids']))
            # Restrict primary keys to the provided 'ids' list (empty list -> no filter).
            ids = filters.get('ids')
            if ids:
                query = query.filter(ScriptNodeData.id.in_(filters['ids']))

            # Optional "root" filter: keep rows whose source_nodes JSON maps
            # root_key to root_val, or whose own id equals root_val.
            if 'root_key' in filters and 'root_val' in filters:
                root_key = filters.get('root_key')
                root_val = filters.get('root_val')
                query = query.filter(or_(cast(ScriptNodeData.source_nodes[root_key], String) == str(root_val), ScriptNodeData.id == root_val))

            # Inclusive time window on gmt_create; values are 'YYYY-MM-DD HH:MM' strings.
            begin_time = filters.get('begin_time')
            if begin_time:
                begin_time = datetime.datetime.strptime(begin_time, '%Y-%m-%d %H:%M')
                query = query.filter(ScriptNodeData.gmt_create >= begin_time)

            end_time = filters.get('end_time')
            if end_time:
                end_time = datetime.datetime.strptime(end_time, '%Y-%m-%d %H:%M')
                query = query.filter(ScriptNodeData.gmt_create <= end_time)

            fil = filters.get('content_not_null')

            if fil is not None:
                # Tri-state switch: True keeps rows with non-empty content,
                # False keeps rows with NULL/empty content, None means no filter.
                if fil:
                    query = query.filter(and_(ScriptNodeData.content.isnot(None), ScriptNodeData.content != ''))
                else:
                    query = query.filter(or_(ScriptNodeData.content == None, ScriptNodeData.content == ''))

            # Free-text search across several columns; note the ':' prefix on the
            # source_nodes pattern -- presumably to match JSON values rather than
            # keys (TODO confirm).
            search = filters.get('search')
            if search:
                query = query.filter(or_(ScriptNodeData.content.like(f"%{search}%"), ScriptNodeData.ext_info.like(f"%{search}%"), ScriptNodeData.content_key.like(f"%{search}%"), ScriptNodeData.id == search, ScriptNodeData.req_id.like(f"%{search}%"), ScriptNodeData.source_nodes.like(f"%:{search}%")))

            # Ordering is irrelevant for a bulk delete.
            # query = query.order_by(asc(ScriptNodeData.id))

            # Delete the matching rows instead of fetching them.
            deleted_count = query.delete(synchronize_session=False)
            session.commit()  # explicit commit kept; session_scope may also commit
            return deleted_count

    @sql_monitor(detail=True, original_result=True)
    def count_datas(self, filters=None):
        """Count ScriptNodeData rows matching ``filters``.

        Accepts the same filter keys as select/delete: scalar-or-collection
        equality filters, comma-separated multi-value strings, id lists, a
        JSON "root" filter, a gmt_create time window, a content_not_null
        tri-state switch and a free-text search.
        """
        if filters is None:
            filters = {}

        with self.session_scope() as session:
            query = session.query(ScriptNodeData)

            # Columns that accept either a single value or a collection.
            for key, column in (('node', ScriptNodeData.node),
                                ('source_node', ScriptNodeData.source_node),
                                ('script_name', ScriptNodeData.script_name)):
                if key in filters:
                    value = filters[key]
                    if isinstance(value, (list, set, tuple)):
                        query = query.filter(column.in_(value))
                    else:
                        query = query.filter(column == value)

            # Columns whose value may pack several choices into one
            # comma-separated string; an empty value means "no filter".
            for key, column in (('content_type', ScriptNodeData.content_type),
                                ('data_group', ScriptNodeData.data_group)):
                if key in filters:
                    value = filters[key]
                    if not value:
                        continue
                    if ',' in value:
                        query = query.filter(column.in_(value.split(',')))
                    else:
                        query = query.filter(column == value)

            # Restrict source_id / primary key to the provided id lists.
            if 'source_ids' in filters:
                query = query.filter(ScriptNodeData.source_id.in_(filters['source_ids']))
            if filters.get('ids'):
                query = query.filter(ScriptNodeData.id.in_(filters['ids']))

            # Optional "root" filter: rows whose source_nodes JSON maps
            # root_key to root_val, or whose own id equals root_val.
            if 'root_key' in filters and 'root_val' in filters:
                root_key = filters.get('root_key')
                root_val = filters.get('root_val')
                query = query.filter(or_(
                    cast(ScriptNodeData.source_nodes[root_key], String) == str(root_val),
                    ScriptNodeData.id == root_val))

            # Inclusive gmt_create time window ('YYYY-MM-DD HH:MM' strings).
            begin_raw = filters.get('begin_time')
            if begin_raw:
                query = query.filter(ScriptNodeData.gmt_create >= datetime.datetime.strptime(begin_raw, '%Y-%m-%d %H:%M'))
            end_raw = filters.get('end_time')
            if end_raw:
                query = query.filter(ScriptNodeData.gmt_create <= datetime.datetime.strptime(end_raw, '%Y-%m-%d %H:%M'))

            # Tri-state content filter: True -> non-empty, False -> NULL/empty,
            # None -> no filter.
            not_null = filters.get('content_not_null')
            if not_null is not None:
                if not_null:
                    query = query.filter(and_(ScriptNodeData.content.isnot(None), ScriptNodeData.content != ''))
                else:
                    query = query.filter(or_(ScriptNodeData.content == None, ScriptNodeData.content == ''))

            # Free-text search across several columns.
            search = filters.get('search')
            if search:
                pattern = f"%{search}%"
                query = query.filter(or_(
                    ScriptNodeData.content.like(pattern),
                    ScriptNodeData.ext_info.like(pattern),
                    ScriptNodeData.content_key.like(pattern),
                    ScriptNodeData.source_id.like(pattern),
                    ScriptNodeData.id.like(pattern),
                    ScriptNodeData.req_id.like(pattern),
                    ScriptNodeData.source_nodes.like(pattern)))

            return query.count()

    @sql_monitor(detail=True, original_result=True)
    def distinct_types_by_script_name(self, script_names):
        """Map script_name -> node -> list of distinct content_type values.

        Returns {} for an empty ``script_names`` list. Rows with a falsy
        content_type are skipped.
        """
        if not script_names:
            return {}
        with self.session_scope() as session:
            query = (session.query(ScriptNodeData.script_name,
                                   ScriptNodeData.node,
                                   ScriptNodeData.content_type)
                     .filter(ScriptNodeData.script_name.in_(script_names))
                     .distinct(ScriptNodeData.script_name, ScriptNodeData.node, ScriptNodeData.content_type))
            mapping = {}
            for script_name, node, content_type in query.all():
                if not content_type:
                    continue
                mapping.setdefault(script_name, {}).setdefault(node, []).append(content_type)
            return mapping

    @sql_monitor(detail=True, original_result=True)
    def distinct_groups(self):
        # 查询现有数据集信息
        rows, keys = self.execute_sql('''
    SELECT distinct script_name,
                node,
                data_group
FROM script_node_data ''')
        # 该节点的数据集
        group_map = {}
        for row in rows:
            script_name = row[0]
            node = row[1]
            data_group = row[2]
            # group_map
            if script_name not in group_map:
                group_map[script_name] = {}
            if node not in group_map[script_name]:
                group_map[script_name][node] = [data_group]
            else:
                group_map[script_name][node].append(data_group)
        return group_map

    @sql_monitor(detail=True, original_result=True)
    def distinct_source_to_nodes(self):
        # 查询现有数据集信息
        rows, keys = self.execute_sql('''
        SELECT b.script_name,
       b.node,
       b.data_group,
       GROUP_CONCAT(DISTINCT CONCAT(a.node, '|', a.data_group)) as nodes
FROM script_node_data a
         INNER JOIN
     script_node_data b
     ON
         a.source_id = b.id
WHERE a.source_id IS NOT NULL
GROUP BY b.script_name, b.node, b.data_group''')
        # 该节点的数据集
        source_to_node_map = {}
        for row in rows:
            script_name = row[0]
            node = row[1]
            data_group = row[2]
            nodes = row[3].split(',')
            if script_name not in source_to_node_map:
                source_to_node_map[script_name] = {}
            kvs = {}
            source_to_node_map[script_name][node + '|' + data_group] = kvs
            # {
            # #     "在线sft样本生成|全链路分流rerank实验桶": {
            # #         "req_37763": [
            # #             "1020跑批测试",
            # #             "1024跑批",
            # #             "1026",
            # #             "1026-2",
            # #             "1030-01",
            # #             "全链路精排测试1025"
            # #         ]
            # #     }
            # # }
            for item in nodes:
                item_node = item.split('|')[0]
                item_group = item.split('|')[1]

                if item_node not in kvs:
                    kvs[item_node] = []
                kvs[item_node].append(item_group)

        return source_to_node_map

    @sql_monitor(detail=True, original_result=True)
    def distinct_node_to_sources(self):
        # 查询现有数据集信息
        rows, keys = self.execute_sql('''
            SELECT a.script_name,
       a.node,
       GROUP_CONCAT(DISTINCT CONCAT(b.node, '|', b.data_group)) as sources
FROM script_node_data a
         INNER JOIN
     script_node_data b
     ON
         a.source_id = b.id
WHERE a.source_id IS NOT NULL
GROUP BY a.script_name, a.node''')

        # 该节点的数据集
        node_to_source_map = {}
        for row in rows:
            script_name = row[0]
            node = row[1]
            sources = row[2].split(',')
            if script_name not in node_to_source_map:
                node_to_source_map[script_name] = {}
            node_to_source_map[script_name][node] = sources  # {  #     "req_37763": [  #         "judge_has_problem|crm_luxi_test",  #         "judge_has_problem|账号10511467971"  #     ]  # }

        return node_to_source_map

    @sql_monitor(detail=True, original_result=True)
    def distinct_source_by_script_name(self):
        """Build both directions of the node/source lineage in one pass.

        Returns (source_to_node_map, node_to_source_map):
        - source_to_node_map: {script: {"src_node|src_group": {node: [data_group, ...]}}}
        - node_to_source_map: {script: {node: sorted ["src_node|src_group", ...]}}
        """
        rows, keys = self.execute_sql('''
    SELECT
        a.script_name,
        a.node,
        a.data_group,
        GROUP_CONCAT(DISTINCT CONCAT(b.node, '|', b.data_group)) as source_groups
    FROM
        script_node_data a
    INNER JOIN
        script_node_data b
    ON
        a.source_id = b.id
    WHERE
        a.source_id IS NOT NULL
    GROUP BY
        a.script_name,
        a.node,
        a.data_group''')
        source_to_node_map = {}
        node_to_source_map = {}
        for script_name, node, data_group, groups_concat in rows:
            source_groups = groups_concat.split(',')

            # node -> merged, de-duplicated, sorted list of its sources.
            by_node = node_to_source_map.setdefault(script_name, {})
            if node in by_node:
                by_node[node] = sorted(set(by_node[node] + source_groups))
            else:
                by_node[node] = source_groups

            # source "node|group" -> the nodes/groups it feeds.
            for source_group in source_groups:
                bucket = source_to_node_map.setdefault(script_name, {}).setdefault(source_group, {})
                bucket.setdefault(node, []).append(data_group)

        return source_to_node_map, node_to_source_map


# 调用记录
class ScriptCallRecord(Base):
    """ORM model for one script call record (table ``script_call_record``)."""
    __tablename__ = 'script_call_record'
    id = Column(Integer, primary_key=True, autoincrement=True)
    script_name = Column(String(128), nullable=False)
    node = Column(String(128), nullable=False)
    source_id = Column(Integer, nullable=False)
    req_id = Column(String(128))
    client_type = Column(String(64), nullable=False)
    params = Column(JSON)
    param_key = Column(String(128))
    status = Column(String(64))
    cost = Column(Integer)  # call duration; presumably milliseconds -- TODO confirm
    result = Column(Text)
    ext_info = Column(JSON)
    gmt_create = Column(DateTime, server_default=func.now(tz=timezone('Asia/Shanghai')))

    def _parsed_result(self):
        """Return ``result`` decoded from JSON when possible, else the raw value.

        Narrowed from a bare ``except``: json.loads raises TypeError for
        non-string input (e.g. None) and JSONDecodeError (a ValueError) for
        invalid JSON -- nothing else needs swallowing here.
        """
        try:
            return json.loads(self.result)
        except (TypeError, ValueError):
            return self.result

    def copy(self):
        """Return a detached copy; JSON columns default to {} and ``result``
        is JSON-decoded when possible."""
        new_obj = ScriptCallRecord()
        new_obj.id = self.id
        new_obj.script_name = self.script_name
        new_obj.node = self.node
        new_obj.source_id = self.source_id
        new_obj.req_id = self.req_id
        new_obj.client_type = self.client_type
        new_obj.params = self.params if self.params else {}
        new_obj.param_key = self.param_key
        new_obj.status = self.status
        new_obj.cost = self.cost
        new_obj.result = self._parsed_result()
        new_obj.ext_info = self.ext_info if self.ext_info else {}
        new_obj.gmt_create = self.gmt_create
        return new_obj

    def to_dict(self):
        """Return a plain-dict view with the same normalization as copy()."""
        return {
            "id": self.id,
            "script_name": self.script_name,
            "node": self.node,
            "source_id": self.source_id,
            "req_id": self.req_id,
            "client_type": self.client_type,
            "params": self.params if self.params else {},
            "param_key": self.param_key,
            "status": self.status,
            "cost": self.cost,
            "result": self._parsed_result(),
            "ext_info": self.ext_info if self.ext_info else {},
            "gmt_create": self.gmt_create
        }

    def __repr__(self):
        return self.to_json()

    def to_json(self):
        """Serialize to_dict() as JSON; non-serializable values fall back to str()."""
        return json.dumps(self.to_dict(), ensure_ascii=False, default=str)


# 调用记录存储
class ScriptCallRecordStorage(BaseDbStorage):
    """CRUD helpers for ScriptCallRecord (table ``script_call_record``)."""

    @sql_monitor(detail=True)
    def clean_records(self):
        """Drop call records whose source row no longer exists in script_node_data."""
        self.execute_sql('''
        delete from script_call_record
        where source_id not in (
        select id from script_node_data
        )
        ''')

    @sql_monitor()
    def add_record(self, script_call_record):
        """Persist one call record, normalizing fields first.

        None params/ext_info become {}; a status that equals True is stored as
        the string 'true'; dict/list results are serialized to JSON text.
        """
        if not script_call_record:
            return
        with self.session_scope() as session:

            if script_call_record.ext_info is None:
                script_call_record.ext_info = {}
            if script_call_record.params is None:
                script_call_record.params = {}
            # '==' (not 'is') kept on purpose: it also catches an int 1 status.
            if script_call_record.status == True:
                script_call_record.status = 'true'
            if script_call_record.result:
                script_call_record.result = json.dumps(script_call_record.result, ensure_ascii=False, default=str) if isinstance(script_call_record.result, (dict, list)) else script_call_record.result

            session.add(script_call_record)

    @sql_monitor(detail=True)
    def delete_record(self, record_id):
        """Delete the record with the given primary key.

        Raises ValueError on a falsy record_id. A missing row is a no-op.
        """
        if not record_id:
            raise ValueError('record_id cannot be null')
        with self.session_scope() as session:
            record = session.query(ScriptCallRecord).filter_by(id=record_id).first()
            # BUGFIX: the original passed a possibly-None lookup straight to
            # session.delete(), which raises UnmappedInstanceError when the row
            # does not exist.
            if record is not None:
                session.delete(record)

    @sql_monitor(detail=True)
    def select_records(self, filters=None, offset=None, size=None) -> list[ScriptCallRecord]:
        """Fetch call records matching ``filters`` with optional pagination.

        Pagination applies only when ``size`` is given; a missing ``offset``
        defaults to 0. Returns detached copies of the rows.
        """
        if filters is None:
            filters = {}
        with self.session_scope() as session:
            query = session.query(ScriptCallRecord)
            # Simple equality filters, AND-combined.
            if 'script_name' in filters:
                query = query.filter(ScriptCallRecord.script_name == filters['script_name'])
            if 'node' in filters:
                query = query.filter(ScriptCallRecord.node == filters['node'])
            if 'source_id' in filters:
                query = query.filter(ScriptCallRecord.source_id == filters['source_id'])
            if 'source_ids' in filters:
                query = query.filter(ScriptCallRecord.source_id.in_(filters['source_ids']))
            if 'req_id' in filters:
                query = query.filter(ScriptCallRecord.req_id == filters['req_id'])
            if 'client_type' in filters:
                query = query.filter(ScriptCallRecord.client_type == filters['client_type'])
            if 'status' in filters:
                query = query.filter(ScriptCallRecord.status == filters['status'])

            if size is not None:
                if offset is None:
                    offset = 0
                query = query.limit(size).offset(offset)

            records = query.all()
            return [record.copy() for record in records]


# 评测记录
class ScriptEvalData(Base):
    """ORM model for one evaluation record (table ``script_eval_data``)."""
    __tablename__ = 'script_eval_data'
    id = Column(Integer, primary_key=True, autoincrement=True)
    script_name = Column(String(128), nullable=False)
    source_node = Column(String(128), nullable=False)
    source_group = Column(String(128), nullable=False)
    source_id = Column(Integer, nullable=False)
    source_content_type = Column(String(128), nullable=False)
    source_content = Column(Text)
    eval_name = Column(String(128), nullable=False)
    eval_node = Column(String(128), nullable=False)
    req_id = Column(String(128), nullable=False)
    eval_datas = Column(JSON, nullable=False)
    results = Column(JSON)
    ext_info = Column(JSON)
    gmt_create = Column(DateTime, server_default=func.now(tz=timezone('Asia/Shanghai')))

    def copy(self):
        """Return a detached copy; source_content is JSON-decoded when possible
        and JSON columns default to {}."""
        new_obj = ScriptEvalData()
        new_obj.id = self.id
        new_obj.script_name = self.script_name
        new_obj.source_node = self.source_node
        new_obj.source_group = self.source_group
        new_obj.source_id = self.source_id
        new_obj.source_content_type = self.source_content_type
        source_content = self.source_content
        try:
            source_content = json.loads(self.source_content)
        except (TypeError, ValueError):
            # Narrowed from a bare except: None / non-string / invalid JSON
            # keeps the raw value.
            pass
        new_obj.source_content = source_content
        new_obj.eval_name = self.eval_name
        new_obj.eval_node = self.eval_node
        new_obj.req_id = self.req_id
        new_obj.eval_datas = self.eval_datas if self.eval_datas else {}
        new_obj.results = self.results if self.results else {}
        new_obj.ext_info = self.ext_info if self.ext_info else {}
        new_obj.gmt_create = self.gmt_create
        return new_obj

    def to_dict(self):
        """Return a plain-dict view of this row.

        NOTE(review): unlike copy(), source_content is NOT JSON-decoded here --
        confirm the asymmetry is intentional before changing it.
        """
        return {
            "id": self.id,
            "script_name": self.script_name,
            "source_node": self.source_node,
            "source_group": self.source_group,
            "source_id": self.source_id,
            "source_content_type": self.source_content_type,
            'source_content': self.source_content,
            "eval_name": self.eval_name,
            "eval_node": self.eval_node,
            "req_id": self.req_id,
            "eval_datas": self.eval_datas if self.eval_datas else {},
            "results": self.results if self.results else {},
            "ext_info": self.ext_info if self.ext_info else {},
            "gmt_create": self.gmt_create
        }

    def __repr__(self):
        return self.to_json()

    def to_json(self):
        """Serialize to_dict() as JSON; non-serializable values fall back to str()."""
        return json.dumps(self.to_dict(), ensure_ascii=False, default=str)


# 评测数据
class ScriptEvalDataStorage(BaseDbStorage):
    """CRUD and query helpers for ScriptEvalData (table ``script_eval_data``)."""

    @staticmethod
    def _apply_eval_filters(query, filters):
        """Apply the filter set shared by select/delete/count (AND-combined).

        Recognized keys: script_name, eval_node, eval_name, source_id,
        source_ids, req_id, source_node, source_group, search. Factored out of
        three previously-duplicated method bodies.
        """
        if 'script_name' in filters:
            query = query.filter(ScriptEvalData.script_name == filters['script_name'])
        if 'eval_node' in filters:
            query = query.filter(ScriptEvalData.eval_node == filters['eval_node'])
        if 'eval_name' in filters:
            query = query.filter(ScriptEvalData.eval_name == filters['eval_name'])
        if 'source_id' in filters:
            query = query.filter(ScriptEvalData.source_id == filters['source_id'])
        if 'source_ids' in filters:
            query = query.filter(ScriptEvalData.source_id.in_(filters['source_ids']))
        if 'req_id' in filters:
            query = query.filter(ScriptEvalData.req_id == filters['req_id'])
        if 'source_node' in filters:
            query = query.filter(ScriptEvalData.source_node == filters['source_node'])
        if 'source_group' in filters:
            query = query.filter(ScriptEvalData.source_group == filters['source_group'])
        search = filters.get('search')
        if search:
            # Free-text search; the results pattern keeps its original ':'
            # prefix -- presumably to match JSON values rather than keys
            # (TODO confirm).
            query = query.filter(or_(ScriptEvalData.source_content.like(f"%{search}%"), ScriptEvalData.ext_info.like(f"%{search}%"), ScriptEvalData.eval_datas.like(f"%{search}%"), ScriptEvalData.results.like(f"%:{search}%")))
        return query

    @sql_monitor()
    def add_eval_data(self, eval_data: ScriptEvalData):
        """Persist one eval record, normalizing JSON fields first.

        None ext_info/results become {}; dict/list source_content is serialized
        to JSON text; eval_datas values (lists of EvalData) are converted via
        their to_dict().
        """
        if not eval_data:
            return
        with self.session_scope() as session:
            if eval_data.ext_info is None:
                eval_data.ext_info = {}
            if eval_data.results is None:
                eval_data.results = {}
            if eval_data.source_content:
                eval_data.source_content = json.dumps(eval_data.source_content, ensure_ascii=False, default=str) if isinstance(eval_data.source_content, (dict, list)) else eval_data.source_content
            if eval_data.eval_datas:
                eval_data.eval_datas = {key: [da.to_dict() for da in val] for key, val in eval_data.eval_datas.items()}
            session.add(eval_data)

    @sql_monitor()
    def delete_eval_by_ids(self, script_node_data_ids):
        """Bulk-delete eval rows by primary key; no-op on an empty list."""
        if not script_node_data_ids:
            return
        with self.session_scope() as session:
            session.query(ScriptEvalData).filter(ScriptEvalData.id.in_(script_node_data_ids)).delete(synchronize_session=False)

    @sql_monitor(detail=True)
    def select_eval_datas(self, filters=None, offset=None, size=None) -> list[ScriptEvalData]:
        """Fetch eval rows matching ``filters`` with optional pagination.

        Pagination applies only when ``size`` is given; a missing ``offset``
        defaults to 0. Returns detached copies of the rows.
        """
        if filters is None:
            filters = {}
        with self.session_scope() as session:
            query = self._apply_eval_filters(session.query(ScriptEvalData), filters)
            if size is not None:
                query = query.limit(size).offset(offset if offset is not None else 0)
            return [record.copy() for record in query.all()]

    @sql_monitor(detail=True)
    def delete_eval_datas(self, filters=None) -> int:
        """Bulk-delete eval rows matching ``filters``; returns the row count."""
        if filters is None:
            filters = {}
        with self.session_scope() as session:
            query = self._apply_eval_filters(session.query(ScriptEvalData), filters)
            deleted_count = query.delete(synchronize_session=False)
            session.commit()  # explicit commit kept for parity with delete_datas
            return deleted_count

    @sql_monitor(detail=True)
    def count_eval_datas(self, filters=None) -> int:
        """Count eval rows matching ``filters``."""
        if filters is None:
            filters = {}
        with self.session_scope() as session:
            return self._apply_eval_filters(session.query(ScriptEvalData), filters).count()

    @sql_monitor(detail=True)
    def select_need_eval_datas(self, filters=None, offset: int = None, size: int = None) -> list[EvalInfo]:
        """Find source rows that still lack an eval record and bundle their
        candidate eval datas.

        Joins script_node_data (source rows, paginated) against its children
        under ``eval_node``/``data_groups`` and excludes sources that already
        have a matching script_eval_data row. Returns EvalInfo objects whose
        eval_datas group EvalData items by data_group.

        Raises ValueError when required filters are missing or when ``offset``
        is given without ``size``.
        """
        if filters is None:
            filters = {}
        script_name = filters.get('script_name')
        source_node = filters.get('source_node')
        source_group = filters.get('source_group')
        eval_node = filters.get('eval_node')
        eval_name = filters.get('eval_name')
        # BUGFIX: guard before split() -- a missing 'data_groups' must raise the
        # parameter ValueError below, not an AttributeError on None.
        data_groups = filters.get('data_groups')
        group_str = "','".join(data_groups.split(',')) if data_groups else ''
        if not script_name or not source_node or not source_group or not eval_node or not eval_name or not group_str:
            raise ValueError('参数不全')
        limit_str = ''
        if size is not None:
            # BUGFIX: the original condition was inverted ('if offset is not
            # None: offset = 0'), discarding provided offsets and interpolating
            # the literal 'None' into SQL when offset was omitted.
            if offset is None:
                offset = 0
            limit_str = 'limit ' + str(offset) + ',' + str(size)
        elif offset is not None:
            raise ValueError('分页参数异常')

        source_id = filters.get('source_id')
        source_limit_str = ''
        if source_id:
            # int() both validates and keeps this interpolated fragment safe
            # from SQL injection.
            source_limit_str = 'and id=' + str(int(source_id))
        # NOTE(review): the remaining interpolated filter values are not escaped
        # -- confirm they only ever come from trusted internal callers.
        rows, columns = self.execute_sql(f'''select a.script_name,
       a.id          source_id,
       a.node        source_node,
       a.data_group  source_group,
       a.content_key source_content_key,
       a.content_type source_content_type,
       a.content     source_content,
       max(b.node) eval_node,
       JSON_ARRAYAGG(
               JSON_OBJECT(
                       'id', b.id,
                       'data_group', b.data_group,
                       'content_type', b.content_type,
                       'content', b.content,
                       'ext_info', b.ext_info
                   )
           ) AS      eval_datas
from (select *
      from script_node_data
      where script_name = '{script_name}'
        and node = '{source_node}'
        and data_group = '{source_group}'
        {source_limit_str}
      {limit_str} ) a
         inner join (select *
                     from script_node_data b
                     where script_name = '{script_name}'
                       and node = '{eval_node}'
                       and data_group in ('{group_str}')) b
                    on a.id = b.source_id

where a.content is not null
  and a.content != ''
  and not EXISTS(select *
                 from script_eval_data
                 where script_name = '{script_name}'
                   and eval_node = '{eval_node}'
                   and eval_name = '{eval_name}'
                   and a.id = source_id
    )
group by a.script_name, a.id, a.node, a.data_group, a.content_key, a.content
    ''')

        # One EvalInfo per source row; eval_datas regrouped by data_group.
        json_data = []
        for row in rows:
            row_data = dict(zip(columns, row))
            try:
                row_data['source_content'] = json.loads(row_data.get('source_content'))
            except (TypeError, ValueError):
                # Non-JSON content stays as the raw string.
                pass
            eval_data_array = json.loads(row_data.get('eval_datas'))
            row_data['eval_datas'] = defaultdict(list)

            for item in eval_data_array:
                data_group = item.get('data_group')
                eval_data = EvalData(id=item.get('id'), data_group=data_group, content_type=item.get('content_type'), content=item.get('content'), ext_info=item.get('ext_info'))
                row_data['eval_datas'][data_group].append(eval_data)

            eval_data = EvalInfo(eval_name=eval_name, script_name=row_data.get('script_name'), source_node=row_data.get('source_node'), source_group=row_data.get('source_group'), source_id=row_data.get('source_id'), source_content_type=row_data.get('source_content_type'),
                                 source_content=row_data.get('source_content'), eval_node=row_data.get('eval_node'), eval_datas=row_data.get('eval_datas'))
            json_data.append(eval_data)

        return json_data

    @sql_monitor(detail=True)
    def distinct_eval_names(self):
        """Map script_name -> eval_node -> 'source_node|source_group' -> [eval_name, ...].

        Rows with a falsy eval_name are skipped.
        """
        with self.session_scope() as session:
            query = session.query(ScriptEvalData.script_name, ScriptEvalData.eval_node, ScriptEvalData.source_node, ScriptEvalData.source_group, ScriptEvalData.eval_name).distinct(ScriptEvalData.script_name, ScriptEvalData.eval_node, ScriptEvalData.source_node, ScriptEvalData.source_group,
                                                                                                                                                                                    ScriptEvalData.eval_name)
            result_dict = {}
            for script_name, eval_node, source_node, source_group, eval_name in query.all():
                if not eval_name:
                    continue
                source_groups = source_node + '|' + source_group
                result_dict.setdefault(script_name, {}).setdefault(eval_node, {}).setdefault(source_groups, []).append(eval_name)
            return result_dict


# 脚本方案
class ScriptNodePlan(Base):
    """ORM model for one script node plan (table ``script_node_plan``)."""
    __tablename__ = 'script_node_plan'
    id = Column(Integer, primary_key=True, autoincrement=True)
    script_name = Column(String(128), nullable=False)
    node = Column(String(128), nullable=False)
    name = Column(String(128), nullable=False)
    online = Column(Boolean, nullable=False)
    config = Column(JSON)
    gmt_modified = Column(DateTime, server_default=func.now(tz=timezone('Asia/Shanghai')), onupdate=text('CURRENT_TIMESTAMP'))

    # Attribute names mirrored by copy()/to_dict(), in serialization order.
    _FIELDS = ('id', 'script_name', 'node', 'name', 'online', 'config', 'gmt_modified')

    def copy(self):
        """Return a detached copy of this row; a falsy config becomes {}."""
        clone = ScriptNodePlan()
        for field in self._FIELDS:
            setattr(clone, field, getattr(self, field))
        if not clone.config:
            clone.config = {}
        return clone

    def to_dict(self):
        """Serialize the mapped columns into a plain dict."""
        return {field: getattr(self, field) for field in self._FIELDS}

    def __repr__(self):
        return self.to_json()

    def to_json(self):
        """JSON form; non-serializable values (e.g. datetime) fall back to str()."""
        return json.dumps(self.to_dict(), ensure_ascii=False, default=str)


# Script plan storage
class ScriptPlanStorage(BaseDbStorage):
    """CRUD storage for ScriptNodePlan rows.

    At most one plan per (script_name, node) is expected to be online at a
    time; switch_plan enforces this by turning sibling plans off first.
    """

    def select_online_plan(self, script_name: str = None, node: str = None):
        """Return the online plan for (script_name, node), or None."""
        if not script_name or not node:
            return None
        plans = self.select_plans(filters={
            "script_name": script_name,
            "node": node,
            "online": True
        })
        return plans[0] if plans else None

    @sql_monitor()
    def add_plan(self, plan: ScriptNodePlan):
        """Insert a plan; a missing config is normalized to {}."""
        if not plan:
            return
        with self.session_scope() as session:
            if plan.config is None:
                plan.config = {}
            session.add(plan)

    @sql_monitor(detail=True)
    def delete_plan(self, id):
        """Delete a plan by primary key.

        :raises ValueError: if id is falsy or no such plan exists.
        """
        if not id:
            raise ValueError('plan id cannot be null')
        with self.session_scope() as session:
            item = session.query(ScriptNodePlan).filter_by(id=id).first()
            # Guard: session.delete(None) would raise an obscure ORM error.
            if not item:
                raise ValueError('plan is not exist')
            session.delete(item)

    @sql_monitor(detail=True)
    def update_plan_by_id(self, plan_id, update_dict):
        """Apply update_dict to one plan; unknown attribute keys are ignored."""
        if not plan_id or not update_dict:
            return

        with self.session_scope() as session:
            item = session.query(ScriptNodePlan).filter_by(id=plan_id).first()
            if item:
                for key, value in update_dict.items():
                    if hasattr(item, key):
                        setattr(item, key, value)

    @sql_monitor(detail=True)
    def switch_plan(self, plan_id, online):
        """Toggle a plan online/offline.

        When enabling, every other online plan on the same (script_name, node)
        is switched off first so that only one plan stays online.

        :raises ValueError: if the plan does not exist.
        """
        if not plan_id:
            return
        with self.session_scope() as session:
            item = session.query(ScriptNodePlan).filter_by(id=plan_id).first()
            if not item:
                raise ValueError('plan is not exist')

            if online:
                on_planes = self.select_plans(filters={
                    'script_name': item.script_name,
                    'node': item.node,
                    'online': True
                })
                for p in (on_planes or []):
                    self.switch_plan(p.id, False)
            item.online = online

    @sql_monitor(detail=True)
    def select_plan_by_id(self, plan_id) -> ScriptNodePlan:
        """Return a detached copy of the plan, or None when absent."""
        if not plan_id:
            return None
        with self.session_scope() as session:
            item = session.query(ScriptNodePlan).filter_by(id=plan_id).first()
            return item.copy() if item else None

    @sql_monitor(detail=True)
    def select_plans(self, filters=None, offset=None, size=None) -> list[ScriptNodePlan]:
        """Select detached plan copies matching filters, with optional paging.

        NOTE(review): 'script_name'/'online'/'name' filter on key presence but
        'node' filters on truthiness — kept as-is for caller compatibility.
        """
        if filters is None:
            filters = {}
        with self.session_scope() as session:
            query = session.query(ScriptNodePlan)
            if 'script_name' in filters:
                query = query.filter(ScriptNodePlan.script_name == filters['script_name'])
            if filters.get('node'):
                query = query.filter(ScriptNodePlan.node == filters['node'])
            if 'online' in filters:
                query = query.filter(ScriptNodePlan.online == filters['online'])
            if 'name' in filters:
                query = query.filter(ScriptNodePlan.name == filters['name'])

            # Paging applies only when size is given; offset defaults to 0.
            if size is not None:
                query = query.limit(size).offset(offset or 0)

            return [item.copy() for item in query.all()]

    @sql_monitor(detail=True, original_result=True)
    def distinct_plan_names(self):
        """Map each script_name -> node -> plan id -> display name.

        The online plan's name is suffixed with '（默认）' (default marker).
        """
        with self.session_scope() as session:
            query = session.query(ScriptNodePlan.script_name, ScriptNodePlan.node, ScriptNodePlan.name, ScriptNodePlan.id, ScriptNodePlan.online).distinct(ScriptNodePlan.script_name, ScriptNodePlan.node, ScriptNodePlan.name, ScriptNodePlan.id, ScriptNodePlan.online)
            result_dict = {}
            for script_name, node, name, id, online in query.all():
                if not name:
                    continue
                node_map = result_dict.setdefault(script_name, {})
                plan_map = node_map.setdefault(node, {})
                plan_map[id] = (name + '（默认）') if online else name
            return result_dict


# Embedding (vector) data model
class EmbedData(Base):
    """ORM model for one embedding record (table ``embed_data``)."""
    __tablename__ = 'embed_data'
    id = Column(Integer, primary_key=True, autoincrement=True)
    embed_scope = Column(String(128), nullable=False)
    tags = Column(JSON)
    keywords = Column(JSON)
    data_group = Column(String(128), nullable=False)
    content = Column(JSON)
    embed_text = Column(String(1024), nullable=False)
    embed_array = Column(JSON)
    gmt_modified = Column(DateTime, server_default=func.now(tz=timezone('Asia/Shanghai')), onupdate=text('CURRENT_TIMESTAMP'))

    # Attribute names mirrored by copy()/to_dict(), in serialization order.
    _FIELDS = ('id', 'embed_scope', 'data_group', 'tags', 'keywords',
               'content', 'embed_text', 'embed_array', 'gmt_modified')

    def copy(self):
        """Return a detached copy of this row."""
        clone = EmbedData()
        for field in self._FIELDS:
            setattr(clone, field, getattr(self, field))
        return clone

    def to_dict(self):
        """Serialize the mapped columns into a plain dict."""
        return {field: getattr(self, field) for field in self._FIELDS}

    def __repr__(self):
        return self.to_json()

    def to_json(self):
        """JSON form; non-serializable values (e.g. datetime) fall back to str()."""
        return json.dumps(self.to_dict(), ensure_ascii=False, default=str)


# Embedding data storage (DB rows + Chroma index)
class EmbedDataStorage(BaseDbStorage):
    """Storage for EmbedData rows plus a mirrored Chroma vector index.

    The relational table (via BaseDbStorage) is the source of truth; the
    sync_chroma_* methods push rows that already carry an embed_array into a
    per-embed_scope Chroma collection used for similarity search.
    """

    def __init__(self):
        # Persistent local Chroma store living next to this module.
        self.chroma_client = chromadb.PersistentClient(path=os.path.join(os.path.dirname(os.path.abspath(__file__)), "chroma_datas"))

        class MyEmbeddingFunction(EmbeddingFunction):
            def __call__(self, input: Documents) -> Embeddings:
                # Embed every document through the shared util helper.
                embeddings = []
                for item in input:
                    embedding = util.embedding(item, model='待实现')
                    if not embedding:
                        raise ValueError('embedding is empty')
                    embeddings.append(embedding)
                return embeddings

        self.embed_func = MyEmbeddingFunction()

    @staticmethod
    def _apply_filters(query, filters):
        """Apply the filter conventions shared by select/count/delete_embeds.

        Supported keys: embed_scope (exact match), data_group (single value or
        comma-separated list), embed (True -> has embed_array, False -> none),
        search (substring over text/content/keywords/tags).
        """
        if 'embed_scope' in filters:
            query = query.filter(EmbedData.embed_scope == filters['embed_scope'])
        if 'data_group' in filters:
            group_ = filters['data_group']
            if group_:
                # A comma-separated value means "any of these groups".
                if ',' in group_:
                    query = query.filter(EmbedData.data_group.in_(group_.split(',')))
                else:
                    query = query.filter(EmbedData.data_group == group_)
        embed = filters.get('embed')
        if embed is not None:
            # Deliberately == (not `is`): 1/0 behave like True/False and any
            # other value leaves the query unfiltered, as before.
            if embed == True:
                query = query.filter(EmbedData.embed_array.isnot(None))
            elif embed == False:
                query = query.filter(EmbedData.embed_array.is_(None))
        search = filters.get('search')
        if search:
            query = query.filter(or_(EmbedData.embed_text.like(f"%{search}%"), EmbedData.content.like(f"%{search}%"), EmbedData.keywords.like(f"%{search}%"), EmbedData.tags.like(f"%{search}%")))
        return query

    @staticmethod
    def _chroma_metadata(embed):
        """Flatten an EmbedData row into the metadata dict stored in Chroma."""
        return {
            **embed.content,
            'data_group': embed.data_group,
            'tags': json.dumps(embed.tags, ensure_ascii=False) if embed.tags else '',
            'keywords': json.dumps(embed.keywords, ensure_ascii=False) if embed.keywords else '',
            'gmt_modified': embed.gmt_modified.timestamp(),
            'embedding': json.dumps(embed.embed_array)
        }

    @staticmethod
    def _parse_chroma_meta(metadata, document):
        """Inverse of _chroma_metadata: rebuild row-like fields from Chroma."""
        return {
            "content": {key: value for key, value in metadata.items() if key not in ('data_group', 'gmt_modified', 'tags', 'keywords', 'embedding')},
            "embed_text": document,
            "data_group": metadata.get('data_group'),
            'gmt_modified': datetime.datetime.fromtimestamp(metadata.get('gmt_modified')) if metadata.get('gmt_modified') else None,
            'tags': json.loads(metadata.get('tags')) if metadata.get('tags') else None,
            'keywords': json.loads(metadata.get('keywords')) if metadata.get('keywords') else None,
            'embedding': json.loads(metadata.get('embedding')) if metadata.get('embedding') else None
        }

    @sql_monitor()
    def add_embeds(self, datas):
        """Bulk-insert EmbedData rows, normalizing missing content/keywords."""
        if not datas:
            return
        with self.session_scope() as session:
            for data in datas:
                if data.content is None:
                    data.content = {}
                if data.keywords is None:
                    data.keywords = []
                data.gmt_modified = datetime.datetime.now()
            session.add_all(datas)

    def embedding(self, texts: Union[str, list[str]] = None) -> list[list[float]]:
        """Embed one text or a list of texts; raises on empty input."""
        if not texts:
            raise ValueError('text 不可为空')
        texts = texts if isinstance(texts, list) else [texts]
        return self.embed_func(texts)

    @sql_monitor()
    def import_embeds(self, data_group, data_list, pre_check, embed_scope):
        """Import raw dict rows as EmbedData.

        Each row must contain 'embed_text'; 'tags'/'keywords' must be JSON
        lists and 'content' a JSON dict. Without an explicit 'content', the
        remaining fields of the row become the content. When pre_check is
        truthy, nothing is written and a preview summary is returned.
        """
        if not data_group or not embed_scope:
            raise ValueError('基本信息不全')
        data_list = data_list if data_list else []
        embed_datas = []
        for data in data_list:
            embed_data = EmbedData()
            embed_data.embed_scope = embed_scope
            embed_data.data_group = data_group
            if 'embed_text' not in data:
                raise ValueError('embed_text 不可为空')
            embed_data.embed_text = data.pop('embed_text')
            if 'tags' in data:
                tags = json.loads(data.pop('tags'))
                if not isinstance(tags, list):
                    raise ValueError('标签是json列表')
                embed_data.tags = tags
            if 'keywords' in data:
                keywords = json.loads(data.pop('keywords'))
                if not isinstance(keywords, list):
                    raise ValueError('关键词是json列表')
                embed_data.keywords = keywords
            if 'content' in data:
                content = json.loads(data.get('content'))
                if not isinstance(content, dict):
                    raise ValueError('content是字典')
                embed_data.content = content
            else:
                # No explicit content: the leftover fields become the content.
                embed_data.content = data
            embed_data.gmt_modified = datetime.datetime.now()
            embed_datas.append(embed_data)
        if pre_check:
            # Dry run: report current vs incoming counts plus a sample row.
            count = self.count_embeds(filters={
                'embed_scope': embed_scope,
                'data_group': data_group
            })
            first = embed_datas[0] if embed_datas else None
            return {
                'old_count': count,
                'count': len(embed_datas),
                'first_data': json.loads(first.to_json()) if first else {}
            }
        # Insert the data into the database
        self.add_embeds(embed_datas)
        return {
            'count': len(embed_datas)
        }

    @sql_monitor(detail=True)
    def embedding_with_id(self, id):
        """Compute and persist the embedding of one row's embed_text."""
        embed = self.select_embed_by_id(id)
        if not embed:
            raise ValueError('不存在的数据')
        if not embed.embed_text:
            raise ValueError('向量文本不可为空')
        embeddings = self.embedding(embed.embed_text)
        self.update_embed_by_id(id, {
            'embed_array': embeddings[0]
        })

    @sql_monitor(detail=True)
    def embedding_with_filters(self, filters: dict = None):
        """Embed every row matched by filters (scope and group required)."""
        # Guard against filters=None (previously raised AttributeError).
        if not filters or not filters.get('embed_scope') or not filters.get('data_group'):
            raise ValueError('embed_scope 和 data_group 不可为空')
        embeds = self.select_embeds(filters=filters)
        if not embeds:
            return
        for embed in embeds:
            embeddings = self.embedding(embed.embed_text)
            self.update_embed_by_id(embed.id, {
                'embed_array': embeddings[0]
            })

    @sql_monitor(detail=True)
    def destroy_chroma(self, scope):
        """Drop the whole Chroma collection for a scope."""
        self.chroma_client.delete_collection(scope)

    @sql_monitor(detail=True)
    def sync_chroma_with_id(self, id):
        """Mirror one row into Chroma: upsert when embedded, delete otherwise."""
        embed = self.select_embed_by_id(id)
        if not embed:
            raise ValueError('不存在的数据')
        collection = self.chroma_client.get_or_create_collection(embed.embed_scope, embedding_function=self.embed_func, metadata={
            "hnsw:space": "cosine"
        })

        # Rows without an embedding are removed from the index.
        if not embed.embed_array:
            collection.delete(ids=[str(id)])
            return

        payload = dict(documents=[embed.embed_text], embeddings=[embed.embed_array], metadatas=[self._chroma_metadata(embed)], ids=[str(id)])
        get_res = collection.get(ids=[str(id)])
        if get_res.get('ids'):
            collection.update(**payload)
        else:
            collection.add(**payload)

    @sql_monitor(detail=True)
    def sync_chroma_with_data_group(self, embed_scope: str = None, data_group: str = None) -> list[EmbedData]:
        """Reconcile Chroma with the DB for one or more data groups.

        data_group may be comma-separated. Embedded DB rows are upserted and
        index entries without a DB counterpart are deleted. Returns the
        embedded DB rows that were synced.
        """
        if not embed_scope or not data_group:
            raise ValueError('embed_scope 和 data_group 不可为空')

        collection = self.chroma_client.get_or_create_collection(embed_scope, embedding_function=self.embed_func, metadata={
            "hnsw:space": "cosine"
        })
        # All embedded rows for these groups in the DB.
        embeds = self.select_embeds(filters={
            'embed_scope': embed_scope,
            'data_group': data_group,
            'embed': True
        })
        all_ids = [str(item.id) for item in embeds]

        groups = data_group.split(',')

        # Ids currently present in the index for these groups.
        get_res = collection.get(where={
            "data_group": {
                "$in": groups
            }
        })
        exist_ids = get_res.get('ids')

        # Remove index entries that no longer exist in the DB.
        del_ids = list(set(exist_ids) - set(all_ids))
        if del_ids:
            collection.delete(ids=del_ids, where={
                "data_group": {
                    "$in": groups
                }
            })

        # Update entries present on both sides. BUGFIX: ids must be built from
        # exist_embeds so they line up with documents/embeddings/metadatas —
        # the previous code passed exist_ids in Chroma's own order.
        exist_embeds = [embed for embed in embeds if str(embed.id) in exist_ids]
        if exist_embeds:
            collection.update(
                documents=[embed.embed_text for embed in exist_embeds],
                embeddings=[embed.embed_array for embed in exist_embeds],
                metadatas=[self._chroma_metadata(embed) for embed in exist_embeds],
                ids=[str(embed.id) for embed in exist_embeds])

        # Add rows missing from the index.
        new_id_set = set(all_ids) - set(exist_ids)
        new_embeds = [embed for embed in embeds if str(embed.id) in new_id_set]
        if new_embeds:
            collection.add(
                documents=[embed.embed_text for embed in new_embeds],
                embeddings=[embed.embed_array for embed in new_embeds],
                metadatas=[self._chroma_metadata(embed) for embed in new_embeds],
                ids=[str(embed.id) for embed in new_embeds])
        return embeds

    @sql_monitor(detail=True)
    def delete_by_id(self, id):
        """Delete one row and best-effort remove its Chroma entry.

        :raises ValueError: if id is falsy or no such row exists.
        """
        if not id:
            raise ValueError('embed_id cannot be null')
        item = None
        with self.session_scope() as session:
            item = session.query(EmbedData).filter_by(id=id).first()
            # Guard: session.delete(None) would raise an obscure ORM error.
            if not item:
                raise ValueError('embed is not exist')
            session.delete(item)
        if item:
            try:
                collection = self.chroma_client.get_collection(name=item.embed_scope, embedding_function=self.embed_func)
                collection.delete(ids=[str(id)])
            except Exception:
                # Index cleanup is best-effort; the DB delete already happened.
                pass

    @sql_monitor(detail=True)
    def update_embed_by_id(self, embed_id, update_dict):
        """Update row attributes; unknown keys are ignored.

        Any update that does not itself set embed_array clears the stored
        embedding, since changed data invalidates the old vector.
        """
        if not embed_id or not update_dict:
            return

        with self.session_scope() as session:
            item = session.query(EmbedData).filter_by(id=embed_id).first()
            if item:
                for key, value in update_dict.items():
                    if hasattr(item, key):
                        setattr(item, key, value)
                if 'embed_array' not in update_dict:
                    item.embed_array = None

    @sql_monitor(detail=True)
    def select_embed_by_id(self, id) -> EmbedData:
        """Return a detached copy of one row, or None when absent."""
        if not id:
            return None
        with self.session_scope() as session:
            item = session.query(EmbedData).filter_by(id=id).first()
            return item.copy() if item else None

    @sql_monitor(detail=True)
    def select_chroma(self, embed_scope: str = None, ids: list = None) -> dict[str, dict]:
        """Fetch Chroma entries by id, keyed by id; {} when none/absent."""
        if not embed_scope:
            raise ValueError('向量域不可为空')
        if not ids:
            return {}
        try:
            collection = self.chroma_client.get_collection(name=embed_scope, embedding_function=self.embed_func)
        except Exception:
            # Missing collection -> no results.
            return {}
        get_res = collection.get(ids=[str(item) for item in ids])
        return {id_: self._parse_chroma_meta(metadata, document)
                for id_, metadata, document in zip(get_res["ids"], get_res["metadatas"], get_res["documents"])}

    @sql_monitor(detail=True)
    def select_embeds(self, filters=None, offset=None, size=None) -> list[EmbedData]:
        """Select detached row copies matching filters, with optional paging."""
        if filters is None:
            filters = {}
        with self.session_scope() as session:
            query = self._apply_filters(session.query(EmbedData), filters)
            # Paging applies only when size is given; offset defaults to 0.
            if size is not None:
                query = query.limit(size).offset(offset or 0)
            return [item.copy() for item in query.all()]

    @sql_monitor(detail=True)
    def count_embeds(self, filters=None) -> int:
        """Count rows matching filters (same semantics as select_embeds)."""
        if filters is None:
            filters = {}
        with self.session_scope() as session:
            return self._apply_filters(session.query(EmbedData), filters).count()

    @sql_monitor(detail=True)
    def delete_embeds(self, filters=None) -> int:
        """Delete matching rows and best-effort remove their Chroma entries.

        embed_scope is mandatory. Returns the number of deleted DB rows.
        """
        if filters is None:
            filters = {}
        if not filters.get('embed_scope'):
            raise ValueError('向量域不可为空')
        with self.session_scope() as session:
            query = self._apply_filters(session.query(EmbedData), filters)
            datas = query.all()
            if datas:
                try:
                    collection = self.chroma_client.get_collection(name=filters.get('embed_scope'), embedding_function=self.embed_func)
                    collection.delete(ids=[str(item.id) for item in datas])
                except Exception:
                    # Index cleanup is best-effort; the DB delete still proceeds.
                    pass
            deleted_count = query.delete(synchronize_session=False)
            session.commit()
            return deleted_count

    @sql_monitor(detail=True)
    def search_chroma(self, embed_scope: str = None, data_group: list[str] = None, query_embeddings: Union[list[float], list[list[float]]] = None, query_texts: Union[str, list[str]] = None, size: int = None):
        """Similarity search over one scope, limited to the given data groups.

        Returns (first query embedding, deduplicated closest `size` results);
        ([], []) when the collection does not exist.
        """
        if not embed_scope or not data_group or not size or (not query_texts and not query_embeddings):
            raise ValueError('向量域 和 数据集 和 召回数量 和召回输入 召回数量不可为空')
        try:
            collection = self.chroma_client.get_collection(name=embed_scope, embedding_function=self.embed_func)
        except Exception:
            # Missing collection -> empty result rather than an error.
            return ([], [])

        # Embed the raw texts when no embeddings were supplied.
        if not query_embeddings:
            query_embeddings = self.embed_func(query_texts)
        # Normalize a single embedding into a batch of one.
        query_embeddings = query_embeddings if isinstance(query_embeddings[0], list) else [query_embeddings]
        get_res = collection.query(query_embeddings=query_embeddings, n_results=size, where={
            "data_group": {
                "$in": data_group
            }
        })

        hits = []
        for ids, distances, metadatas, documents in zip(get_res['ids'], get_res['distances'], get_res['metadatas'], get_res['documents']):
            for rec_id, dist, metadata, doc in zip(ids, distances, metadatas, documents):
                data = {
                    "id": rec_id,
                    "distance": dist,
                    **self._parse_chroma_meta(metadata, doc)
                }
                # Chroma stores a float timestamp; render it for the caller.
                if data['gmt_modified'] is not None:
                    data['gmt_modified'] = str(data['gmt_modified'])
                hits.append(data)
        hits.sort(key=lambda x: x['distance'])

        def deduplicate_and_limit(items, limit):
            # Keep the first (closest) hit per id, up to `limit` results.
            seen = set()
            kept = []
            for item in items:
                if item['id'] not in seen:
                    seen.add(item['id'])
                    kept.append(item)
                    if len(kept) == limit:
                        break
            return kept

        # Return the first query embedding alongside the results.
        return (query_embeddings[0], deduplicate_and_limit(hits, size))

    @sql_monitor(detail=True, original_result=True)
    def distinct_groups(self):
        """Map each embed_scope to the list of its distinct data_groups."""
        with self.session_scope() as session:
            query = session.query(EmbedData.embed_scope, EmbedData.data_group).distinct(EmbedData.embed_scope, EmbedData.data_group)
            result_dict = {}
            for embed_scope, data_group in query.all():
                result_dict.setdefault(embed_scope, []).append(data_group)
            return result_dict


class OdpsStorage():
    """Thin wrapper around the module-level read/write ODPS connections."""

    @staticmethod
    def _read_all(reader):
        """Drain an ODPS record reader.

        :return: (rows, headers) where rows is a list of {column: value}
                 dicts; the literal string 'null' is normalized to ''.
        :raises ValueError: when the result set exposes no columns.
        """
        headers = reader.schema.names
        if not headers:
            raise ValueError('未发现查询列')
        rows = []
        for record in reader:
            vals = record.values
            row = {}
            for ind, header in enumerate(headers):
                val = '' if vals[ind] == 'null' else vals[ind]
                row[header] = val
            rows.append(row)
        return rows, headers

    def read(self, sql: str) -> list[dict]:
        """Execute sql on the read connection and return all rows as dicts."""
        if not read_odps:
            raise ValueError("please check sys config READ_ODPS_ACCESS_ID,READ_ODPS_ACCESS_KEY,READ_ODPS_PROJECT,READ_ODPS_ENDPOINT")
        if not sql:
            raise ValueError("sql为空")

        with read_odps.execute_sql(sql, hints={
            "odps.sql.submit.mode": "script"
        }).open_reader(tunnel=True) as reader:
            rows, _ = self._read_all(reader)
        return rows

    def select_node_data_with_explain(self, pt: str = None, explain_sql: str = None):
        """Run explain_sql (with ${pt} substituted) against the node-data table.

        The target table DDL is documented on write_node_data.

        :param pt: partition value substituted for the ${pt} placeholder
        :param explain_sql: SQL template to execute
        :return: dict with 'rows' (list of dicts) and 'headers' (column names)
        """
        if not read_odps:
            raise ValueError("please check sys config READ_ODPS_ACCESS_ID,READ_ODPS_ACCESS_KEY,READ_ODPS_PROJECT,READ_ODPS_ENDPOINT")
        if not pt or not explain_sql:
            raise ValueError('参数不全')
        search_sql = explain_sql.replace('${pt}', f'{pt}')
        with read_odps.execute_sql(search_sql).open_reader(tunnel=True) as reader:
            rows, headers = self._read_all(reader)
        return {
            "rows": rows,
            "headers": headers
        }

    def write_node_data(self, pt: str = None, rows: list[dict] = None):
        '''Overwrite one partition of WRITE_ODPS_TABLE with the given rows.

        Target table DDL:
# CREATE TABLE IF NOT EXISTS alsc_tech.alsc_merchant_agi_script_node_data(
# 	id BIGINT COMMENT 'id',
# 	script_name STRING COMMENT 'script name',
# 	node STRING COMMENT 'owning node',
# 	data_group STRING COMMENT 'data batch',
# 	content_type STRING COMMENT 'content type',
# 	source_node STRING COMMENT 'source node',
# 	source_id BIGINT COMMENT 'source id',
# 	req_id STRING COMMENT 'generation id',
# 	source_nodes STRING COMMENT 'source JSON',
# 	content_key STRING COMMENT 'content key',
# 	content STRING COMMENT 'content',
# 	ext_info STRING COMMENT 'extension JSON',
# 	gmt_create STRING COMMENT 'creation time',
# 	source_data_group STRING COMMENT 'source sample batch',
# 	source_content_type STRING COMMENT 'source sample type',
# 	source_content_key STRING COMMENT 'source sample key',
# 	source_content STRING COMMENT 'source sample content',
# 	source_ext_info STRING COMMENT 'source sample extension'
# )
# PARTITIONED BY (pt STRING)
# STORED AS ALIORC
# TBLPROPERTIES ('columnar.nested.type'='true')
# LIFECYCLE 365;

        :param pt: partition value (without the pt= prefix)
        :param rows: list of column dicts; None/empty recreates an empty partition
        :return: dict with the written project, table and row count
        '''
        if not write_odps:
            raise ValueError('please check sys config WRITE_ODPS_ACCESS_ID,WRITE_ODPS_ACCESS_KEY,WRITE_ODPS_PROJECT,WRITE_ODPS_ENDPOINT')
        if not pt:
            raise ValueError('分区为空')
        # BUGFIX: rows=None previously crashed on len(rows) below.
        rows = rows or []
        pt = 'pt=\'' + pt + '\''
        _table = write_odps.get_table(one_runtime.get_config('WRITE_ODPS_TABLE'))
        # delete_partition(if_exists=True) is a no-op for a missing partition,
        # so no separate exist_partition check is needed.
        _table.delete_partition(pt, if_exists=True)

        # Recreate the partition, then stream the rows into it.
        _table.create_partition(pt)
        if rows:
            with _table.open_writer(pt) as writer:
                for record_dict in rows:
                    record = _table.new_record()
                    for column, value in record_dict.items():
                        # NaN values are not representable; store NULL instead.
                        record[column] = None if is_nan(value) else value
                    writer.write(record)
        return {
            'project': write_odps.project,
            'table': one_runtime.get_config('WRITE_ODPS_TABLE'),
            'row': len(rows)
        }


def refresh_script_root_datas(script_name: str = None, data_group: str = None, refresh_sql: str = None):
    """Rebuild a script's root data set from an ODPS query.

    Existing rows for (script_name, data_group) are removed — together with
    their downstream node data and stale call records — before the freshly
    queried rows are imported.

    :param script_name: script whose root node is refreshed
    :param data_group: data batch to replace
    :param refresh_sql: ODPS SQL producing the new rows
    """
    if not script_name or not data_group or not refresh_sql:
        raise ValueError('脚本 节点 数据集 刷新sql不可为空')
    # Fetch the replacement rows first so a failing query leaves the DB intact.
    fresh_rows = odps_storage.read(sql=refresh_sql)
    existing = node_data_storage.select_datas(filters={
        'data_group': data_group,
        'node': script_name,
        'script_name': script_name
    })
    if existing:
        stale_ids = [item.id for item in existing]
        node_data_storage.delete_data_by_ids(stale_ids)
        joined_ids = ','.join(map(str, stale_ids))
        # Remove downstream rows derived from the deleted sources.
        # NOTE(review): script_name/ids are interpolated into raw SQL; both are
        # internal values here, but confirm they never come from user input.
        dele_next_nodes = f"DELETE FROM script_node_data WHERE JSON_EXTRACT(source_nodes, '$.\"{script_name}\"') in ({joined_ids})"
        node_data_storage.execute_sql(dele_next_nodes)
        # Drop call records that now point at deleted data.
        call_record_storage.clean_records()

    node_data_storage.import_datas(data_group, fresh_rows, False, 'refresh_' + str(datetime.datetime.now()), script_name)

# Storage layer: module-level singletons shared by the rest of the application.
base_db_storage = BaseDbStorage()  # base DB session/monitoring plumbing
node_data_storage = ScriptNodeDataStorage()  # script node data rows
call_record_storage = ScriptCallRecordStorage()  # script call records
plan_storage = ScriptPlanStorage()  # script node plans
eval_data_storage = ScriptEvalDataStorage()  # eval data rows
embed_storage = EmbedDataStorage()  # embeddings + Chroma index
odps_storage = OdpsStorage()  # ODPS import/export
