import codecs
import datetime
import json
import math
import re
import time
from abc import ABC, abstractmethod
from json import JSONDecodeError
from typing import Union, Generator
from urllib.parse import urlencode
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd
import requests
import one_runtime
import util
from script import script_thread_local, common_storage, merge_thread_local
from script.common_storage import ScriptCallRecord, embed_storage

# Price estimate table: model key -> [input-token unit price, output-token unit price].
# llm_predict multiplies promptTokens by index 0 and completionTokens by index 1.
price_map = {
    "dashscope-xx": [0.0084 / 1000, 0.0168 / 1000]
}


class LlmRes():
    '''
    Result of one LLM call: success status, generated text, message history,
    cost and extra metadata, plus helpers to match keywords and parse the result.
    '''

    def __init__(self, status: Union[str, bool] = None, result: str = None, message: str = None, ext_info: dict = None, cost: float = None, messages: list[dict] = None, finished: bool = True, thought: str = None):
        # status: True means success; a string is an error code. None defaults to success.
        self.status = True if status is None else status
        self.result = result
        self.ext_info = ext_info
        self.message = message
        self.cost = cost
        self.messages = messages
        self.finished = finished
        self.thought = thought

    def check_success(self):
        '''Raise ValueError when the call did not succeed.'''
        if not self.is_success():
            raise ValueError('llm异常', self.message)

    def is_success(self):
        # only the boolean True counts as success; error statuses are strings
        return self.status == True

    @classmethod
    def match_keywords(cls, source=None, keywords=None, def_val=None):
        '''
        Find the first keyword contained in source.
        :param source: text/container to search; int/float/bool sources are stringified
        :param keywords: list/set/tuple (returns the matched key) or dict (returns the mapped value)
        :param def_val: returned when nothing matches or inputs are invalid
        :return: matched key, mapped value, or def_val
        '''
        if keywords is None or source is None:
            return def_val
        # fix: bool must be tested before int/float - bool is an int subclass, so the
        # original order stringified True to 'True' and made this branch unreachable
        if isinstance(source, bool):
            source = 'true' if source else 'false'
        elif isinstance(source, (int, float)):
            source = str(source)

        if not isinstance(source, (str, list, set, tuple)):
            util.log('match_keywords_source_bad_type', {
                'type': type(source)
            })
            return def_val

        # list-like keywords: return the first key found in source
        if isinstance(keywords, (list, set, tuple)):
            for key in keywords:
                if key in source:
                    return key
        # dict keywords: return the value mapped by the first matching key
        elif isinstance(keywords, dict):
            for key, val in keywords.items():
                if key in source:
                    return val
        else:
            # fix: the original logged type(source) here instead of type(keywords)
            util.log('match_keywords_keywords_bad_type', {
                'type': type(keywords)
            })
        return def_val

    def match_result(self, keywords=None, def_val=None):
        '''Run match_keywords against this result's text.'''
        return LlmRes.match_keywords(source=self.result, keywords=keywords, def_val=def_val)

    def result_is_true(self):
        '''True when the result text contains 'true' or 'True'.'''
        return LlmRes.match_keywords(source=self.result, keywords={
            'true': True,
            'True': True
        }, def_val=False)

    def result_is_false(self):
        '''True when the result text contains 'false' or 'False'.'''
        return LlmRes.match_keywords(source=self.result, keywords={
            'false': True,
            'False': True
        }, def_val=False)

    def result_as_json(self):
        '''Parse the result text as JSON; returns None on failure.'''
        try:
            if self.result is None:
                return None
            return json.loads(self.result)
        except Exception:
            # fallback: strip //-style line comments some models emit, then retry
            try:
                return json.loads(re.sub(r'//.*', '', self.result))
            except Exception as e:
                util.log('llm_json_error', {
                    'result': self.result,
                    'error': e
                })
                return None

    def fetch_json_key(self, key: str = None):
        '''
        Parse the result as JSON and fetch one top-level key.
        :param key: JSON key to read (required)
        :return: the value for key, or None when the key is absent
        :raises ValueError: when key is empty or the result is not valid JSON
        '''
        if not key:
            raise ValueError('key不可为空')
        obj = self.result_as_json()
        if not obj:
            raise ValueError('结果非json')
        return obj.get(key)

    def fetch_segments(self, begin: str = None, end: str = None):
        '''
        Extract the substring between begin and end markers.
        :param begin: start marker; when omitted, extraction starts at the beginning
        :param end: end marker; when omitted or not found, the remainder is returned
        :return: extracted segment, or None when there is no result
        :raises Exception: when begin is given but not present in the result
        '''
        if not self.result:
            return None

        if begin:
            start_index = self.result.find(begin)
            if start_index == -1:
                raise Exception("分段结果提取失败，结果中不存在" + begin)

            content = self.result[start_index + len(begin):]
        else:
            content = self.result

        # no end marker (or marker absent): return everything after begin
        if not end:
            return content

        end_index = content.find(end)
        if end_index == -1:
            return content

        return content[:end_index]

    def build_ask_messages(self, msg):
        '''
        Build the message list for a follow-up question on top of this result.
        :param msg: follow-up - a str, a {'role','content'} dict, or an object with .role/.content
        :return: a new message list (self.messages is not mutated)
        :raises ValueError: when msg is empty or this result is a failure
        '''
        if not msg:
            raise ValueError('追问不可为空')
        if not self.is_success():
            raise ValueError('上轮对话失败')
        history = self.messages.copy()
        history.append({
            'role': 'assistant',
            'content': self.result
        })
        if isinstance(msg, str):
            history.append({
                'role': 'user',
                'content': msg
            })
            return history
        if isinstance(msg, dict):
            history.append(msg)
            return history
        # assumes msg is an LlmMessage-like object exposing .role and .content
        history.append({
            'role': msg.role,
            'content': msg.content
        })
        return history

    def to_dict(self):
        '''Plain-dict view of this result (used by to_json and the link tracer).'''
        return {
            "status": self.status,
            "result": self.result,
            "message": self.message,
            'cost': self.cost,
            'ext_info': self.ext_info,
            'messages': self.messages,
            'finished': self.finished,
            "thought": self.thought
        }

    def to_json(self):
        '''Serialize to a JSON string (non-serializable values fall back to str()).'''
        return json.dumps(self.to_dict(), ensure_ascii=False, default=str)

    def __repr__(self):
        return self.to_json()

    def __str__(self):
        return self.to_json()



class OdpsRes:
    '''Result of an ODPS SQL read: status, list of row dicts, and error message.'''

    def __init__(self, status: Union[str, bool] = None, message: str = None, result: list[dict] = None):
        # status: True means success; a string is an error code
        self.status = status
        self.result = result
        self.message = message

    def is_success(self):
        # only the boolean True counts as success; error statuses are strings
        return self.status == True

    def check_success(self):
        '''Raise ValueError when the query did not succeed.'''
        if not self.is_success():
            raise ValueError('odps异常', self.message)

    def to_dict(self):
        '''Plain-dict view of this result.'''
        return {
            "status": self.status,
            "result": self.result,
            "message": self.message
        }

    def to_json(self):
        '''Serialize to a JSON string (non-serializable values fall back to str()).'''
        return json.dumps(self.to_dict(), ensure_ascii=False, default=str)

    def __repr__(self):
        return self.to_json()

    # consistency fix: LlmRes defines __str__ alongside __repr__; mirror that here
    def __str__(self):
        return self.to_json()


class SelectData:
    '''
    One vector-search hit: record id, similarity distance, payload content,
    the text that was embedded, and associated metadata/tags/embedding.
    '''

    def __init__(self, id: str = None, distance: float = None, content: dict = None, embed_text: str = None, data_group: str = None, gmt_modified: str = None, tags: list[str] = None, keywords: list[str] = None, embedding: list[float] = None):
        self.id = id
        self.distance = distance
        self.content = content
        self.embed_text = embed_text
        self.data_group = data_group
        self.gmt_modified = gmt_modified
        self.tags = tags
        self.keywords = keywords
        self.embedding = embedding

    def to_dict(self):
        '''Plain-dict view of this hit.'''
        return dict(
            id=self.id,
            distance=self.distance,
            content=self.content,
            embed_text=self.embed_text,
            data_group=self.data_group,
            gmt_modified=self.gmt_modified,
            tags=self.tags,
            keywords=self.keywords,
            embedding=self.embedding,
        )

    def to_json(self):
        '''Serialize to a JSON string (non-serializable values fall back to str()).'''
        return json.dumps(self.to_dict(), ensure_ascii=False, default=str)

    def __repr__(self):
        return self.to_json()


class SelectRes:
    '''Result of a selector query: status, list of SelectData hits, error message.'''

    # annotation is a string to avoid a hard import-order dependency on SelectData
    def __init__(self, status: Union[str, bool] = None, message: str = None, result: 'list[SelectData]' = None):
        # status: True means success; a string is an error code
        self.status = status
        self.result = result
        self.message = message

    def is_success(self):
        # only the boolean True counts as success; error statuses are strings
        return self.status == True

    def check_success(self):
        '''Raise ValueError when the selection did not succeed.'''
        if not self.is_success():
            raise ValueError('search异常', self.message)

    def to_dict(self):
        '''Plain-dict view; hits are expanded via their own to_dict().'''
        return {
            "status": self.status,
            # fix: error results are built with result=None and the original
            # crashed here with a TypeError when iterating None
            "result": [item.to_dict() for item in self.result] if self.result is not None else None,
            "message": self.message
        }

    def to_json(self):
        '''Serialize to a JSON string (non-serializable values fall back to str()).'''
        return json.dumps(self.to_dict(), ensure_ascii=False, default=str)

    def __repr__(self):
        return self.to_json()


# Send one non-streaming request to an LLM backend
def llm_predict(msg: Union[str, dict, list[util.LlmMessage], list[dict]] = None, args: dict = None, **kwargs) -> LlmRes:
    '''
    Run one non-streaming LLM call with retry, cost estimation and call-record logging.
    :param msg: template - a string, an LlmMessage, or a list like [{"role":"user/assistant/system","content":"hi"}]
    :param args: placeholder arguments substituted into the template
    :param kwargs: model configuration (model, model_name, ak, temperature, ...)
    :return: LlmRes - status is True on success, an error-code string otherwise
    '''
    messages: list[dict] = None
    try:
        messages = util.build_messages(msg, args)
        if not messages:
            res = LlmRes(status='no_messages', finished=True)
            return res
    except Exception as e:
        util.log('build_messages_error', {
            'error': e
        })
        res = LlmRes(status='build_messages_error', message=str(e), finished=True)
        return res

    try_num: int = 0
    res: LlmRes = None
    start_time = time.time()
    # thread-local task context: used below for money accounting and the db call record
    run_node_data: dict = getattr(script_thread_local, "run_node_data", None)
    merge_data: dict = getattr(merge_thread_local, "merge_data", None)
    money = 0
    link = util.LinkInfo('common_client', 'llm_predict', {
        'config': kwargs,
        'messages': messages
    })
    try:
        link.step_in()
        while True:
            try_num += 1
            # dispatch to a backend by the 'model' config value
            if kwargs.get('model') == 'dashscope':
                result_obj = next(__dashscope(messages, stream=False, **kwargs))
            elif kwargs.get('model').startswith('gpt-'):
                result_obj = next(__openai(messages, stream=False, **kwargs))
            else:
                raise ValueError('不支持的模型')

            # success path
            if not result_obj.get('fail'):
                result = result_obj.pop('content')
                cost = result_obj.pop('cost')
                finished = result_obj.pop('finished')
                # estimate money spent from token usage (best effort - failures ignored)
                try:
                    # NOTE(review): the price key is prefixed 'dashscope-' even for openai
                    # results - confirm price_map covers every model in use
                    real_model = 'dashscope-' + result_obj.get('model_name')
                    if result_obj.get('promptTokens'):
                        money += int(result_obj.get('promptTokens')) * price_map.get(real_model)[0]
                    if result_obj.get('completionTokens'):
                        money += int(result_obj.get('completionTokens')) * price_map.get(real_model)[1]
                    result_obj['money'] = money
                    if run_node_data:
                        run_node_data.get('task').money += money
                    if merge_data:
                        merge_data['money'] += money
                except:
                    pass
                res = LlmRes(result=result, messages=messages, cost=cost, ext_info=result_obj, finished=finished)
                link.step_out(result=res.to_dict() if res else None)
                return res

            code = result_obj.get('code')
            message = result_obj.get('message')

            err_str = str(code) + str(message)

            # retryable errors: connection aborts and service-overload style failures
            safe_error = 'Connection aborted' in err_str or '模型使用人数较多导致服务负载过高' in err_str or 'API block error' in err_str or 'exceeded expected application' in err_str

            util.log('llm_error', {
                'try_num': try_num,
                'safe_error': safe_error
            })
            if safe_error and try_num < 10:
                # linear backoff before the next retry (max 10 attempts)
                time.sleep(3 * try_num)
            else:
                res = LlmRes(status='llm_error', messages=messages, message=message, ext_info=result_obj, finished=True)
                link.step_out(error={
                    'errorCode': 'max_retry_time'
                }, status='llm_error')
                return res
    except Exception as e:
        util.log('llm_code_error', {
            'error': e
        })
        res = LlmRes(status='llm_code_error', messages=messages, message=str(e), finished=True)
        link.step_out(error=e, status='llm_code_error')
        return res
    finally:
        # NOTE(review): runs after the returns above, so link.step_out fires a second
        # time per call - confirm LinkInfo tolerates the duplicate step_out
        ext_info = res.ext_info if res.ext_info is not None else {}
        ext_info['try_num'] = try_num
        link.step_out(result=res.to_dict() if res else None)
        link.digest_log()
        # write a db call record when running inside a script node
        if run_node_data:
            # rebinds the `args` parameter to build the record payload
            args = {
                'messages': messages
            }
            args.update(kwargs)
            call_record = ScriptCallRecord(script_name=run_node_data.get('script_name'), node=run_node_data.get('node'), source_id=run_node_data.get('data').id, req_id=run_node_data.get('req_id'), client_type='llm', params=args, param_key=str(abs(hash(str(args)) if args else hash(''))),
                                           status=res.status, cost=math.floor((time.time() - start_time) * 1000), result=res.result, ext_info=ext_info, gmt_create=datetime.datetime.now())
            common_storage.call_record_storage.add_record(call_record)


def llm_predict_stream(msg: Union[str, dict, list[util.LlmMessage], list[dict]] = None, params: dict = None, **kwargs) -> Generator[LlmRes, None, None]:
    '''
    Streaming variant of llm_predict: yields incremental LlmRes chunks (finished=False)
    and one final finished=True result, with the same retry/cost/logging behavior.
    :param msg: template - a string, an LlmMessage, or a list like [{"role":"user/assistant/system","content":"hi"}]
    :param params: placeholder arguments substituted into the template
    :param kwargs: model configuration (model, model_name, ak, temperature, ...)
    :return: generator of LlmRes
    '''
    messages: list[dict] = None
    try:
        messages = util.build_messages(msg, params)
        if not messages:
            res = LlmRes(status='no_messages', finished=True)
            yield res
            return
    except Exception as e:
        util.log('build_messages_error', {
            'error': e
        })
        res = LlmRes(status='build_messages_error', message=str(e), finished=True)
        yield res
        return
    try_num: int = 0
    res: LlmRes = None
    start_time = time.time()
    # thread-local task context: used below for money accounting and the db call record
    run_node_data: dict = getattr(script_thread_local, "run_node_data", None)
    merge_data: dict = getattr(merge_thread_local, "merge_data", None)
    money = 0
    link = util.LinkInfo('common_client', 'llm_predict_stream', {
        'config': kwargs,
        'messages': messages,
        'params': params
    })
    try:
        link.step_in()
        while True:
            try_num += 1
            # dispatch to a backend by the 'model' config value
            if kwargs.get('model') == 'dashscope':
                result_gen = __dashscope(messages, stream=True, **kwargs)
            elif kwargs.get('model').startswith('gpt-'):
                result_gen = __openai(messages, stream=True, **kwargs)
            else:
                raise ValueError('不支持的模型')

            for result_obj in result_gen:
                # success chunk
                if not result_obj.get('fail'):
                    result = result_obj.pop('content')
                    cost = result_obj.pop('cost')
                    finished = result_obj.get('finished')
                    if finished:
                        # final chunk only: estimate money spent from token usage (best effort)
                        try:
                            real_model = 'dashscope-' + result_obj.get('model_name')
                            if result_obj.get('promptTokens'):
                                money += int(result_obj.get('promptTokens')) * price_map.get(real_model)[0]
                            if result_obj.get('completionTokens'):
                                money += int(result_obj.get('completionTokens')) * price_map.get(real_model)[1]
                            result_obj['money'] = money
                            if run_node_data:
                                run_node_data.get('task').money += money
                            if merge_data:
                                merge_data['money'] += money
                        except:
                            pass
                    res = LlmRes(result=result, messages=messages, cost=cost, ext_info=result_obj, finished=finished)
                    if finished:
                        link.step_out(result=res.to_dict())
                    yield res
                    if finished:
                        return
                    continue
                code = result_obj.get('code')
                message = result_obj.get('message')

                err_str = str(code) + str(message)

                # retryable errors: connection aborts and service-overload style failures
                safe_error = 'Connection aborted' in err_str or '模型使用人数较多导致服务负载过高' in err_str or 'API block error' in err_str or 'exceeded expected application' in err_str

                util.log('llm_error', {
                    'try_num': try_num,
                    'safe_error': safe_error
                })
                if safe_error and try_num < 10:
                    # linear backoff before the next retry (max 10 attempts)
                    time.sleep(3 * try_num)
                else:
                    res = LlmRes(status='llm_error', messages=messages, message=message, ext_info=result_obj, finished=True)
                    link.step_out(error={
                        'errorCode': 'max_retry_time'
                    }, status='llm_error')
                    yield res
                    return
    except Exception as e:
        util.log('llm_code_error', {
            'error': e
        })
        res = LlmRes(status='llm_code_error', messages=messages, message=str(e), finished=True)
        link.step_out(error=e)
        yield res
    finally:
        # NOTE(review): runs after the yields/returns above, so link.step_out may fire
        # twice per call - confirm LinkInfo tolerates the duplicate step_out
        ext_info = res.ext_info if res.ext_info is not None else {}
        ext_info['try_num'] = try_num
        link.step_out(result=res.to_dict() if res else None)
        link.digest_log()

        # write a db call record when running inside a script node
        if run_node_data:
            # rebinds the `params` parameter to build the record payload
            params = {
                'messages': messages
            }
            params.update(kwargs)
            call_record = ScriptCallRecord(script_name=run_node_data.get('script_name'), node=run_node_data.get('node'), source_id=run_node_data.get('data').id, req_id=run_node_data.get('req_id'), client_type='llm', params=params, param_key=str(abs(hash(str(params)) if params else hash(''))),
                                           status=res.status, cost=math.floor((time.time() - start_time) * 1000), result=res.result, ext_info=ext_info, gmt_create=datetime.datetime.now())
            common_storage.call_record_storage.add_record(call_record)



def __dashscope(messages: list[dict] = None, stream: bool = False, **kwargs) -> Generator[dict, None, None]:
    '''
    Call the DashScope text-generation API; request/response format per
    https://help.aliyun.com/zh/dashscope/developer-reference/tongyi-qianwen-7b-14b-72b-api-detailes?spm=a2c4g.11186623.0.0.242512b0D0oRcJ
    :param messages: chat messages, must not be empty
    :param stream: True -> yield incremental SSE chunks; False -> yield one final dict
    :param kwargs: model_name / ak / self_key / stop / temperature / top_k / max_tokens
    :return: generator of result dicts; failures carry {'fail': True, 'code': ..., 'message': ...}
    '''
    model = 'dashscope'
    model_name = kwargs.get('model_name')
    # self_key=True uses the caller-supplied ak; otherwise the shared system key
    key = kwargs.get('ak') if kwargs.get('self_key') is True else one_runtime.get_config('DASHSCOPE_KEY')
    if not model_name or not key or not messages:
        yield {
            'fail': True,
            'code': 'model_name key model_name is null',
            'finished': True,
            'message': 'please check sys config DASHSCOPE_KEY,https://help.aliyun.com/zh/dashscope/developer-reference/tongyi-qianwen-7b-14b-72b-api-detailes?spm=a2c4g.11186623.0.0.242512b0D0oRcJ'
        }
        return
    payload_obj = {
        'model': model_name,
        'input': {
            "messages": messages
        },
        "parameters": {
        }
    }
    # optional generation parameters: only sent when configured
    stop = kwargs.get('stop')
    if stop:
        payload_obj['parameters']['stop'] = stop
    temperature = kwargs.get('temperature')
    if temperature:
        payload_obj['parameters']['temperature'] = float(temperature)
    top_k = kwargs.get('top_k')
    if top_k:
        payload_obj['parameters']['top_k'] = int(top_k)
    max_tokens = kwargs.get('max_tokens')
    if max_tokens:
        payload_obj['parameters']['max_tokens'] = int(max_tokens)
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + key
    }
    start_time = time.time()
    url = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"
    request_id = None
    # non-streaming path: one request, one yielded result dict
    if not stream:
        try:
            res = requests.request("POST", url, headers=headers, stream=False, data=json.dumps(payload_obj), timeout=300)
            ret = json.loads(res.text)
            request_id = ret.get("request_id")
            try:
                res.raise_for_status()
            except:
                # surface the response body instead of the bare HTTP status error
                raise ValueError(f'{res.text}')

            output = ret.get('output')
            if not output:
                raise ValueError('获取output失败,'+str(ret))
            data = {
                'content': output.get('text'),
                'promptTokens': ret.get('usage').get('input_tokens') if ret.get('usage') else None,
                'completionTokens':  ret.get('usage').get('output_tokens') if ret.get('usage') else None
            }
        except Exception as e:
            data = {
                'fail': True,
                'code': 'http_error',
                'message': str(e)
            }
        finally:
            # metadata shared by both the success and the failure dict
            data.update({
                'request_id': request_id,
                'model': model,
                'model_name': model_name,
                'cost': round(time.time() - start_time, 1),
                'finished': True
            })
        yield data
        return

    # streaming path: consume the SSE response line by line
    try:
        headers['X-DashScope-SSE'] = 'enable'
        res = requests.request("POST", url, headers=headers, stream=True, data=json.dumps(payload_obj), timeout=300)
        decoder = codecs.iterdecode(res.iter_lines(), 'utf-8')
        for line in decoder:
            if not line:
                continue
            try:
                # a line that parses as plain JSON is an error payload, not an SSE event
                json.loads(line)
                raise ValueError(f'请求失败,非流式响应,{line}')
            except JSONDecodeError:
                pass
            if not line.startswith('data:'):
                continue

            data = {
                'model': model,
                'model_name': model_name,
                'cost': round(time.time() - start_time, 1),
                'finished': False
            }
            # strip the 'data:' prefix and parse the event body
            cur_content = json.loads(line[5:])
            request_id = cur_content.get('request_id')
            data['request_id'] = request_id
            output = cur_content.get('output')
            if not output:
                raise ValueError('获取output失败,'+str(cur_content))
            # treat a missing or literal-'null' finish_reason as "not finished yet"
            finish_reason = output.get('finish_reason') if output.get('finish_reason') is not None and output.get('finish_reason') != 'null' else None
            if finish_reason:
                # final chunk: attach token usage and stop iterating
                data['content'] = output.get('text')
                data['promptTokens'] = cur_content.get('usage').get('input_tokens') if cur_content.get('usage') else None
                data['completionTokens'] = cur_content.get('usage').get('output_tokens') if cur_content.get('usage') else None
                data['finished'] = True
                yield data
                return
            else:
                data['content'] = output.get('text')
                yield data
    except Exception as e:
        yield {
            'fail': True,
            'code': 'http_error',
            'message': str(e),
            'request_id': request_id,
            'model': model,
            'model_name': model_name,
            'cost': round(time.time() - start_time, 1),
            'finished': True
        }

def __openai(messages: list[dict] = None, stream: bool = False, **kwargs) -> Generator[dict, None, None]:
    '''
    Call an OpenAI-compatible chat-completions endpoint at <domain>/v1/chat/completions.
    (The original docstring was copy-pasted from the DashScope helper; corrected.)
    :param messages: chat messages, must not be empty
    :param stream: True -> yield incremental SSE chunks; False -> yield one final dict
    :param kwargs: model ('gpt-<name>') / ak / domain / self_key / stop / temperature / top_k / max_tokens
    :return: generator of result dicts; failures carry {'fail': True, 'code': ..., 'message': ...}
    '''
    model = 'openai'
    # strip the 'gpt-' routing prefix used by llm_predict's dispatch
    model_name = kwargs.get('model')[4:]
    # self_key=True uses caller-supplied ak/domain; otherwise the shared system config
    key = kwargs.get('ak') if kwargs.get('self_key') is True else one_runtime.get_config('OPENAI_KEY')
    domain = kwargs.get('domain') if kwargs.get('self_key') is True else one_runtime.get_config('OPENAI_DOMAIN')
    if not domain or not model_name or not key or not messages:
        yield {
            'fail': True,
            'code': 'domain key model_name is null',
            'finished': True,
            'message': 'please check sys config OPENAI_DOMAIN, OPENAI_KEY'
        }
        return
    payload_obj = {
        'model': model_name,
        "messages": messages
    }
    # optional generation parameters: only sent when configured
    stop = kwargs.get('stop')
    if stop:
        payload_obj['stop'] = stop
    temperature = kwargs.get('temperature')
    if temperature:
        payload_obj['temperature'] = float(temperature)
    top_k = kwargs.get('top_k')
    if top_k:
        payload_obj['top_k'] = int(top_k)
    max_tokens = kwargs.get('max_tokens')
    if max_tokens:
        payload_obj['max_tokens'] = int(max_tokens)
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + key
    }
    start_time = time.time()
    url = domain+"/v1/chat/completions"
    request_id = None
    # non-streaming path: one request, one yielded result dict
    if not stream:
        try:
            res = requests.request("POST", url, headers=headers, stream=False, data=json.dumps(payload_obj), timeout=300)
            try:
                res.raise_for_status()
            except:
                # surface the response body instead of the bare HTTP status error
                raise ValueError(f'{res.text}')
            ret = json.loads(res.text)
            if ret.get('error'):
                raise ValueError(f'{ret.get("error").get("message")}')
            request_id = ret.get("id")
            usage = ret.get('usage')
            output = ret.get('choices')[0]
            if not output:
                raise ValueError('获取output失败,'+str(ret))
            data = {
                'content': output.get('message').get('content'),
                'promptTokens': usage.get('prompt_tokens') if usage else None,
                'completionTokens':  usage.get('completion_tokens') if usage else None
            }
        except Exception as e:
            data = {
                'fail': True,
                'code': 'http_error',
                'message': str(e)
            }
        finally:
            # metadata shared by both the success and the failure dict
            data.update({
                'request_id': request_id,
                'model': model,
                'model_name': model_name,
                'cost': round(time.time() - start_time, 1),
                'finished': True
            })
        yield data
        return

    # streaming path: consume the SSE response line by line
    try:
        payload_obj['stream'] = True
        res = requests.request("POST", url, headers=headers, stream=True, data=json.dumps(payload_obj), timeout=300)
        decoder = codecs.iterdecode(res.iter_lines(), 'utf-8')
        # running concatenation of deltas: each yielded chunk carries the full text so far
        last_content = ''
        for line in decoder:
            if not line:
                continue
            try:
                # a line that parses as plain JSON is an error payload, not an SSE event
                json.loads(line)
                raise ValueError(f'请求失败,非流式响应,{line}')
            except JSONDecodeError:
                pass
            if not line.startswith('data:'):
                continue

            data = {
                'model': model,
                'model_name': model_name,
                'cost': round(time.time() - start_time, 1),
                'content':last_content,
                'finished': False
            }
            # '[DONE]' is the OpenAI stream-termination sentinel
            if line[5:].strip() == '[DONE]':
                data['finished'] = True
                yield data
                return
            cur_content = json.loads(line[5:])
            request_id = cur_content.get('id')
            data['request_id'] = request_id
            output = cur_content.get('choices')
            if not output or 'delta' not in output[0]:
                raise ValueError('获取output失败,'+str(cur_content))
            output = output[0]
            # treat a missing or literal-'null' finish_reason as "not finished yet"
            finish_reason = output.get('finish_reason') if output.get('finish_reason') is not None and output.get('finish_reason') != 'null' else None
            if finish_reason:
                data['finished'] = True
                yield data
                return
            elif output.get('delta').get('content') is not None:
                # append this delta to the accumulated content before yielding
                data['content'] = data['content'] + output.get('delta').get('content')
                last_content = data['content']
                yield data
    except Exception as e:
        yield {
            'fail': True,
            'code': 'http_error',
            'message': str(e),
            'request_id': request_id,
            'model': model,
            'model_name': model_name,
            'cost': round(time.time() - start_time, 1),
            'finished': True
        }


# Run an ODPS SQL query
def read_odps(sql_template: str = None, params: dict = None) -> OdpsRes:
    '''
    Render a SQL template and execute it against ODPS.
    :param sql_template: SQL template, e.g. "select * from alsc_tech.shaopeng1 where dt='${bizdate}' and col like '%${keys}%'"
    :param params: values substituted for ${...} placeholders, e.g. {"bizdate":"20231201","keys":"红包"}
    :return: OdpsRes with the query rows on success, or an error status
    '''
    if not sql_template:
        return OdpsRes(status='no_template', message="sql不可为空")
    try:
        params = params if params else {}
        pattern = re.compile(r'\$\{(\w+)\}')

        def replacer(match):
            # missing placeholder keys render as '' instead of raising
            return str(params.get(match.group(1), ''))

        sql = pattern.sub(replacer, sql_template)
    except Exception:
        # fix: narrowed the original bare `except:` clause
        return OdpsRes(status='build_sql_error', message="sql构建失败")

    try:
        result = common_storage.odps_storage.read(sql)
        return OdpsRes(status=True, result=result)
    except Exception as e:
        return OdpsRes(status='execute_sql_error', message="sql执行失败," + str(e))


# 选择器
class DataSelector(ABC):
    def __init__(self, embed_scope: str = None, data_group: list[str] = None):
        # Both values scope which stored embeddings are searched in chroma.
        self.embed_scope = embed_scope
        self.data_group = data_group

    def select(self, query_texts: Union[str, list[str]] = None, size: int = 10, param: dict = None) -> SelectRes:
        '''
        Validate the arguments, delegate to ``_do_select`` and wrap the
        result (or any raised exception) in a SelectRes.

        :param query_texts: query text or list of texts; must be non-empty
        :param size: number of records to recall, defaults to 10
        :param param: implementation-specific options (default used to be a
            mutable ``{}`` argument; now None-guarded, same behavior)
        :return: SelectRes
        '''
        if not query_texts:
            return SelectRes(status='param_error', message="查询文本不可为空")
        try:
            query_texts = query_texts if isinstance(query_texts, list) else [query_texts]
            size = size if size else 10
            param = param if param else {}
            datas = self._do_select(query_texts, size, param)
            return SelectRes(status=True, result=datas)
        except Exception as e:
            return SelectRes(status='select_error', message="select执行失败," + str(e))

    @abstractmethod
    def _do_select(self, query_texts: Union[str, list[str]] = None, size: int = None, param: dict = None) -> list[SelectData]:
        '''
        :param query_texts: query text list, non-empty
        :param size: recall size, non-empty
        :param param: custom options dict, non-empty
        :return: recalled records
        '''
        pass

    def search_chroma(self, query_embeddings: Union[list[float], list[list[float]]] = None, query_texts: Union[str, list[str]] = None, size: int = None) -> tuple[list[float], list[dict]]:
        '''
        :param query_embeddings: query embedding or list of embeddings
        :param query_texts: query text or list of texts
        :param size: recall size
        :return: (first embedding, matched records)
        '''
        return embed_storage.search_chroma(embed_scope=self.embed_scope, data_group=self.data_group, query_embeddings=query_embeddings, query_texts=query_texts, size=size)

    def embedding(self, query_texts: list[str] = None) -> list[list[float]]:
        # Embed the texts via the shared embedding storage.
        return embed_storage.embedding(texts=query_texts)

    def build_select_data(self, item: dict) -> SelectData:
        # Map one raw chroma hit (dict) onto a SelectData record.
        return SelectData(id=item.get('id'), distance=item.get('distance'), content=item.get('content'), embed_text=item.get('embed_text'), data_group=item.get('data_group'), gmt_modified=item.get('gmt_modified'), tags=item.get('tags'), keywords=item.get('keywords'), embedding=item.get('embedding'))

    def mmr(self, embedding: list[float], datas: list[Union[dict, SelectData]], balance_factor: float = 0.5, k: int = 4) -> list[int]:
        """
        Compute the Maximal Marginal Relevance over the candidates in ``datas``.

        :param embedding: the query embedding as a list of floats.
        :param datas: candidate records (dict or SelectData), each carrying an embedding.
        :param balance_factor: balances relevance with diversity (0 <= balance_factor <= 1).
        :param k: the number of items to pick.
        :return: indices into ``datas`` of the selected items, in pick order.
        """
        # Guard: np.argmax on an empty similarity array would raise.
        if not datas:
            return []
        # Extract the candidate embedding list from the mixed-type records.
        candidate_embeddings = [item.get('embedding') if isinstance(item, dict) else item.embedding for item in datas]

        # Normalize the query to shape (1, dim) for cosine_similarity.
        if np.ndim(embedding) == 1:
            embedding = np.expand_dims(embedding, 0)

        # Similarity of every candidate to the query embedding.
        similarities_to_query = cosine_similarity(embedding, candidate_embeddings)[0]

        # Seed the selection with the single most relevant candidate.
        most_similar_idx = int(np.argmax(similarities_to_query))
        idxs = [most_similar_idx]
        selected_embeddings = np.array([candidate_embeddings[most_similar_idx]])

        # Greedily add the candidate with the best MMR score until k are picked.
        while len(idxs) < min(k, len(candidate_embeddings)):
            best_score = -np.inf
            idx_to_add = None

            for i, similarity_to_query in enumerate(similarities_to_query):
                if i in idxs:
                    continue

                # Diversity term: similarity to the already-selected items.
                similarity_to_selected = cosine_similarity([candidate_embeddings[i]], selected_embeddings)
                max_similarity_to_selected = np.max(similarity_to_selected)

                # MMR = relevance weighted against redundancy.
                mmr_score = balance_factor * similarity_to_query - (1 - balance_factor) * max_similarity_to_selected

                if mmr_score > best_score:
                    best_score = mmr_score
                    idx_to_add = i

            # Commit the best pick and extend the selected pool.
            idxs.append(idx_to_add)
            selected_embeddings = np.vstack((selected_embeddings, [candidate_embeddings[idx_to_add]]))

        return idxs


# 普通召回
class SimpleSelector(DataSelector):

    def _do_select(self, query_texts: Union[str, list[str]] = None, size: int = 10, param: dict = None) -> list[SelectData]:
        '''
        Embed the query texts, search chroma, and optionally re-rank with MMR.

        :param query_texts: query text(s); normalized to a list by select()
        :param size: number of records to return
        :param param: options — 'mmr' (truthy enables MMR re-ranking) and
            'balance_factor' (relevance/diversity trade-off, defaults to 0.5).
            Default changed from a mutable ``{}`` argument to None; behavior
            is identical because it is guarded below.
        :return: recalled SelectData records
        '''
        param = param if param else {}
        # Vectorize the query texts.
        query_embeddings = self.embedding(query_texts=query_texts)
        if not param.get('mmr'):
            first_embeddings, datas = self.search_chroma(query_embeddings=query_embeddings, size=size)
            return [self.build_select_data(item) for item in datas]

        balance_factor = param.get('balance_factor')
        # Recall 3x candidates so MMR has room to diversify.
        first_embeddings, datas = self.search_chroma(query_embeddings=query_embeddings, size=size * 3)
        # Pick the final result indices via MMR.
        selected_indices = self.mmr(first_embeddings, datas, k=size, balance_factor=balance_factor if balance_factor else 0.5)
        # Build the final result set from the selected indices.
        return [self.build_select_data(datas[idx]) for idx in selected_indices]


def trim_messages(lst: list[dict], max_length: int) -> list[dict]:
    '''
    Keep the most recent messages whose combined 'content' length fits
    within ``max_length``, walking backwards from the newest message.

    The newest message is always kept even if its content alone exceeds
    ``max_length`` (matches the previous behavior). Unlike the previous
    implementation, the input list is NOT reversed in place — the caller's
    list is left untouched.

    :param lst: chat messages, oldest first; each may carry a 'content' str
    :param max_length: character budget for the kept messages' contents
    :return: a new list of the kept messages, oldest first
    '''
    if not lst:
        return []

    # Seed with the newest message, then extend toward older ones.
    kept = [lst[-1]]
    total_length = len(lst[-1].get('content', ''))

    for item in reversed(lst[:-1]):
        curr_length = len(item.get('content', ''))
        if total_length + curr_length > max_length:
            break
        total_length += curr_length
        kept.append(item)

    # Collected newest-first; restore chronological order.
    kept.reverse()
    return kept


def odps_to_excel(sql_template: str = None, params: dict = None, excel_name: str = None, output_dir: str = '/Users/wangshaopeng/Desktop'):
    '''
    Run an ODPS query and dump the result to an Excel file
    (handier than copy-pasting the rows).

    :param sql_template: SQL template (see read_odps)
    :param params: template parameters
    :param excel_name: file name without extension; defaults to the current
        timestamp formatted as yyyymmddHH
    :param output_dir: directory the .xlsx is written to; defaults to the
        previously hard-coded macOS desktop path for backward compatibility
    :raises ValueError: when the ODPS query fails (via check_success)
    :return: the path of the written file
    '''
    odps_res: OdpsRes = read_odps(sql_template, params)
    odps_res.check_success()
    # Convert rows to a DataFrame.
    df = pd.DataFrame(odps_res.result)
    # Excel-friendly cells: blank out NaN and force everything to str.
    for column in df.columns:
        df[column] = df[column].fillna('').astype(str)
    if not excel_name:
        # Default name: current date/hour as yyyymmddHH.
        excel_name = datetime.datetime.now().strftime('%Y%m%d%H')
    file_path = f'{output_dir}/{excel_name}.xlsx'
    # Write the spreadsheet without the index column.
    df.to_excel(file_path, index=False)
    return file_path
