import copy
import json
import numpy as np
from scipy.spatial.distance import cosine
from gravityassist.config import get_option
from gravityassist.api import client, retry_api
import tiktoken

# print(enc.encode_ordinary('{"id": 7, "name": "【IXI。】透明少女【niroku】"}'))
# print(enc.encode_ordinary('{"id":7,"name":"【IXI。】透明少女【niroku】"}'))
# print(enc.encode_ordinary('...'))

# load_persistent_data('instance', False)

def auto_embed_messages(thread_data, model='gpt-3.5-turbo') -> tuple[list[dict], int]:
    """Embed thread messages and blank out low-relevance ones with '...'.

    Computes (and caches on ``thread_data.embeddings``) one embedding per
    message, then scores every older message against a weighted blend of the
    last two messages' embeddings.  Messages whose similarity falls below a
    distance/length-dependent threshold have their content replaced by
    ``'...'`` so they cost fewer prompt tokens.

    Args:
        thread_data: object with ``.messages`` (JSON-encoded list of chat
            messages) and ``.embeddings`` (JSON-encoded embedding cache,
            updated in place when new messages are embedded).
        model: chat model name; used only to select the tiktoken encoding
            for token counting.

    Returns:
        ``(messages, omit_count)``: a deep copy of the message list with
        omitted contents replaced by ``'...'``, and the number of messages
        that were omitted.
    """
    messages = json.loads(thread_data.messages)
    threscale = get_option('embedding_threshold', 0.5)
    # Feature disabled (threshold >= 1) or thread too short: untouched copy.
    # Note: the default min-length of len(messages) + 1 means the option must
    # be explicitly configured for omission to ever trigger.
    if threscale >= 1 or len(messages) < get_option('embedding_min_length', len(messages) + 1):
        return copy.deepcopy(messages), 0

    embeddings = json.loads(thread_data.embeddings) if thread_data.embeddings else []

    # Flatten each not-yet-embedded message into a single text: serialized
    # function calls (if any) followed by the plain content.
    new_contents = [
        ''.join(
            f"{c['function']['name']}({c['function']['arguments']})"
            for c in message.get('tool_calls', ())
            if c['type'] == 'function'
        ) + (message.get('content') or '')
        for message in messages[len(embeddings):]
    ]

    if new_contents:  # skip the API round-trip: the endpoint rejects an empty input list
        response = retry_api(client.embeddings.create,
            model="text-embedding-3-large",
            input=new_contents,
            timeout=2,
        )
        enc = tiktoken.encoding_for_model(model)
        new_num_tokens = [len(tokens) for tokens in enc.encode_batch(new_contents)]
        embeddings += [
            {'embedding': item.embedding, 'length': num_tokens}
            for item, num_tokens in zip(response.data, new_num_tokens)
        ]
        # Persist the cache so already-computed embeddings are reused next call.
        thread_data.embeddings = json.dumps(embeddings, ensure_ascii=False, separators=(',', ':'))

    new_messages = copy.deepcopy(messages)
    if len(embeddings) < 2:
        # Not enough context to score relevance against; nothing to omit.
        return new_messages, 0

    omit_count = 0
    # Score every older message against the last two messages, weighting the
    # most recent message 3:1 over the second-most-recent.
    last_two_embeddings = [embeddings[-2]['embedding'], embeddings[-1]['embedding']]
    for i, embedding in enumerate(embeddings[:-2]):
        similarity_to_last_two = [1 - cosine(embedding['embedding'], last_emb) for last_emb in last_two_embeddings]
        similarity_to_last_two = similarity_to_last_two[0] * 0.25 + similarity_to_last_two[1] * 0.75
        # Older or longer messages get a lower effective distance-derived
        # threshold and are therefore omitted first.
        length_factor = 0.375 + np.tanh(0.125 + embedding['length'] / 128) * 0.5
        distance_to_last_two = min(32, (len(messages) - i - 0.75) * length_factor)
        if i == 0:
            # The first message (presumably the system prompt — confirm with
            # callers) gets a fixed distance regardless of thread length.
            distance_to_last_two = 2.375
        threshold = (1 - 2 / distance_to_last_two) * min(1, max(0, threscale))
        if similarity_to_last_two < threshold:
            new_messages[i]['content'] = '...'
            omit_count += 1

    return new_messages, omit_count

# Relevance processing
# thread_data = list(persistent_data()['threads'].values())[5]
# thread_data['messages'].append({'role': 'user', 'content': '播放taobien'})
# messages = auto_embed_messages(thread_data)  # returns the messages array after relevance processing
# print(messages)
# After processing, every message in the array has been marked relevant or irrelevant
# Irrelevant messages in the array have now been replaced with '...'

# print(messages)
# contents = [''.join(f"{c['function']['name']}({c['function']['arguments']})" for c in message.get('tool_calls', ()) if c['type'] == 'function') + (message['content'] or '') for message in messages]
# new_num_tokens = [len(e) for e in enc.encode_batch(contents)]
# print(sum(num_tokens), "->", sum(new_num_tokens))
