import requests
import numpy as np
import configparser

# Load embedding-service settings from a local INI file.
# NOTE(review): ConfigParser.read() silently ignores a missing file; if
# "config.ini" is absent, the config.get() calls below raise
# configparser.NoSectionError at import time — confirm this is intended.
config = configparser.ConfigParser()
config.read("config.ini")

# Endpoint URL and model identifier used by get_embeddings() below.
EMBEDDING_ENDPOINT_URL = config.get('embedding','endpoint_url')
MODEL_NAME = config.get('embedding','model_name')

def get_embeddings(texts):
    """Fetch embedding vectors for *texts* from the configured embedding API.

    Args:
        texts: the value sent as the API ``input`` field — per the call in
            ``origin`` this is a list of strings, query first.

    Returns:
        list: one embedding vector (list of floats) per input text, in order.

    Raises:
        ValueError: if the HTTP request fails, the response body is not
            valid JSON, or the payload lacks usable embedding data.
    """
    headers = {'Content-Type': 'application/json'}
    data = {'input': texts, 'model': MODEL_NAME}
    try:
        response = requests.post(EMBEDDING_ENDPOINT_URL, headers=headers,
                                 json=data, timeout=10)
        response.raise_for_status()
        response_data = response.json()
    except requests.exceptions.RequestException as e:
        # Bug fix: this branch catches transport/HTTP errors, but the old
        # message claimed a parsing failure. Label it correctly and chain
        # the cause; the raised type stays ValueError for existing callers.
        raise ValueError(f'Embedding API request failed: {e}') from e
    except ValueError as e:
        # response.json() raises a ValueError subclass on malformed JSON;
        # previously this escaped uncaught.
        raise ValueError(f'Error parsing API response: {e}') from e
    if 'data' not in response_data or not response_data['data']:
        raise ValueError('API返回为空')
    try:
        return [item['embedding'] for item in response_data['data']]
    except (KeyError, TypeError) as e:
        # Items missing the 'embedding' key (or not dicts) are a parsing
        # failure of the API response; previously this escaped uncaught.
        raise ValueError(f'Error parsing API response: {e}') from e

def cosine_similarity(vec_a, vec_b):
    """Return the cosine similarity of two vectors.

    Both arguments may be any sequence accepted by ``np.asarray``. When
    either vector has zero magnitude the similarity is defined as 0.0.
    """
    a = np.asarray(vec_a)
    b = np.asarray(vec_b)
    magnitude_a = np.linalg.norm(a)
    magnitude_b = np.linalg.norm(b)
    # A zero-length vector has no direction; treat it as orthogonal.
    if magnitude_a == 0 or magnitude_b == 0:
        return 0.0
    return np.dot(a, b) / (magnitude_a * magnitude_b)


def find_cliff_split_index(scores, min_drop=0.05):
    """Locate the steepest "cliff" (score drop) in a descending score list.

    Args:
        scores: similarity scores sorted in descending order.
        min_drop: smallest consecutive drop that counts as a cliff.
            Defaults to 0.05, matching the previously hard-coded threshold.

    Returns:
        int: index such that ``scores[:index]`` are the scores above the
        steepest drop; ``len(scores)`` when there are two or fewer scores
        or no drop reaches *min_drop* (i.e. no cut is made).
    """
    if len(scores) <= 2:
        # Too few points to call anything a cliff; keep everything.
        return len(scores)
    # Consecutive drops between neighboring scores. The old unreachable
    # "if not diffs" branch is removed: len(scores) >= 3 guarantees diffs
    # is non-empty. Pure-Python max (first maximum wins, same tie-breaking
    # as np.argmax) avoids leaking a numpy.int64 to callers.
    diffs = [scores[i] - scores[i + 1] for i in range(len(scores) - 1)]
    max_diff_index = max(range(len(diffs)), key=diffs.__getitem__)
    if diffs[max_diff_index] < min_drop:
        return len(scores)
    return max_diff_index + 1


def origin(input_data: dict):
    """Rank documents by embedding similarity to a query.

    Expects ``input_data`` with a non-empty string ``'query'`` and a
    non-empty list ``'documents'`` of dicts carrying a ``'text'`` field.
    Embeds the query together with every document text in one API call,
    scores each document by cosine similarity, and returns:

    - ``'top_3'``: the three highest-scoring documents;
    - ``'result'``: the ranked prefix cut at the steepest score drop,
      never longer than three documents.

    Raises:
        ValueError: when query/documents are missing or of the wrong type.
    """
    query = input_data.get('query')
    documents = input_data.get('documents')

    if not isinstance(query, str) or not query:
        raise ValueError('query 为非空字符串')
    if not isinstance(documents, list) or not documents:
        raise ValueError('documents 为非空列表')

    # One batched embedding call: query vector first, then each document.
    texts_to_embed = [query] + [doc.get('text', '') for doc in documents]
    embeddings = get_embeddings(texts_to_embed)
    query_vec, doc_vecs = embeddings[0], embeddings[1:]

    scored = [
        {'score': cosine_similarity(query_vec, vec), 'document': doc}
        for vec, doc in zip(doc_vecs, documents)
    ]
    scored.sort(key=lambda item: item['score'], reverse=True)

    # Cut at the steepest similarity drop, capped at the top three.
    ranked_scores = [item['score'] for item in scored]
    cutoff = min(find_cliff_split_index(ranked_scores), 3)

    return {
        'top_3': [item['document'] for item in scored[:3]],
        'result': [item['document'] for item in scored[:cutoff]],
    }