import requests
import json
import os
from utils.api_request import search_papers
from typing import List, Dict
import math

def calculate_combined_length(query: str, document: str) -> int:
    """Return the total character count of *query* and *document* combined."""
    return sum(len(piece) for piece in (query, document))

def split_text(text: str, max_length: int, query_length: int) -> List[str]:
    """
    Split *text* into chunks so that each chunk combined with the query
    stays within *max_length* characters.

    Args:
        text: the text to split.
        max_length: total (query + chunk) character budget.
        query_length: length of the query, reserved out of the budget.

    Returns:
        A list of chunks, each at most ``max_length - query_length`` chars.
        If *text* already fits, it is returned as a single-element list.

    Raises:
        ValueError: if ``query_length >= max_length`` — there is no room
            left for any text.
    """
    available_length = max_length - query_length
    # BUG FIX: previously a non-positive budget either silently returned []
    # (negative range step — the document was dropped without a trace) or
    # raised an opaque "range() arg 3 must not be zero". Fail loudly instead.
    if available_length <= 0:
        raise ValueError(
            f"query_length ({query_length}) leaves no room within "
            f"max_length ({max_length})"
        )
    if len(text) <= available_length:
        return [text]
    return [
        text[i:i + available_length]
        for i in range(0, len(text), available_length)
    ]

def batch_rerank_request(query: str, documents: List[str], original_indices: List[int], batch_size: int = 128):
    """
    Issue a single rerank request, ensuring every query + document
    combination stays within the model's length limit.

    Args:
        query: search query (truncated to fit the combined-length budget).
        documents: candidate document texts; at most *batch_size* are sent.
        original_indices: caller-side index of each document, echoed back as
            ``original_index`` on every result item.
        batch_size: maximum number of documents per request.

    Returns:
        The parsed JSON response with ``original_index`` added to each item
        in ``results``, or ``None`` on request failure.
    """
    url = 'https://open.bigmodel.cn/api/paas/v4/rerank'
    api_key = os.getenv("GLM_TOKEN")

    headers = {
        'Authorization': api_key,
        'Content-Type': 'application/json',
    }

    # Combined (query + document) character budget for the rerank model.
    MAX_COMBINED_LENGTH = 3000

    # BUG FIX: the query used to be capped at 4096 chars, which exceeds
    # MAX_COMBINED_LENGTH — a long query made every document "over-long" and
    # split_text() then returned an empty list, crashing on parts[0].
    # Cap the query so documents always have at least one character of room.
    query = query[:min(4096, MAX_COMBINED_LENGTH - 1)]
    query_length = len(query)
    available_length = MAX_COMBINED_LENGTH - query_length

    valid_documents = []
    valid_indices = []

    for doc, orig_idx in zip(documents[:batch_size], original_indices[:batch_size]):
        # Over-long documents are truncated to the first fitting slice (the
        # original code split the text and kept only the first part anyway,
        # to avoid scoring duplicated content).
        if len(doc) > available_length:
            doc = doc[:available_length]
        valid_documents.append(doc)
        valid_indices.append(orig_idx)

    payload = {
        'model': 'rerank',
        'query': query,
        'documents': valid_documents,
        'return_documents': True,
        'return_raw_scores': True
    }

    try:
        # BUG FIX: added a timeout — without one a stalled connection
        # blocks forever.
        response = requests.post(url, headers=headers, json=payload, timeout=30)
        response.raise_for_status()
        result = response.json()

        # Map each per-batch index back to the caller's original index.
        if 'results' in result:
            for item in result['results']:
                item['original_index'] = valid_indices[item['index']]

        return result
    except requests.exceptions.RequestException as e:
        print(f"批次请求错误: {e}")
        # e.response is None for connection-level errors; hasattr guards that.
        if hasattr(e.response, 'text'):
            print(f"错误详情: {e.response.text}")
        return None

def rerank_documents(query: str, raw_documents: list, top_n: int = 0):
    """
    Rerank documents via the ZhipuAI rerank API, batching the requests so
    large document sets are supported.

    Args:
        query: search query (capped at 4000 characters).
        raw_documents: items shaped like ``{'entity': {'chunk_text': str}}``.
        top_n: if > 0, keep only the highest-scoring *top_n* documents.

    Returns:
        Dict with ``results`` (one entry per original document, best score
        across its parts, sorted descending) and aggregated ``usage`` tokens.
    """
    query = query[:4000]  # cap the query length
    query_length = len(query)

    # Flatten every chunk into (part, original-document-index) pairs,
    # splitting long chunks so query + part stays within the budget.
    processed_documents = []
    original_indices = []
    for doc_idx, entry in enumerate(raw_documents):
        for part in split_text(entry['entity']['chunk_text'], 2000, query_length):
            processed_documents.append(part)
            original_indices.append(doc_idx)

    batch_size = 50
    total_documents = len(processed_documents)
    num_batches = math.ceil(total_documents / batch_size)

    all_results = []
    total_prompt_tokens = 0
    total_tokens = 0

    # Send each batch and accumulate results plus token usage.
    for batch_num in range(num_batches):
        lo = batch_num * batch_size
        hi = min(lo + batch_size, total_documents)
        batch_documents = processed_documents[lo:hi]
        batch_indices = original_indices[lo:hi]

        print(f"处理批次 {batch_num + 1}/{num_batches}, 文档数量: {len(batch_documents)}")

        result = batch_rerank_request(query, batch_documents, batch_indices)
        if not result or 'results' not in result:
            continue

        all_results.extend(result['results'])
        if 'usage' in result:
            usage = result['usage']
            total_prompt_tokens += usage.get('prompt_tokens', 0)
            total_tokens += usage.get('total_tokens', 0)

    # Keep the single best score per original document.
    best_by_doc = {}
    for item in all_results:
        orig_idx = item['original_index']
        score = item['relevance_score']
        current = best_by_doc.get(orig_idx)
        if current is None or score > current['relevance_score']:
            best_by_doc[orig_idx] = {
                'index': orig_idx,
                'relevance_score': score,
                'document': raw_documents[orig_idx]['entity']['chunk_text'],
            }

    ranked = sorted(
        best_by_doc.values(),
        key=lambda entry: entry['relevance_score'],
        reverse=True,
    )
    if top_n > 0:
        ranked = ranked[:top_n]

    return {
        'results': ranked,
        'usage': {
            'prompt_tokens': total_prompt_tokens,
            'total_tokens': total_tokens,
        },
    }


def rerank_query_docs(query: str, raw_documents: list, score_threshold: float = 16.0):
    """
    Rerank *raw_documents* against *query* and keep only the documents
    whose relevance score exceeds *score_threshold*.

    Args:
        query: search query.
        raw_documents: documents as expected by ``rerank_documents``.
        score_threshold: minimum (exclusive) relevance score to keep.

    Returns:
        The surviving items from *raw_documents*, in descending score order.
    """
    ranked = rerank_documents(
        query=query,
        raw_documents=raw_documents,
        top_n=len(raw_documents),
    )
    kept_indices = [
        item['index']
        for item in ranked['results']
        if item['relevance_score'] > score_threshold
    ]
    return [raw_documents[i] for i in kept_indices]

# Usage example
if __name__ == "__main__":
    query = "What is the current status and challenges of Text2SQL research?"
    chunks = search_papers(query, top_k=50)

    result = rerank_documents(
        query=query,
        raw_documents=chunks,
        top_n=50
    )

    # BUG FIX: `result` was previously dereferenced (result['results'])
    # before the `if result:` guard, making the guard dead code. Guard first.
    if result:
        # Keep only documents scoring above the threshold.
        result_index = [
            item['index']
            for item in result['results']
            if item['relevance_score'] > 16
        ]
        final_result = [chunks[i] for i in result_index]

        print("\n重排序结果:")
        for idx, item in enumerate(result['results'], 1):
            print(f"\n{idx}. 得分: {item['relevance_score']:.4f}")
            print(f"文档摘要: {item['document'][:200]}...")

        print("\n使用统计:")
        print(f"总 Prompt Tokens: {result['usage']['prompt_tokens']}")
        print(f"总 Tokens: {result['usage']['total_tokens']}")