import os
import asyncio
import random
import time
from lightrag import LightRAG, QueryParam
from lightrag.llm import openai_complete_if_cache, openai_embedding
from lightrag.utils import EmbeddingFunc
import numpy as np
import aiohttp
import json
import traceback

# Directory where LightRAG keeps its index/cache files.
WORKING_DIR = "./dickens"
# makedirs(exist_ok=True) avoids the check-then-create race of
# `if not exists: mkdir` and also tolerates missing parent directories.
os.makedirs(WORKING_DIR, exist_ok=True)

class RateLimiter:
    """Asynchronous token-bucket rate limiter.

    The bucket starts with ``tokens`` available and refills at
    ``refill_rate`` tokens per second, never exceeding the initial
    bucket capacity.
    """

    def __init__(self, tokens, refill_rate):
        # Remember the bucket capacity separately: refills must be capped
        # at the burst size, not at the refill rate.
        self.capacity = tokens
        self.tokens = tokens
        self.refill_rate = refill_rate
        self.last_refill = time.time()

    async def wait(self):
        """Cooperatively block until a token is available, then consume it."""
        while self.tokens < 1:
            now = time.time()
            time_passed = now - self.last_refill
            # BUG FIX: the refill was previously capped at `refill_rate`
            # instead of the capacity, silently shrinking the burst size
            # to `refill_rate` tokens after the first depletion.
            self.tokens = min(self.tokens + time_passed * self.refill_rate, self.capacity)
            self.last_refill = now
            if self.tokens < 1:
                await asyncio.sleep(0.1)
        self.tokens -= 1

rate_limiter = RateLimiter(tokens=10, refill_rate=1)  # 每秒10个请求

async def llm_model_func(
    prompt, system_prompt=None, history_messages=None, **kwargs
) -> str:
    """Call the SiliconFlow chat-completions API and return the reply text.

    Args:
        prompt: The user message to send.
        system_prompt: Optional system message prepended to the conversation.
        history_messages: Optional list of prior {"role", "content"} dicts.
        **kwargs: ``temperature`` (default 0.7) and ``max_tokens``
            (default 768) are forwarded to the API; other keys are ignored.

    Raises:
        ValueError: On a malformed response, a non-retryable HTTP status,
            or after exhausting all retries.
    """
    url = "https://api.siliconflow.cn/v1/chat/completions"
    # SECURITY NOTE(review): a hard-coded API key was committed here.
    # Prefer the SILICONFLOW_API_KEY environment variable; the literal is
    # kept only as a backward-compatible fallback — rotate and remove it.
    api_key = os.environ.get(
        "SILICONFLOW_API_KEY",
        "sk-tyngssliygwyohfxqapauityhtzyizmcqiqahonyxytxvcea",
    )
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }

    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    # `history_messages` defaults to None (not []) to avoid the
    # mutable-default-argument pitfall.
    messages.extend(history_messages or [])
    messages.append({"role": "user", "content": prompt})

    payload = {
        "model": "Qwen/Qwen2.5-7B-Instruct",
        "messages": messages,
        "temperature": kwargs.get("temperature", 0.7),
        "max_tokens": kwargs.get("max_tokens", 768),
        "stream": False,
    }
    max_retries = 20
    base_delay = 1  # initial backoff of 1 second; doubles per attempt on 429
    for attempt in range(max_retries):
        try:
            await rate_limiter.wait()
            async with aiohttp.ClientSession() as session:
                async with session.post(url, headers=headers, json=payload) as response:
                    if response.status == 200:
                        result = await response.json()
                        if not result.get("choices"):
                            raise ValueError(f"Unexpected API response format: {result}")
                        return result["choices"][0]["message"]["content"]
                    elif response.status == 429:
                        # Exponential backoff with up to 1s of jitter.
                        delay = (base_delay * 2 ** attempt) + random.uniform(0, 1)
                        print(f"Rate limited. Retrying in {delay:.2f} seconds...")
                        await asyncio.sleep(delay)
                    else:
                        raise ValueError(f"API request failed with status {response.status}")
        except Exception as e:
            if attempt == max_retries - 1:
                raise
            print(f"Attempt {attempt + 1} failed: {str(e)}. Retrying...")
            await asyncio.sleep(1)
    raise ValueError("Max retries reached. Unable to complete the request.")

async def embedding_func(texts: list[str]) -> np.ndarray:
    """Embed ``texts`` via the SiliconFlow embeddings API.

    Args:
        texts: Texts to embed.

    Returns:
        A 2-D array of shape (len(texts), dim) of float embeddings.

    Raises:
        ValueError: On a malformed response, a non-retryable HTTP status,
            or after exhausting all retries.
    """
    url = "https://api.siliconflow.cn/v1/embeddings"
    # SECURITY NOTE(review): hard-coded API key committed here — prefer the
    # SILICONFLOW_API_KEY environment variable; the literal is kept only as
    # a backward-compatible fallback. Rotate and remove it.
    api_key = os.environ.get(
        "SILICONFLOW_API_KEY",
        "sk-tyngssliygwyohfxqapauityhtzyizmcqiqahonyxytxvcea",
    )
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }

    payload = {
        "model": "BAAI/bge-m3",
        "input": texts,
        "encoding_format": "float",
    }
    max_retries = 5
    base_delay = 1
    for attempt in range(max_retries):
        try:
            await rate_limiter.wait()
            async with aiohttp.ClientSession() as session:
                async with session.post(url, headers=headers, json=payload) as response:
                    if response.status == 200:
                        result = await response.json()
                        if 'data' not in result or not isinstance(result['data'], list):
                            raise ValueError(f"Unexpected API response format: {result}")
                        embeddings = [item['embedding'] for item in result['data'] if 'embedding' in item]
                        if not embeddings:
                            raise ValueError("No valid embeddings found in the response")
                        return np.array(embeddings)
                    elif response.status == 429:
                        # Exponential backoff with up to 1s of jitter.
                        delay = (base_delay * 2 ** attempt) + random.uniform(0, 1)
                        print(f"Rate limited. Retrying in {delay:.2f} seconds...")
                        await asyncio.sleep(delay)
                    else:
                        error_text = await response.text()
                        print(f"API request failed with status {response.status}")
                        print(f"Error response: {error_text}")
                        raise ValueError(f"API request failed with status {response.status}: {error_text}")
        except Exception as e:
            if attempt == max_retries - 1:
                raise
            print(f"Attempt {attempt + 1} failed: {str(e)}. Retrying...")
            await asyncio.sleep(1)
    raise ValueError("Max retries reached. Unable to complete the request.")

async def get_embedding_dim():
    """Probe the embedding endpoint with a short sentence and return its dimension."""
    sample = ["This is a test sentence."]
    try:
        vectors = await embedding_func(sample)
        if vectors is None:
            raise ValueError("Embedding function returned None")
        if not isinstance(vectors, np.ndarray):
            raise ValueError(f"Unexpected embedding type: {type(vectors)}")
        # A 2-D result is (n_texts, dim); anything else is treated as a
        # single vector whose length is the dimension.
        return vectors.shape[1] if vectors.ndim == 2 else vectors.shape[0]
    except Exception as e:
        print(f"Error in get_embedding_dim: {e}")
        raise

async def test_funcs():
    """Smoke-test both API wrappers with a trivial prompt and text."""
    llm_reply = await llm_model_func("How are you?")
    print("llm_model_func: ", llm_reply)
    embedding = await embedding_func(["How are you?"])
    print("embedding_func: ", embedding)

async def main():
    """Build a LightRAG index over ./book.txt, then run sample queries in all modes."""
    try:
        dim = await get_embedding_dim()
        print(f"Detected embedding dimension: {dim}")
        rag = LightRAG(
            working_dir=WORKING_DIR,
            llm_model_func=llm_model_func,
            embedding_func=EmbeddingFunc(
                embedding_dim=dim,
                max_token_size=8192,
                func=embedding_func,
            ),
        )

        with open("./book.txt", "r", encoding="utf-8") as f:
            content = f.read()

        chunk_size = 1000  # adjust to suit your document and model
        total = (len(content) + chunk_size - 1) // chunk_size
        for idx, start in enumerate(range(0, len(content), chunk_size), start=1):
            piece = content[start:start + chunk_size]
            try:
                await rag.ainsert(piece)
                print(f"Successfully inserted chunk {idx}/{total}")
            except Exception as e:
                print(f"Error inserting chunk {idx}: {e}")
            await asyncio.sleep(1)  # brief pause between inserts

        for mode in ["naive", "local", "global", "hybrid"]:
            try:
                result = await rag.aquery(
                    "文章的主题是什么（中文回答）?", param=QueryParam(mode=mode)
                )
                print(f"Result for {mode} search:")
                print(result)
            except Exception as e:
                print(f"Error in {mode} search: {e}")

    except Exception as e:
        print(f"An error occurred: {e}")
        traceback.print_exc()

if __name__ == "__main__":
    # Script entry point: run the async indexing/query pipeline to completion.
    asyncio.run(main())