import json
import logging
import time
import uuid

import httpx
from asgiref.sync import async_to_sync
from celery import shared_task
from channels.layers import get_channel_layer
from django.core.cache import cache


# Lifetime (seconds) for every cache entry written by this task.
CACHE_TIMEOUT = 300

logger = logging.getLogger(__name__)


def _append_stream_event(cache_key, event):
    """Append one event dict to the Redis-backed stream list for this task.

    NOTE(review): this read-modify-write is not atomic; it is safe only
    because a single task instance writes to a given ``cache_key`` — confirm
    no two tasks ever share one.
    """
    stream_key = f"{cache_key}_stream"
    events = cache.get(stream_key, [])
    events.append(event)
    cache.set(stream_key, events, timeout=CACHE_TIMEOUT)


@shared_task(bind=True)
def process_llm_streaming_task(self, messages, conversation_id, cache_key, user_id):
    """Stream an LLM response from the backend service into the cache.

    Opens a streaming POST to the LLM HTTP endpoint and, as SSE ``data:``
    lines arrive, accumulates the text and mirrors each chunk into a
    cache-backed list so a polling view (or a future WebSocket consumer)
    can relay it to the client.

    Cache keys written (all with a 300 s timeout):
      - ``<cache_key>_status``: "processing" / "completed" / "error"
      - ``<cache_key>_stream``: list of chunk dicts, terminated by a
        ``{"done": True, ...}`` marker (or ``{"error": ...}`` on failure)
      - ``<cache_key>_final``: the final payload once the stream completes
      - ``<cache_key>_error``: error text when the task fails

    Args:
        messages: chat history, forwarded verbatim to the LLM service.
        conversation_id: identifier echoed back in every payload.
        cache_key: namespace prefix for all cache entries of this run.
        user_id: reserved for per-user WebSocket group routing
            (``f"chat_{user_id}"`` via channels); currently unused.

    Returns:
        dict with ``conversation_id``, the accumulated ``full_response``,
        and the last ``type`` value reported by the service (or ``None``).
    """
    llm_host = "192.168.1.101"
    url = f"http://{llm_host}:8088/process/"

    full_response = ""
    response_type = None

    try:
        # Mark the task as started so pollers can distinguish
        # "not yet running" from "in progress".
        cache.set(f"{cache_key}_status", "processing", timeout=CACHE_TIMEOUT)

        # Synchronous httpx client: Celery task bodies run synchronously.
        # timeout=None because LLM generation can take arbitrarily long.
        with httpx.Client(timeout=None) as client:
            with client.stream("POST", url, json={"messages": messages}) as resp:
                for line in resp.iter_lines():
                    line = line.strip()
                    # Only SSE data lines carry payloads. Previously the
                    # first 5 characters of EVERY non-empty line were
                    # chopped off, garbling keep-alive/comment lines;
                    # they only survived by failing JSON parsing.
                    if not line.startswith("data:"):
                        continue

                    # Keep the try body minimal: only json.loads can
                    # legitimately raise JSONDecodeError.
                    try:
                        chunk_data = json.loads(line[len("data:"):].strip())
                    except json.JSONDecodeError as exc:
                        logger.warning("Skipping malformed SSE chunk: %s", exc)
                        continue

                    if 'content' in chunk_data:
                        content_chunk = chunk_data['content']
                        full_response += content_chunk
                        # Mirror the chunk into the cache so the frontend
                        # can stream it. (A channels group_send to
                        # f"chat_{user_id}" could push it over WebSocket
                        # instead; not wired up yet.)
                        _append_stream_event(cache_key, {
                            'content': content_chunk,
                            'conversation_id': conversation_id,
                            'timestamp': time.time(),
                        })

                    if 'type' in chunk_data:
                        response_type = chunk_data['type']

                    if 'done' in chunk_data:
                        # Stream finished: persist the final aggregate.
                        final_payload = {
                            "conversation_id": conversation_id,
                            "type_int": response_type,
                            "done": True,
                            "full_response": full_response,
                        }

                        # type 0 responses additionally carry a
                        # questionnaire link for the client to render.
                        if response_type == 0:
                            final_payload["questionnaire_list"] = [
                                {
                                    "title_page": "情绪障碍筛查量表（SCARED）",
                                    "src_page": f"http://{llm_host}:8000/questions/",
                                    "id_page": "anxiety_sas"
                                },
                            ]

                        cache.set(f"{cache_key}_final", final_payload,
                                  timeout=CACHE_TIMEOUT)
                        cache.set(f"{cache_key}_status", "completed",
                                  timeout=CACHE_TIMEOUT)

                        # Terminal marker so stream readers know to stop.
                        _append_stream_event(
                            cache_key,
                            {"done": True, "final_payload": final_payload},
                        )
                        break

    except Exception as exc:
        # Top-level task boundary: record the failure for pollers rather
        # than letting Celery retry/raise silently.
        logger.exception("Error in LLM task for conversation %s",
                         conversation_id)
        cache.set(f"{cache_key}_status", "error", timeout=CACHE_TIMEOUT)
        cache.set(f"{cache_key}_error", str(exc), timeout=CACHE_TIMEOUT)
        # Surface the error in the stream too, so readers unblock.
        _append_stream_event(cache_key, {"error": str(exc)})

    return {
        "conversation_id": conversation_id,
        "full_response": full_response,
        "response_type": response_type,
    }