from rest_framework import status
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from django.conf import settings
import logging
import concurrent.futures
from rest_framework.views import APIView
from ..serializers import (
    NoteSerializer
)
from ..models import Note, Tag, Category, ChatHistory
from ..services.ai_service import ai_service, AIService
from django.core.cache import cache
from apps.ai.services.ai_manager import AIManager
from rest_framework.exceptions import APIException
from django.http import StreamingHttpResponse
import json
import time
import uuid
from django.utils import timezone
import datetime
import re

logger = logging.getLogger('apps')

# Throttling / rate-limit settings for the AI endpoints.
# Each value may be overridden in Django settings; the fallbacks below apply otherwise.
RATE_LIMIT_CONFIG = {
    'window_size': getattr(settings, 'AI_RATE_LIMIT_WINDOW', 60 * 60),  # sliding-window length in seconds (default: 1 hour)
    'max_requests': getattr(settings, 'AI_RATE_LIMIT_MAX_REQUESTS', 50),  # max requests allowed per window (default: 50)
    'cooldown_time': getattr(settings, 'AI_RATE_LIMIT_COOLDOWN', 60 * 5),  # cooldown after the limit is hit, in seconds (default: 5 minutes)
}

class BaseResponse:
    """Static helpers that wrap payloads in the project's uniform response envelope."""

    @staticmethod
    def success(data=None, message="操作成功"):
        """Return a 200 response carrying *data* inside a success envelope."""
        body = {
            "status": "success",
            "message": message,
            "data": data,
        }
        return Response(body)

    @staticmethod
    def error(message="操作失败", status=status.HTTP_400_BAD_REQUEST):
        """Return an error envelope with the given HTTP status code."""
        body = {
            "status": "error",
            "message": message,
        }
        return Response(body, status=status)

class AIProcessingError(APIException):
    """Raised when the upstream AI service cannot fulfil a request (HTTP 503)."""
    status_code = 503
    default_detail = "AI 服务暂时不可用"
    # BUG FIX: attribute was misspelled "defalut_code"; DRF only reads
    # "default_code", so the custom error code was silently ignored.
    default_code = "ai_service_unavailable"

class RateLimitExceeded(APIException):
    """Raised when a user exceeds the per-user AI request rate limit (HTTP 429)."""
    status_code = 429
    default_detail = "请求频率超过限制，请稍后再试"
    default_code = "rate_limit_exceeded"

class NoteChatView(APIView):
    permission_classes = [IsAuthenticated]

    def _is_confirmation_message(self, message):
        """Return True when *message* reads like the user confirming a suggestion."""
        keywords = (
            '确认', '应用', '接受', '采纳', '同意', '好的', '可以',
            '没问题', '就这样', '确定', '是的', '对', '行', 'ok', 'yes',
        )
        lowered = message.lower()
        for keyword in keywords:
            if keyword in lowered:
                return True
        return False
    
    def _check_rate_limit(self, user_id):
        """Check whether *user_id* is within the AI request rate limit.

        Implements a sliding-window counter stored in the Django cache, with a
        cooldown once the window limit is exceeded.

        Returns:
            tuple[bool, str]: ``(True, "")`` when the request may proceed,
            ``(False, reason)`` when the user is throttled.
        """
        key = f"ai_rate_limit_{user_id}"
        current_data = cache.get(key, {"requests": [], "is_limited": False})

        # If the user is currently inside a cooldown period
        if current_data.get("is_limited", False):
            if "cooldown_until" in current_data:
                cooldown_until = current_data["cooldown_until"]
                if timezone.now() < cooldown_until:
                    # Still cooling down: report the remaining wait time
                    remaining = (cooldown_until - timezone.now()).total_seconds()
                    logger.warning(f"用户 {user_id} 处于冷却期，还需等待 {remaining:.1f} 秒")
                    return False, f"请求频率过高，请在 {int(remaining)} 秒后再试"
                else:
                    # Cooldown expired: lift the limit and start a fresh window
                    current_data["is_limited"] = False
                    current_data["requests"] = []

        # Determine the start of the current sliding window
        now = timezone.now()
        window_start = now - datetime.timedelta(seconds=RATE_LIMIT_CONFIG['window_size'])

        # Keep only the requests that fall inside the current window
        current_window_requests = [req for req in current_data["requests"] if req >= window_start]

        # Too many requests in the window: activate the cooldown
        if len(current_window_requests) >= RATE_LIMIT_CONFIG['max_requests']:
            cooldown_until = now + datetime.timedelta(seconds=RATE_LIMIT_CONFIG['cooldown_time'])
            current_data = {
                "requests": current_window_requests,
                "is_limited": True,
                "cooldown_until": cooldown_until
            }
            cache.set(key, current_data, RATE_LIMIT_CONFIG['window_size'] + RATE_LIMIT_CONFIG['cooldown_time'])
            logger.warning(f"用户 {user_id} 超过请求限制，已激活冷却时间")
            # BUG FIX: window_size // 60 converts seconds to MINUTES, but the
            # message labelled the value "小时" (hours) — with the default
            # 1-hour window it claimed "60 小时". Label it as minutes.
            return False, f"在 {RATE_LIMIT_CONFIG['window_size'] // 60} 分钟内的请求次数超过 {RATE_LIMIT_CONFIG['max_requests']} 次，请在 {RATE_LIMIT_CONFIG['cooldown_time'] // 60} 分钟后再试"

        # Record the current request and refresh the cache entry
        current_window_requests.append(now)
        current_data = {
            "requests": current_window_requests,
            "is_limited": False
        }
        cache.set(key, current_data, RATE_LIMIT_CONFIG['window_size'])

        return True, ""

    def post(self, request):
        """Handle a chat request; supports streaming (SSE) and regular output.

        Request body keys: ``note_id`` (or ``is_draft`` for draft mode),
        ``message`` (required), ``stream`` (default True), and — in draft
        mode — ``title``/``content``/``plan_type``/``status``.

        Returns a StreamingHttpResponse when ``stream`` is truthy, otherwise a
        DRF Response with the AI reply (429 on rate limit, 500 on failure).
        """
        # Generate a request ID for tracing/debugging
        request_id = str(uuid.uuid4())
        request.META['X-Request-ID'] = request_id

        start_time = time.time()
        logger.info(f"[{request_id}] 开始处理对话请求")

        try:
            # Enforce the per-user rate limit before any AI work
            can_proceed, error_message = self._check_rate_limit(request.user.id)
            if not can_proceed:
                logger.warning(f"[{request_id}] 用户 {request.user.id} 请求被限制: {error_message}")
                raise RateLimitExceeded(error_message)

            # Resolve which note the conversation is about
            note_id = request.data.get('note_id')
            note = None
            is_draft_mode = False

            # Draft mode: no persisted note — content comes straight from the request
            if not note_id or note_id == 'draft' or request.data.get('is_draft', False):
                is_draft_mode = True
                logger.info(f"[{request_id}] 处理草稿模式对话请求")

                # Build a transient Note instance; it is never saved to the DB
                note = Note(
                    user=request.user,
                    title=request.data.get('title', ''),
                    content=request.data.get('content', ''),
                    note_type=request.data.get('plan_type', 'general'),
                    status=request.data.get('status', 'pending')
                )
            else:
                # Regular mode: load the caller's own note from the database
                note = get_object_or_404(Note, id=note_id, user=request.user)

            # The chat message itself is mandatory
            message = request.data.get('message', '')
            if not message:
                return Response({
                    "status": "error",
                    "message": "消息不能为空",
                    "request_id": request_id
                }, status=400)

            # Streaming is the default output mode
            stream = request.data.get('stream', True)

            # Detect confirmation-style messages ("确认", "ok", ...)
            is_confirmation = self._is_confirmation_message(message)
            logger.info(f"[{request_id}] 处理对话请求: note_id={note_id}, message_length={len(message)}, is_confirmation={is_confirmation}, stream={stream}, is_draft={is_draft_mode}")

            # Streaming mode is handled by the SSE helper
            if stream:
                return self._handle_stream_request(request, note, message, is_confirmation, request_id)

            # Regular (non-streaming) mode.
            # FIX: the previous code submitted this single call to a
            # ThreadPoolExecutor and immediately blocked on future.result(),
            # which is synchronous anyway — pure thread overhead. Call directly.
            response = ai_service.chat(message, note)

            # Persist chat history only for real (non-draft) notes
            if not is_draft_mode:
                ChatHistory.objects.create(
                    user=request.user,
                    note=note,
                    message=message,
                    response=response.get('content', ''),
                    message_type='confirmation' if is_confirmation else 'regular'
                )

            # For a successful confirmation on a persisted note, return the refreshed note
            if is_confirmation and response.get('type') == "success" and not is_draft_mode:
                # Re-read the note so the latest content is returned
                note.refresh_from_db()
                logger.info(f"[{request_id}] 确认更新后的笔记内容: {note.content[:100]}...")
                return Response({
                    'status': 'success',
                    'data': {
                        'content': response.get('content', ''),
                        'suggestions': response.get('suggestions', []),
                        'note': {
                            'id': note.id,
                            'content': note.content,
                            'title': note.title
                        }
                    },
                    'request_id': request_id,
                    'processing_time': f"{time.time() - start_time:.2f}s"
                })

            logger.info(f"[{request_id}] 对话处理完成，耗时: {time.time() - start_time:.2f}s")
            return Response({
                "status": "success",
                'data': response,
                'request_id': request_id,
                'processing_time': f"{time.time() - start_time:.2f}s"
            })

        except RateLimitExceeded as e:
            return Response({
                'status': 'error',
                'message': str(e),
                'error_type': 'rate_limit_exceeded',
                'request_id': request_id
            }, status=429)
        except Exception as e:
            logger.error(f"[{request_id}] 处理对话失败: {str(e)}")
            return Response({
                'status': 'error',
                'message': str(e),
                'request_id': request_id,
                'processing_time': f"{time.time() - start_time:.2f}s"
            }, status=500)
            
    def _handle_stream_request(self, request, note, message, is_confirmation, request_id):
        """Stream an AI chat reply to the client as Server-Sent Events (SSE).

        Emits Kimi-style delta chunks, periodic statistics events, a finish
        event, and a final event carrying the complete (cleaned) response.
        On mid-stream failure it retries up to STREAM_MAX_RETRIES times,
        resuming from the last delivered chunk. Chat history for persisted
        notes is recorded on a background thread.

        Returns a StreamingHttpResponse (text/event-stream) on success, or a
        plain DRF Response with status 500 if setup fails.
        """
        try:
            start_time = time.time()
            
            # Draft mode: post() builds an unsaved Note, so it has no primary key yet
            is_draft_mode = hasattr(note, 'id') and note.id is None
            
            # Generator producing the SSE byte stream
            def event_stream():
                import threading
                import gzip
                import zlib
                import time
                from django.conf import settings
                
                # Compression-negotiation inputs.
                # NOTE(review): accept_encoding and compress_response are read
                # here but never used anywhere below — dead configuration.
                accept_encoding = request.META.get('HTTP_ACCEPT_ENCODING', '')
                compress_response = getattr(settings, 'COMPRESS_STREAMING_RESPONSE', True)
                
                # Reconnect-on-failure configuration
                max_retries = getattr(settings, 'STREAM_MAX_RETRIES', 3)
                retry_delay = getattr(settings, 'STREAM_RETRY_DELAY', 1.0)  # seconds
                current_retry = 0
                
                # Throughput statistics
                token_count = 0
                chunk_count = 0
                total_bytes = 0
                stream_start_time = time.time()
                
                # Send an initial debug event so the client can correlate the stream
                debug_info = {
                    "event": "debug",
                    "request_id": request_id,
                    "timestamp": stream_start_time,
                    "user_id": request.user.id,
                    "note_id": note.id if hasattr(note, 'id') and note.id else None,
                    "is_confirmation": is_confirmation,
                    "is_draft_mode": is_draft_mode
                }
                yield f"data: {json.dumps(debug_info, ensure_ascii=False)}\n\n"
                
                # Accumulate the complete response text across chunks
                full_response = ""
                response_type = "message"
                scene = "general"
                last_chunk_index = 0  # index of the last chunk delivered to the client
                
                # Heartbeat intended to keep the connection alive.
                # NOTE(review): send_heartbeat is a generator function, so
                # threading.Thread(target=send_heartbeat) merely creates a
                # generator object that is never iterated — the heartbeat
                # comments are never actually sent to the client.
                def send_heartbeat():
                    while True:
                        time.sleep(30)  # one heartbeat every 30 seconds
                        yield f": heartbeat\n\n"  # SSE comment line: ignored by clients but keeps the connection open
                
                # Start the heartbeat thread (see NOTE above — effectively a no-op)
                heartbeat_thread = threading.Thread(target=send_heartbeat)
                heartbeat_thread.daemon = True
                heartbeat_thread.start()
                
                # Retry loop for reconnecting after a mid-stream failure
                while current_retry <= max_retries:
                    try:
                        # Call the streaming API; on a retry, resume from the
                        # last chunk that was delivered
                        stream_generator = ai_service.chat_streaming(
                            message, 
                            note,
                            resume_from=last_chunk_index if current_retry > 0 else 0
                        )
                        
                        chunk_counter = 0
                        for chunk_data in stream_generator:
                            chunk = chunk_data.get("chunk", "")
                            scene = chunk_data.get("scene", "general")
                            needs_edit = chunk_data.get("needs_edit", False)
                            
                            # Update statistics
                            chunk_count += 1
                            token_count += len(chunk) // 4  # rough token estimate (~4 chars per token)
                            
                            # On a reconnect, skip chunks that were already sent
                            if current_retry > 0 and chunk_counter < last_chunk_index:
                                chunk_counter += 1
                                continue
                            
                            # Track delivery progress
                            last_chunk_index = chunk_counter
                            chunk_counter += 1
                            
                            # Accumulate the full response
                            full_response += chunk
                            
                            # Map the scene reported by the AI service to a response type
                            if scene == "study_plan":
                                response_type = "plan"
                            elif scene == "note_optimization":
                                response_type = "optimization"
                            elif scene == "task_breakdown":
                                response_type = "task"
                            elif scene == "greeting" or scene == "help":
                                response_type = "message"
                            elif scene == "error":
                                response_type = "error"
                            elif needs_edit:
                                response_type = "edit_suggestion"
                            
                            # Kimi-API-compatible delta payload
                            kimi_format_response = {
                                "choices": [{
                                    "delta": {
                                        "content": chunk
                                    },
                                    "index": 0,
                                    "finish_reason": None
                                }]
                            }
                            
                            # Mark the first chunk sent after a reconnect
                            if current_retry > 0 and chunk_counter == last_chunk_index + 1:
                                kimi_format_response["reconnected"] = True
                                kimi_format_response["retry_count"] = current_retry
                            
                            # Error chunk from the AI service: forward it and stop streaming
                            if chunk_data.get("type") == "error":
                                logger.warning(f"[{request_id}] 收到API错误响应: {chunk_data}")
                                
                                # Build an error payload for the client
                                error_response = {
                                    "type": "error",
                                    "content": chunk_data.get("content", "AI服务暂时不可用，请稍后再试"),
                                    "error_type": chunk_data.get("error_type", "service_error")
                                }
                                
                                # Forward the error to the client
                                yield f"data: {json.dumps(error_response, ensure_ascii=False)}\n\n"
                                
                                # Terminate the stream
                                break
                            
                            # Reset per chunk; holds the structured edit payload when set.
                            # NOTE(review): complete_response is only ever bound inside
                            # this loop body, so if the stream yields zero chunks the
                            # later `if complete_response:` raises NameError.
                            complete_response = None
                            
                            # Edit-suggestion chunks: build a structured response for the editor
                            if chunk_data.get("type") == "edit_suggestion" or chunk_data.get("scene") == "note_optimization":
                                logger.info(f"[{request_id}] 检测到编辑请求场景: 类型={chunk_data.get('type')}, 场景={chunk_data.get('scene')}")
                                
                                needs_edit = True
                                edit_suggestion = chunk_data.get("suggestedContent", "")
                                ai_response = chunk_data.get("ai_response", "我已根据您的要求修改了内容。")
                                
                                logger.info(f"[{request_id}] 分离编辑内容和响应: 响应长度={len(ai_response)}, 内容长度={len(edit_suggestion) if edit_suggestion else 0}")
                                
                                # Attach edit flags and suggested content to the final response
                                if complete_response is None:
                                    complete_response = {
                                        'complete_content': '',
                                        'type': 'edit_suggestion',
                                        'scene': 'note_optimization',
                                        'edit_content': True,  # explicitly mark this as an edit request
                                        'is_edit': True,  # consistent edit flag
                                        'confirmed': True  # tells the frontend to apply the content to the editor directly
                                    }
                                
                                # If there is suggested content, include it in the final response
                                if edit_suggestion:
                                    complete_response['suggestedContent'] = edit_suggestion
                                    # Include the AI reply to keep the structure clear
                                    complete_response['ai_response'] = ai_response
                                    # Key step: keep the content separate from the reply
                                    complete_response['contentOnly'] = edit_suggestion
                                    complete_response['responseOnly'] = ai_response
                            
                            # Serialize the delta event and track bytes sent
                            data_chunk = json.dumps(kimi_format_response, ensure_ascii=False)
                            total_bytes += len(data_chunk.encode('utf-8'))
                            
                            # Emit the event in Kimi streaming format
                            output = f"data: {data_chunk}\n\n"
                            yield output
                            
                            # Emit a statistics event every 20 chunks
                            if chunk_count % 20 == 0:
                                elapsed = time.time() - stream_start_time
                                stats = {
                                    "event": "stats",
                                    "tokens": token_count,
                                    "chunks": chunk_count,
                                    "bytes": total_bytes,
                                    "elapsed": f"{elapsed:.2f}s",
                                    "tokens_per_second": int(token_count / elapsed) if elapsed > 0 else 0
                                }
                                yield f"data: {json.dumps(stats, ensure_ascii=False)}\n\n"
                        
                        # Stream finished: emit the Kimi-style finish event
                        finish_event = {
                            "choices": [{
                                "delta": {},
                                "index": 0,
                                "finish_reason": "stop"
                            }]
                        }
                        yield f"data: {json.dumps(finish_event, ensure_ascii=False)}\n\n"
                        
                        # Emit a final event with the full content (custom format)
                        if complete_response:
                            final_response = complete_response
                        else:
                            # Clean the full response: strip boilerplate AI chatter
                            if hasattr(ai_service, '_clean_study_note_output'):
                                cleaned_full_response = ai_service._clean_study_note_output(full_response)
                            else:
                                # Fallback regex cleanup when the helper is unavailable
                                cleaned_full_response = re.sub(r'^(好的|这是|以下是|下面是).*?\n', '', full_response, flags=re.IGNORECASE)
                                cleaned_full_response = re.sub(r'\n(希望这对你有所帮助|希望这些信息对你有用|如有任何问题).*?$', '', cleaned_full_response, flags=re.IGNORECASE)
                                # Additionally strip advisory bullet points
                                cleaned_full_response = re.sub(r'\n- \*\*理论学习\*\*：.*?(\n|$)', '', cleaned_full_response, flags=re.IGNORECASE | re.MULTILINE)
                                cleaned_full_response = re.sub(r'\n- \*\*实践应用\*\*：.*?(\n|$)', '', cleaned_full_response, flags=re.IGNORECASE | re.MULTILINE)
                                cleaned_full_response = re.sub(r'\n- \*\*问题诊断\*\*：.*?(\n|$)', '', cleaned_full_response, flags=re.IGNORECASE | re.MULTILINE)
                                cleaned_full_response = re.sub(r'\n建议(大学生|学生)在学习这些知识点时.*?$', '', cleaned_full_response, flags=re.IGNORECASE | re.MULTILINE)
                                cleaned_full_response = re.sub(r'\n\n[^\n]*学习[^\n]*建议[^\n]*$', '', cleaned_full_response, flags=re.IGNORECASE | re.MULTILINE)
                            
                            # Extra cleanup for TCP-related note content
                            if "TCP" in cleaned_full_response and "传输控制协议" in cleaned_full_response:
                                # Remove advisory paragraphs common in TCP notes
                                cleaned_full_response = re.sub(r'\n对于大学生(来说)?，.*?$', '', cleaned_full_response, flags=re.IGNORECASE | re.MULTILINE)
                                cleaned_full_response = re.sub(r'在学习这些知识点时.*?$', '', cleaned_full_response, flags=re.IGNORECASE | re.MULTILINE)
                                cleaned_full_response = re.sub(r'通过(实验|实践|项目).*加深.*理解.*$', '', cleaned_full_response, flags=re.IGNORECASE | re.MULTILINE)
                            
                            # Edit-suggestion responses get the structured edit payload
                            if response_type == 'edit_suggestion' or scene == 'note_optimization' or scene == 'note_enrich':
                                # Extract the content and generate the reply
                                ai_service_instance = AIService()
                                
                                # 1. Try to pull the title from the original note content
                                original_title = ""
                                if hasattr(note, 'content') and note.content and note.content.startswith('#'):
                                    title_match = re.match(r'^(#[^#\n]+\n)', note.content)
                                    if title_match:
                                        original_title = title_match.group(1)
                                        logger.info(f"[{request_id}] 从原始内容提取到标题: {original_title}")
                                
                                # 2. Extract the suggested content from the full response
                                suggested_content = ai_service_instance._extract_suggested_content_from_text(cleaned_full_response)
                                
                                # 3. Re-attach the original title if extraction dropped it
                                if original_title and suggested_content and not suggested_content.startswith('#'):
                                    logger.info(f"[{request_id}] 将原始标题添加到提取的内容前")
                                    suggested_content = original_title + suggested_content
                                
                                # 4. Generate an explanatory reply for the edit
                                ai_explanation = ai_service_instance._generate_edit_explanation(cleaned_full_response, suggested_content)
                                
                                # 5. Build the full response object
                                final_response = {
                                    'complete_content': cleaned_full_response,
                                    'type': 'edit_suggestion',
                                    'scene': scene,
                                    'edit_content': True,
                                    'is_edit': True,
                                    'confirmed': True,  # tells the frontend to apply the content directly
                                    'suggestedContent': suggested_content or cleaned_full_response,
                                    'ai_response': ai_explanation,
                                    'contentOnly': suggested_content or cleaned_full_response,
                                    'responseOnly': ai_explanation,
                                    'preserveTitle': original_title != "",  # whether the original title was preserved
                                    'contentLength': len(suggested_content or cleaned_full_response)  # content length, for debugging
                                }
                            else:
                                final_response = {
                                    'complete_content': cleaned_full_response,
                                    'type': response_type,
                                    'scene': scene
                                }
                        
                        yield f"data: {json.dumps(final_response, ensure_ascii=False)}\n\n"
                        
                        # Emit final statistics
                        elapsed = time.time() - stream_start_time
                        final_stats = {
                            "event": "final_stats",
                            "request_id": request_id,
                            "tokens": token_count,
                            "chunks": chunk_count,
                            "bytes": total_bytes,
                            "total_time": f"{elapsed:.2f}s",
                            "tokens_per_second": int(token_count / elapsed) if elapsed > 0 else 0
                        }
                        yield f"data: {json.dumps(final_stats, ensure_ascii=False)}\n\n"
                        
                        # Success: leave the retry loop
                        break
                        
                    except Exception as e:
                        logger.warning(f"[{request_id}] 流式输出中断，尝试重连 ({current_retry+1}/{max_retries}): {str(e)}")
                        
                        if current_retry < max_retries:
                            # Tell the client we are about to reconnect
                            reconnect_notice = {
                                "reconnecting": True,
                                "retry_count": current_retry + 1,
                                "max_retries": max_retries,
                                "reason": str(e)
                            }
                            yield f"data: {json.dumps(reconnect_notice, ensure_ascii=False)}\n\n"
                            
                            # Back off briefly, then retry
                            time.sleep(retry_delay)
                            current_retry += 1
                        else:
                            # Out of retries: emit a terminal error event
                            logger.error(f"[{request_id}] 流式对话重连失败，已达最大重试次数: {str(e)}")
                            error_event = {
                                "error": {
                                    "message": f"连接中断，重试{max_retries}次后仍未恢复: {str(e)}",
                                    "type": "connection_error",
                                    "code": 503,
                                    "partial_response": full_response,
                                    "request_id": request_id
                                }
                            }
                            yield f"data: {json.dumps(error_event, ensure_ascii=False)}\n\n"
                            break
                
                # Record chat history on a background thread (non-draft notes only)
                if not is_draft_mode:
                    thread = threading.Thread(
                        target=self._record_chat_history,
                        args=(request.user, note, message, full_response, is_confirmation, request_id)
                    )
                    thread.daemon = True  # daemon thread: never blocks server shutdown
                    thread.start()
                
                logger.info(f"[{request_id}] 流式响应完成，总耗时: {time.time() - start_time:.2f}秒，发送块数: {chunk_count}")
                    
            # Build the SSE response (allows cross-origin requests)
            response = StreamingHttpResponse(
                event_stream(),
                content_type='text/event-stream'
            )
            # Cache-control and streaming-related headers
            response['Cache-Control'] = 'no-cache, no-transform'
            response['X-Accel-Buffering'] = 'no'  # disable Nginx buffering
            response['Access-Control-Allow-Origin'] = '*'
            
            # Headers required for cross-origin requests
            response['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
            response['Access-Control-Allow-Headers'] = 'Content-Type, Authorization'
            response['Access-Control-Expose-Headers'] = 'X-Process-Time, X-Request-ID'
            
            # Propagate the request ID to the client
            response['X-Request-ID'] = request_id
            
            return response
            
        except Exception as e:
            logger.error(f"[{request_id}] 处理流式对话请求失败: {str(e)}")
            return Response({
                'status': 'error',
                'message': str(e),
                'error_type': 'server_error',
                'code': 500,
                'request_id': request_id,
                'processing_time': f"{time.time() - start_time:.2f}s"
            }, status=500)
    
    def _record_chat_history(self, user, note, message, response, is_confirmation, request_id=None):
        """Persist one chat exchange to ChatHistory, retrying transient failures.

        Intended to run on a background (daemon) thread, so errors are logged
        and never raised to the caller.
        """
        try:
            # Optional request-id prefix keeps log lines correlated with the request
            # (the original duplicated every log call in with/without-id branches).
            prefix = f"[{request_id}] " if request_id else ""
            max_retries = 3
            for attempt in range(1, max_retries + 1):
                try:
                    ChatHistory.objects.create(
                        user=user,
                        note=note,
                        message=message,
                        response=response,
                        message_type='confirmation' if is_confirmation else 'regular'
                    )
                    if request_id:
                        logger.info(f"[{request_id}] 成功记录对话历史")
                    return
                except Exception as e:
                    # Brief pause before retrying.
                    # NOTE: uses the module-level `time` import; the original
                    # re-imported time inside this except block redundantly.
                    time.sleep(0.5)
                    logger.warning(f"{prefix}记录对话历史重试 {attempt}/{max_retries}: {str(e)}")

            # All attempts failed
            logger.error(f"{prefix}记录对话历史失败，已重试{max_retries}次")
        except Exception as e:
            # Last-resort guard: never let an exception escape the worker thread
            logger.error(f"{prefix}记录对话历史失败: {str(e)}")


class NoteSummaryView(APIView):
    """Generate (and cache) an AI summary for a note."""
    permission_classes = [IsAuthenticated]

    def post(self, request, pk):
        """Return a summary for note *pk*, serving a cached copy when fresh.

        The cache key embeds the note's ``updated_at`` timestamp, so editing
        the note automatically invalidates the previous summary.
        """
        try:
            note = Note.objects.get(pk=pk, user=request.user)
            cache_key = f"note_summary_{pk}_{note.updated_at.timestamp()}"

            # Serve from cache when possible
            summary = cache.get(cache_key)
            if summary is None:
                # FIX: dropped the unused `max_tokens` local the original read
                # from request.data but never passed anywhere.
                summary = ai_service.generate_summary(note.content)

                if summary:
                    # Cache for one hour
                    cache.set(cache_key, summary, timeout=3600)

            if summary:
                # FIX: the original if/else on isinstance(summary, dict) had two
                # byte-identical branches; both dict and string summaries are
                # wrapped the same way.
                return BaseResponse.success(
                    {"summary": summary},
                    "摘要生成成功"
                )
            return BaseResponse.error("摘要生成失败")

        except Note.DoesNotExist:
            return BaseResponse.error("笔记不存在", status.HTTP_404_NOT_FOUND)
        except Exception as e:
            logger.error(f"生成摘要失败: {str(e)}")
            return BaseResponse.error(str(e))


class NotePlanView(APIView):
    """Generate an action plan from a note's content via the AI service."""
    permission_classes = [IsAuthenticated]

    def post(self, request, pk):
        """Return an AI-generated plan for note *pk* owned by the caller."""
        try:
            note = Note.objects.get(pk=pk, user=request.user)

            # The AI service decides output length itself; no max_tokens
            # parameter is forwarded here.
            generated_plan = ai_service.generate_plan(note.content)

            if not generated_plan:
                return BaseResponse.error("计划生成失败")
            return BaseResponse.success(
                {"plan": generated_plan},
                "计划生成成功"
            )
        except Note.DoesNotExist:
            return BaseResponse.error("笔记不存在", status.HTTP_404_NOT_FOUND)
        except Exception as e:
            logger.error(f"生成计划失败: {str(e)}")
            return BaseResponse.error(str(e))


class SmartNoteView(APIView):
    """Smart note view — create AI-enhanced notes from text/voice/image input."""
    permission_classes = [IsAuthenticated]

    def __init__(self, **kwargs):
        # FIX: accept **kwargs — Django's View.__init__ takes the initkwargs
        # passed through as_view(); swallowing them broke instantiation.
        super().__init__(**kwargs)
        self.ai_manager = AIManager()

    async def post(self, request):
        """Create a smart note.

        Request body: ``content`` (required), ``input_type``
        (text/voice/image, default text) and ``note_type``
        (general/study/meeting/task, default general).
        """
        try:
            input_type = request.data.get('input_type', 'text')  # text/voice/image
            content = request.data.get('content')
            note_type = request.data.get('note_type', 'general')  # general/study/meeting/task

            if not content:
                # FIX: self.get_error_response does not exist on APIView
                # (raised AttributeError); use the file's BaseResponse helper.
                return BaseResponse.error("请提供内容")

            # Normalize the raw input (e.g. transcribe voice) to plain text.
            processed_content = await self._process_input(
                input_type,
                content,
                request.user.id
            )

            # Use AI to enhance the content.
            enhanced_content = await self.ai_manager.enhance_note_content(
                processed_content,
                style=note_type
            )

            # Analyze content and collect suggestions.
            analysis = await self.ai_manager.analyze_content(
                enhanced_content,
                analysis_type=note_type,
                include_suggestions=True
            )

            # Persist the note.
            note = Note.objects.create(
                user=request.user,
                title=analysis.get('title', '新笔记'),
                content=enhanced_content,
                note_type=note_type,
                is_ai_enhanced=True
            )

            # Apply tag/category/schedule suggestions from the analysis.
            await self._handle_suggestions(note, analysis)

            # FIX: self.get_success_response does not exist on APIView;
            # use BaseResponse for consistency with the other views.
            return BaseResponse.success({
                'note': NoteSerializer(note).data,
                'analysis': analysis,
                'suggestions': analysis.get('suggestions', [])
            })

        except Exception as e:
            logger.error(f"创建智能笔记失败: {str(e)}")
            return BaseResponse.error(str(e))

    async def _process_input(self, input_type: str, content: str, user_id: int) -> str:
        """Convert non-text input to plain text; pass text through unchanged."""
        if input_type == 'voice':
            # Voice input: transcribe via the AI manager and use the raw text.
            voice_result = await self.ai_manager.process_voice_command(
                content,
                user_id,
                {'type': 'note_creation'}
            )
            return voice_result['original_text']

        elif input_type == 'image':
            # Image input is not implemented yet; falls through to raw content.
            pass

        return content

    async def _handle_suggestions(self, note: Note, analysis: dict):
        """Apply AI analysis suggestions: tags, category, related schedules."""
        # Tag suggestions derived from extracted keywords.
        if 'keywords' in analysis:
            for keyword in analysis['keywords']:
                tag, _ = Tag.objects.get_or_create(
                    name=keyword,
                    user=note.user
                )
                note.tags.add(tag)

        # Category suggestion.
        if 'category' in analysis:
            category, _ = Category.objects.get_or_create(
                name=analysis['category'],
                user=note.user
            )
            note.category = category
            note.save()

        # Create related schedules, if any were suggested.
        if 'suggested_tasks' in analysis:
            for task in analysis['suggested_tasks']:
                if task['type'] == 'schedule':
                    await self.ai_manager.create_schedule(
                        content=task['content'],
                        user_id=note.user.id,
                        schedule_type=task.get('schedule_type', 'task')
                    )


def process_with_ai(note):
    """Run the AI pipeline on *note*.

    Marks the note's ``ai_processing_status`` as ``failed`` and raises
    :class:`AIProcessingError` on any error, including a missing API key.
    """
    try:
        if not settings.DEEPSEEK_API_KEY:
            raise AIProcessingError('DeepSeek API key 未配置')
        result = ai_service.process_note(note)
    except Exception as exc:
        # Any failure path flags the note before surfacing the error.
        note.ai_processing_status = 'failed'
        note.save()
        raise AIProcessingError(str(exc))
    return result


class EnhanceNoteView(APIView):
    """Note enhancement view — polish note content via the AI service."""
    permission_classes = [IsAuthenticated]

    async def post(self, request):
        """Enhance content given either a ``note_id`` or raw ``content``.

        When both are supplied, ``note_id`` wins and the stored note's
        content is enhanced.
        """
        try:
            note_id = request.data.get('note_id')
            content = request.data.get('content')
            note_type = request.data.get('type', 'general')
            user_preferences = request.data.get('preferences', {})

            if not note_id and not content:
                return BaseResponse.error("请提供笔记ID或内容")

            if note_id:
                note = await self._get_note(note_id, request.user)
                content = note.content

            # FIX: renamed local from `ai_service` — it shadowed the
            # module-level `ai_service` import used by sibling views.
            service = AIService()
            enhanced_content = await service.enhance_note_content(
                content=content,
                note_type=note_type,
                user_preferences=user_preferences
            )

            return BaseResponse.success(enhanced_content)

        except Note.DoesNotExist:
            # Consistent with the other note views: missing note -> 404.
            return BaseResponse.error("笔记不存在", status.HTTP_404_NOT_FOUND)
        except Exception as e:
            logger.error(f"笔记增强失败: {str(e)}")
            return BaseResponse.error(str(e))

    async def _get_note(self, note_id, user):
        """Fetch the caller's note; raises Note.DoesNotExist if absent.

        FIX: the original `post` awaited `self._get_note(...)` but the
        helper was never defined, so every note_id request raised
        AttributeError. Sync ORM access inside an async method follows
        this file's existing convention (see SmartNoteView).
        """
        return Note.objects.get(pk=note_id, user=user)


class StudyPlanView(APIView):
    """Study plan view — build a study plan from a set of notes."""
    permission_classes = [IsAuthenticated]

    async def post(self, request):
        """Generate a study plan for the given notes, goal and target date."""
        try:
            payload = request.data
            note_list = payload.get('notes', [])
            goal = payload.get('study_goal')
            deadline = payload.get('target_date')

            if not note_list:
                return BaseResponse.error("请提供相关笔记")

            planner = AIService()
            plan = await planner.generate_study_plan(
                notes=note_list,
                target_date=deadline,
                study_goal=goal
            )
            return BaseResponse.success(plan)

        except Exception as e:
            logger.error(f"生成学习计划失败: {str(e)}")
            return BaseResponse.error(str(e))


class OptimizeContentView(APIView):
    """Optimize note content via the AI service."""
    permission_classes = [IsAuthenticated]

    def post(self, request, pk):
        """Optimize the content of note *pk* owned by the requester."""
        try:
            note = Note.objects.get(pk=pk, user=request.user)

            # Delegate the actual rewriting to the AI service.
            result = ai_service.optimize_content(note.content)

            if not result:
                return BaseResponse.error("内容优化失败")
            return BaseResponse.success(
                {"optimizedContent": result},
                "内容优化成功"
            )
        except Note.DoesNotExist:
            return BaseResponse.error("笔记不存在", status.HTTP_404_NOT_FOUND)
        except Exception as e:
            logger.error(f"优化内容失败: {str(e)}")
            return BaseResponse.error(str(e))
