from flask import Flask, request, jsonify, render_template, url_for
from flask_cors import CORS
import requests
import logging
import time
from datetime import datetime
import threading
import subprocess
import os
import json

# Logging configuration: write to both chat.log and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s [%(levelname)s] %(message)s',
    handlers=[
        logging.FileHandler('chat.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the browser front end

# Event used to signal an in-progress generation that it should stop
# (set by /stop, cleared at the start of each /chat request).
generation_control = threading.Event()

# Directory where chat transcripts are persisted as one JSON file each.
CHAT_HISTORY_DIR = 'chat_history'  # chat history save directory
if not os.path.exists(CHAT_HISTORY_DIR):
    os.makedirs(CHAT_HISTORY_DIR)

# Default locations of the Ollama executable per platform.
OLLAMA_PATH = {
    'windows': r"%LOCALAPPDATA%\Programs\Ollama\ollama.exe",  # Windows default install path
    'linux': 'ollama',  # Linux/Mac: resolved via PATH
}

@app.route('/')
def index():
    """Render and return the main chat page."""
    logger.info("访问主页")
    page = render_template('index.html')
    return page

@app.route('/stop', methods=['POST'])
def stop_generation():
    """Signal the running generation to stop and ask Ollama to stop the model.

    Expects an optional JSON body {"model": str}. Returns JSON
    {"status": "success"|"error", "message": ...}; errors are reported with
    HTTP 500, matching the other service routes in this file.
    """
    try:
        # Raise the stop flag checked by the generation code.
        generation_control.set()
        logger.info("收到停止生成请求")

        # Make sure the Ollama service is reachable before calling it.
        if not check_ollama_service():
            if not start_ollama_service():
                raise Exception("无法启动 Ollama 服务")
            logger.info("Ollama 服务已自动启动")

        # Determine a fallback model in case the client did not send one.
        # A timeout prevents the route from hanging if Ollama is wedged.
        models_response = requests.get('http://localhost:11434/api/tags',
                                       timeout=10)
        models = models_response.json()['models']
        default_model = models[0]['name'] if models else 'deepseek-r1:7b'

        # silent=True avoids Flask raising a 400 for a missing/non-JSON body.
        data = request.get_json(silent=True) or {}
        selected_model = data.get('model', default_model)

        # Ask Ollama to stop generating for the selected model.
        response = requests.post('http://localhost:11434/api/stop',
            json={
                "model": selected_model
            },
            timeout=10)

        return jsonify({
            "status": "success",
            "message": "已停止生成"
        })
    except Exception as e:
        logger.exception("停止生成时发生错误")
        return jsonify({
            "status": "error",
            "message": str(e)
        }), 500

@app.route('/chat', methods=['POST'])
def chat():
    """Forward a user message to Ollama and return the model's reply.

    Expects JSON {"message": str, "customPrompt": str?, "model": str?}.
    Returns JSON with the reply, a request id and timing info. Ollama API
    failures yield HTTP 502 and unexpected errors HTTP 500, consistent with
    the other routes in this file.
    """
    # Pre-initialize so the except handler never needs a locals() lookup.
    request_id = None
    try:
        # Clear the stop flag so a previous /stop does not affect this run.
        generation_control.clear()

        start_time = time.time()
        request_id = datetime.now().strftime('%Y%m%d%H%M%S%f')

        # Make sure the Ollama service is reachable before calling it.
        if not check_ollama_service():
            if not start_ollama_service():
                raise Exception("无法启动 Ollama 服务")
            logger.info("Ollama 服务已自动启动")

        # Determine a fallback model in case the client did not send one.
        models_response = requests.get('http://localhost:11434/api/tags',
                                       timeout=10)
        models = models_response.json()['models']
        default_model = models[0]['name'] if models else 'deepseek-r1:7b'

        # silent=True avoids Flask raising a 400 for a missing/non-JSON body.
        data = request.get_json(silent=True) or {}
        user_message = data.get('message', '')
        custom_prompt = data.get('customPrompt', '').strip()
        selected_model = data.get('model', default_model)

        logger.info(f"请求 ID: {request_id}")
        logger.info(f"收到用户消息: {user_message}")

        # System prompt: ask the model to wrap its reasoning in <think> tags
        # and to mark important content in bold.
        system_prompt = """请在回答问题时：
1. 将你的思考过程用<think>标签包裹
2. 将重要的内容、关键点用**加粗标记**

格式示例：
<think>
思考过程：
1. 分析问题要点
2. 查找相关信息
3. 形成解决方案
</think>

这是一个示例回答，其中**这部分是重要内容**，需要特别注意。
普通文本使用默认样式，而**重要的定义**和**关键概念**使用加粗标记。

实际答案内容..."""

        # A user-supplied custom prompt, if any, is prepended.
        if custom_prompt:
            system_prompt = f"{custom_prompt}\n\n{system_prompt}"

        full_prompt = f"{system_prompt}\n\n用户问题：{user_message}"

        # Non-streaming generation. The generous timeout still lets long
        # generations finish while preventing an infinite hang if the
        # Ollama server dies mid-request.
        logger.info(f"开始调用 Ollama API...")
        api_start_time = time.time()

        response = requests.post('http://localhost:11434/api/generate',
            json={
                "model": selected_model,
                "prompt": full_prompt,
                "stream": False
            },
            timeout=600)

        api_duration = time.time() - api_start_time
        total_duration = time.time() - start_time
        logger.info(f"Ollama API 调用完成，耗时: {api_duration:.2f}秒")

        if response.status_code == 200:
            bot_response = response.json()['response']
            # Prepend timing info inside the model's <think> section so the
            # front end can display it with the reasoning.
            bot_response = bot_response.replace('<think>', f'<think>\n(API: {api_duration:.2f}s, 总计: {total_duration:.2f}s)\n')

            logger.info(f"AI 回复: {bot_response[:100]}..." if len(bot_response) > 100 else f"AI 回复: {bot_response}")

            return jsonify({
                "response": bot_response,
                "status": "success",
                "request_id": request_id,
                "timing": {
                    "api_duration": f"{api_duration:.2f}s",
                    "total_duration": f"{total_duration:.2f}s"
                }
            })
        else:
            error_msg = f"Ollama API 错误，状态码: {response.status_code}"
            logger.error(error_msg)
            logger.error(f"错误详情: {response.text}")
            # 502: the upstream Ollama service answered with an error.
            return jsonify({
                "response": "发生错误",
                "status": "error",
                "error": error_msg,
                "request_id": request_id
            }), 502

    except Exception as e:
        logger.exception("处理请求时发生错误")
        return jsonify({
            "response": f"错误: {str(e)}",
            "status": "error",
            "request_id": request_id
        }), 500

# Health check for the local Ollama service.
def check_ollama_service():
    """Return True if the Ollama API answers on localhost:11434."""
    try:
        # A short timeout keeps the health check from hanging callers.
        response = requests.get('http://localhost:11434/api/version',
                                timeout=3)
        return response.status_code == 200
    except requests.RequestException:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt/SystemExit.
        return False

# Launch the Ollama server process and wait for it to come up.
def start_ollama_service():
    """Start `ollama serve` for the current platform.

    Returns True once the HTTP API responds (polled for up to 30s);
    False if the executable is missing, the spawn fails, or startup
    times out.
    """
    try:
        # On Windows, launch the installed executable without a console.
        if os.name == 'nt':
            ollama_path = os.path.expandvars(OLLAMA_PATH['windows'])
            if not os.path.exists(ollama_path):
                logger.error(f"Ollama 可执行文件未找到: {ollama_path}")
                return False

            # Hide the terminal window of the spawned process.
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            startupinfo.wShowWindow = subprocess.SW_HIDE
            subprocess.Popen([ollama_path, 'serve'],
                           startupinfo=startupinfo)
        else:
            # DEVNULL instead of PIPE: nothing ever read the pipes, so a
            # chatty server could fill the OS pipe buffer and block.
            subprocess.Popen([OLLAMA_PATH['linux'], 'serve'],
                           stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL)

        # Poll the API for up to 30 seconds.
        for _ in range(30):
            if check_ollama_service():
                logger.info("Ollama 服务启动成功")
                return True
            time.sleep(1)
        logger.error("Ollama 服务启动超时")
        return False
    except Exception as e:
        logger.error(f"启动 Ollama 服务失败: {str(e)}")
        return False

# Route for the front end to poll the service status.
@app.route('/check_ollama', methods=['GET'])
def check_ollama():
    """Report whether the Ollama service is currently reachable."""
    state = "running" if check_ollama_service() else "stopped"
    return jsonify({"status": state})

@app.route('/start_ollama', methods=['POST'])
def start_ollama():
    """Start the Ollama service on demand if it is not already running."""
    # Already up: nothing to do.
    if check_ollama_service():
        return jsonify({
            "status": "success",
            "message": "Ollama 服务已在运行"
        })

    # Try to launch it; a failed launch is reported with HTTP 500.
    started = start_ollama_service()
    if not started:
        return jsonify({
            "status": "error",
            "message": "Ollama 服务启动失败"
        }), 500
    return jsonify({
        "status": "success",
        "message": "Ollama 服务启动成功"
    })

# Route that returns the list of locally installed models.
@app.route('/models', methods=['GET'])
def get_models():
    """Fetch the available models from the Ollama tags API.

    Returns {"status": "success", "models": [...]} on success, or an
    error payload with HTTP 500.
    """
    try:
        logger.info("开始获取模型列表...")
        # Timeout prevents this route from hanging if Ollama is wedged.
        response = requests.get('http://localhost:11434/api/tags', timeout=10)
        if response.status_code == 200:
            models = response.json().get('models', [])
            # Log only name and size, not the full model payload.
            model_info = [f"{model.get('name')}({model.get('size', 'unknown size')})" for model in models]
            logger.info(f"可用模型: {', '.join(model_info)}")
            return jsonify({
                "status": "success",
                "models": models
            })
        else:
            logger.error(f"获取模型列表失败，状态码: {response.status_code}")
            return jsonify({
                "status": "error",
                "message": "获取模型列表失败"
            }), 500
    except Exception as e:
        logger.exception("获取模型列表时发生错误")
        return jsonify({
            "status": "error",
            "message": str(e)
        }), 500

# Helpers for persisting and loading chat history.
def save_conversation(conversation_id, title, messages):
    """Write the conversation to CHAT_HISTORY_DIR/<conversation_id>.json.

    Returns True on success, False on any I/O or encoding error.
    """
    try:
        filename = os.path.join(CHAT_HISTORY_DIR, f'{conversation_id}.json')
        data = {
            'id': conversation_id,
            'title': title,
            'messages': messages,
            'timestamp': datetime.now().isoformat()
        }
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)
        # Log the actual file written (the message previously contained a
        # broken "(unknown)" placeholder).
        logger.info(f"保存对话记录: {filename}")
        return True
    except Exception as e:
        logger.error(f"保存对话记录失败: {str(e)}")
        return False

def load_conversations():
    """Load all saved conversations, newest first.

    A single corrupt or unreadable file is skipped (and logged) instead of
    discarding the entire history, which the previous all-or-nothing
    try/except did.
    """
    conversations = []
    try:
        entries = os.listdir(CHAT_HISTORY_DIR)
    except OSError as e:
        logger.error(f"加载对话记录失败: {str(e)}")
        return []
    for filename in entries:
        if not filename.endswith('.json'):
            continue
        path = os.path.join(CHAT_HISTORY_DIR, filename)
        try:
            with open(path, 'r', encoding='utf-8') as f:
                conversations.append(json.load(f))
        except (OSError, json.JSONDecodeError) as e:
            # Skip the bad file; keep the rest of the history usable.
            logger.error(f"加载对话记录失败: {str(e)}")
    # Sort by timestamp, most recent first.
    conversations.sort(key=lambda x: x.get('timestamp', ''), reverse=True)
    return conversations

def delete_conversation(conversation_id):
    """Delete CHAT_HISTORY_DIR/<conversation_id>.json.

    Returns True if the file existed and was removed, False otherwise.
    """
    try:
        filename = os.path.join(CHAT_HISTORY_DIR, f'{conversation_id}.json')
        if os.path.exists(filename):
            os.remove(filename)
            # Log the actual file removed (the message previously contained
            # a broken "(unknown)" placeholder).
            logger.info(f"删除对话记录: {filename}")
            return True
        return False
    except Exception as e:
        logger.error(f"删除对话记录失败: {str(e)}")
        return False

# Routes exposing chat history to the front end.
@app.route('/conversations', methods=['GET'])
def get_conversations():
    """Return every saved conversation as JSON."""
    try:
        history = load_conversations()
        payload = {
            "status": "success",
            "conversations": history
        }
        return jsonify(payload)
    except Exception as e:
        logger.exception("获取对话记录失败")
        return jsonify({
            "status": "error",
            "message": str(e)
        })

@app.route('/conversations/<conversation_id>', methods=['POST'])
def save_chat(conversation_id):
    """Persist the posted conversation under the given id."""
    try:
        payload = request.json
        title = payload.get('title', '新对话')
        messages = payload.get('messages', [])
        ok = save_conversation(conversation_id, title, messages)
        return jsonify({"status": "success" if ok else "error"})
    except Exception as e:
        logger.exception("保存对话记录失败")
        return jsonify({
            "status": "error",
            "message": str(e)
        })

@app.route('/conversations/<conversation_id>', methods=['DELETE'])
def delete_chat(conversation_id):
    """Remove the saved conversation with the given id."""
    try:
        removed = delete_conversation(conversation_id)
        status = "success" if removed else "error"
        return jsonify({"status": status})
    except Exception as e:
        logger.exception("删除对话记录失败")
        return jsonify({
            "status": "error",
            "message": str(e)
        })

if __name__ == '__main__':
    logger.info("启动服务器...")
    # NOTE(review): debug=True enables the Werkzeug debugger/auto-reloader —
    # development only; confirm it is disabled before any deployment.
    app.run(debug=True, port=5000) 