from flask import Blueprint, jsonify, request
import os
from services.log_collector import LinuxLogCollector, LogCollectScheduler
import yaml
import requests
import json
from datetime import datetime


logs_bp = Blueprint('logs', __name__)

# Module-level collector and scheduler instances shared by all routes.
# _scheduler is created lazily by /schedule and torn down by /stop.
_collector = LinuxLogCollector()
_scheduler = None

# API design separates collection from analysis:
# collection: /collect, /schedule, /stop, /list, /content, /latest
# analysis:   /analyze, /models, /analysis, /analysis-list, /storage-info

def _get_log_defaults():
    try:
        conf_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'conf.yaml')
        if os.path.exists(conf_path):
            with open(conf_path, 'r', encoding='utf-8', errors='ignore') as f:
                conf = yaml.safe_load(f) or {}
            return conf.get('log_collection', {}) or {}
    except Exception:
        pass
    return {}


# ==================== Log collection APIs ====================

@logs_bp.route('/collect', methods=['POST'])
def collect_logs():
    """
    Trigger a log collection run manually.

    Modes:
        default   - collect from the local host.
        path_list - collect from a file listing log paths
                    (falls back to backend/log.txt when list_file is empty).

    JSON body values override the `log_collection` defaults from conf.yaml.
    Malformed numeric values fall back to those defaults instead of
    failing the request (previously `limit_per_source` alone could raise
    an unhandled ValueError and return HTTP 500).

    Results are stored under backend/data/logs/{host}/{date}/.
    """
    def _as_int(value, fallback):
        # Best-effort int coercion; any failure falls back to the default.
        try:
            return int(value)
        except Exception:
            return fallback

    data = request.get_json(silent=True) or {}
    defaults = _get_log_defaults()

    def _as_list(key):
        # Only accept JSON arrays; anything else is treated as "not given".
        value = data.get(key)
        return value if isinstance(value, list) else None

    extra_files = _as_list('extra_files')
    mode = data.get('mode', 'default')  # default | path_list
    min_level = str(data.get('min_level', defaults.get('min_level', 'WARN')))

    since_hours = data.get('since_hours', defaults.get('since_hours', 24))
    if since_hours is not None:
        # None means "no time limit"; otherwise coerce to int.
        since_hours = _as_int(since_hours, defaults.get('since_hours', 24))

    include_keywords = _as_list('include_keywords')
    exclude_keywords = _as_list('exclude_keywords')
    max_lines_per_source = _as_int(
        data.get('max_lines_per_source', defaults.get('max_lines_per_source', 10000)),
        defaults.get('max_lines_per_source', 10000))
    max_total_lines = _as_int(
        data.get('max_total_lines', defaults.get('max_total_lines', 50000)),
        defaults.get('max_total_lines', 50000))
    incremental = bool(data.get('incremental', defaults.get('incremental', True)))
    journal_units = data.get('journal_units', defaults.get('journal_units'))
    save_raw = bool(data.get('save_raw', defaults.get('save_raw', False)))

    if mode == 'path_list':
        limit_per_source = _as_int(data.get('limit_per_source', 10), 10)
        list_file = data.get('list_file')
        result = _collector.collect_from_path_list(
            list_file=list_file,
            limit_per_source=limit_per_source,
            min_level=min_level,
            include_keywords=include_keywords,
            exclude_keywords=exclude_keywords,
            save_raw=save_raw,
        )
    else:
        result = _collector.collect_local(
            extra_files=extra_files,
            min_level=min_level,
            since_hours=since_hours,
            include_keywords=include_keywords,
            exclude_keywords=exclude_keywords,
            max_lines_per_source=max_lines_per_source,
            max_total_lines=max_total_lines,
            incremental=incremental,
            journal_units=journal_units,
            save_raw=save_raw,
        )

    return jsonify({
        'status': 'success',
        'result': result,
        'storage_info': _collector.get_storage_info()
    })


@logs_bp.route('/schedule', methods=['POST'])
def schedule_collect():
    """
    Start (or restart) the periodic log collection scheduler.

    If a scheduler already exists it is stopped and replaced with one
    built from the current request parameters; the response status
    distinguishes 'started' from 'restarted'. The two code paths
    previously duplicated the scheduler construction verbatim — they
    now share a single construction site.

    Results are stored under backend/data/logs/{host}/{date}/.
    """
    global _scheduler

    def _as_int(value, fallback):
        # Best-effort int coercion; any failure falls back to the default.
        try:
            return int(value)
        except Exception:
            return fallback

    data = request.get_json(silent=True) or {}
    defaults = _get_log_defaults()

    # Seconds between runs; default 5 minutes. Malformed values fall back
    # to the default instead of raising an unhandled ValueError (HTTP 500).
    interval = _as_int(data.get('interval', 300), 300)
    extra_files = data.get('extra_files') if isinstance(data.get('extra_files'), list) else None
    min_level = str(data.get('min_level', defaults.get('min_level', 'WARN')))

    since_hours = data.get('since_hours', defaults.get('since_hours', 24))
    if since_hours is not None:
        # None means "no time limit"; otherwise coerce to int.
        since_hours = _as_int(since_hours, defaults.get('since_hours', 24))

    include_keywords = data.get('include_keywords') if isinstance(data.get('include_keywords'), list) else None
    exclude_keywords = data.get('exclude_keywords') if isinstance(data.get('exclude_keywords'), list) else None
    max_lines_per_source = _as_int(
        data.get('max_lines_per_source', defaults.get('max_lines_per_source', 10000)),
        defaults.get('max_lines_per_source', 10000))
    save_raw = bool(data.get('save_raw', defaults.get('save_raw', False)))
    # NOTE(review): max_total_lines / incremental / journal_units were parsed
    # here previously but never forwarded to LogCollectScheduler — confirm
    # whether the scheduler should accept them before wiring them through.

    restarting = _scheduler is not None
    if restarting:
        _scheduler.stop()

    _scheduler = LogCollectScheduler(
        _collector,
        interval_seconds=interval,
        extra_files=extra_files,
        min_level=min_level,
        since_hours=since_hours,
        include_keywords=include_keywords,
        exclude_keywords=exclude_keywords,
        max_lines_per_source=max_lines_per_source,
        save_raw=save_raw,
    )
    _scheduler.start()

    return jsonify({
        'status': 'restarted' if restarting else 'started',
        'interval': interval,
        'storage_info': _collector.get_storage_info()
    })


@logs_bp.route('/stop', methods=['POST'])
def stop_schedule():
    """Stop the periodic collection scheduler if one is running."""
    global _scheduler
    if not _scheduler:
        return jsonify({'status': 'idle'})
    _scheduler.stop()
    _scheduler = None
    return jsonify({'status': 'stopped'})


@logs_bp.route('/list', methods=['GET'])
def list_logs():
    """Return the collected-log directory as a nested dict tree.

    Each directory becomes a nested key; a directory containing files
    gets a 'files' entry listing them. The 'logsanalysis' subtree (AI
    reports) is excluded — it is served by the analysis endpoints.
    """
    base_dir = _collector.base_dir
    tree = {}

    if not os.path.exists(base_dir):
        return jsonify({'base': base_dir, 'tree': tree, 'error': '目录不存在'})

    for root, _dirs, files in os.walk(base_dir):
        rel = os.path.relpath(root, base_dir)
        parts = [] if rel == '.' else rel.split(os.sep)

        # Skip the AI-analysis subtree entirely.
        if 'logsanalysis' in parts:
            continue

        node = tree
        for part in parts:
            node = node.setdefault(part, {})
        if files:
            node['files'] = files

    return jsonify({'base': base_dir, 'tree': tree})


# Backward-compat endpoints: /api/logs and /api/logs/ (bare blueprint root)
@logs_bp.route('', methods=['GET'])
@logs_bp.route('/', methods=['GET'])
def list_logs_compat():
    """Alias for list_logs() kept for older clients hitting the blueprint root."""
    return list_logs()


def _safe_join(base: str, *paths: str) -> str:
    target = os.path.abspath(os.path.join(base, *paths))
    base_abs = os.path.abspath(base)
    if not target.startswith(base_abs + os.sep) and target != base_abs:
        raise ValueError('非法路径')
    return target


@logs_bp.route('/content', methods=['GET'])
def get_log_content():
    """
    Read one page of lines from a collected log file.

    Query params:
        path      - file path relative to the collector base dir (required)
        page      - 1-based page number (default 1)
        page_size - lines per page (default 500)
        tail      - 'true' to jump to the last page

    Returns the requested lines plus total_lines so clients can paginate.
    Path traversal outside the base dir is rejected with HTTP 400.
    """
    base_dir = _collector.base_dir
    rel_path = request.args.get('path', '')

    # Bug fix: malformed page/page_size previously raised an unhandled
    # ValueError (HTTP 500), and page_size=0 caused a ZeroDivisionError
    # in the tail-page computation below.
    try:
        page = int(request.args.get('page', 1))
        page_size = int(request.args.get('page_size', 500))
    except ValueError:
        return jsonify({'error': 'page/page_size 参数无效'}), 400
    page = max(1, page)
    page_size = max(1, page_size)
    tail = request.args.get('tail', 'false').lower() == 'true'

    if not rel_path:
        return jsonify({'error': '缺少 path 参数'}), 400

    try:
        file_path = _safe_join(base_dir, rel_path)
        if not os.path.isfile(file_path):
            return jsonify({'error': '文件不存在'}), 404

        # First pass: count lines so tail/page math and metadata are exact.
        total_lines = 0
        with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
            for _ in f:
                total_lines += 1

        if tail:
            # Jump to the final page (at least page 1 for empty files).
            page = max(1, (total_lines + page_size - 1) // page_size)

        start = (page - 1) * page_size
        end = min(start + page_size, total_lines)
        lines = []
        if start < total_lines:
            # Second pass: stream only the requested window.
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                for idx, line in enumerate(f):
                    if idx < start:
                        continue
                    if idx >= end:
                        break
                    lines.append(line.rstrip('\n'))

        return jsonify({
            'path': rel_path,
            'page': page,
            'page_size': page_size,
            'total_lines': total_lines,
            'lines': lines
        })
    except ValueError:
        # Raised by _safe_join on path traversal attempts.
        return jsonify({'error': '非法路径'}), 400


@logs_bp.route('/latest', methods=['GET'])
def latest_bucket():
    """Locate the most recent host/date bucket and list its files.

    "Most recent" means the lexicographically greatest host and date
    directory names. Best-effort: any filesystem error yields an empty
    result rather than an HTTP error.
    """
    base_dir = _collector.base_dir
    empty = {'host': None, 'date': None, 'files': []}
    if not os.path.exists(base_dir):
        return jsonify(empty)

    try:
        # Host directories, excluding the AI-analysis subtree.
        hosts = sorted(
            name for name in os.listdir(base_dir)
            if os.path.isdir(os.path.join(base_dir, name)) and name != 'logsanalysis'
        )
        if not hosts:
            return jsonify(empty)
        host = hosts[-1]

        host_dir = os.path.join(base_dir, host)
        dates = sorted(
            name for name in os.listdir(host_dir)
            if os.path.isdir(os.path.join(host_dir, name))
        )
        if not dates:
            return jsonify({'host': host, 'date': None, 'files': []})
        date = dates[-1]

        date_dir = os.path.join(host_dir, date)
        names = sorted(
            name for name in os.listdir(date_dir)
            if os.path.isfile(os.path.join(date_dir, name))
        )
        # Normalize to forward slashes so paths are portable across clients.
        rel_files = [os.path.join(host, date, name).replace('\\', '/') for name in names]
        return jsonify({'host': host, 'date': date, 'files': rel_files})
    except Exception:
        return jsonify(empty)


def _load_ai_config():
    """加载AI配置"""
    try:
        config_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), '..', 'UI', 'config.json')
        if os.path.exists(config_path):
            with open(config_path, 'r', encoding='utf-8') as f:
                config = json.load(f)
                return config
    except Exception as e:
        pass
    return None


def _call_ai_model(log_content, config):
    """调用AI模型分析日志"""
    try:
        providers = config.get('providers', {})
        current_provider = config.get('current_provider', 'local')
        provider_config = providers.get(current_provider, {})
        
        if not provider_config:
            raise ValueError(f"未找到提供商配置: {current_provider}")
        
        api_url = provider_config.get('api_url')
        model = provider_config.get('model')
        api_key = provider_config.get('api_key', '')
        
        if not api_url or not model:
            raise ValueError("AI配置不完整")
        
        # 构建请求头
        headers = {
            'Content-Type': 'application/json'
        }
        if api_key:
            headers['Authorization'] = f'Bearer {api_key}'
        
        # 构建请求数据
        system_prompt = """你是一个专业的日志分析专家。请分析以下日志文件内容，并提供详细的分析报告。

请从以下角度进行分析：
1. 错误和异常情况
2. 性能问题
3. 安全风险
4. 系统状态
5. 关键事件
6. 建议和解决方案

请用Markdown格式输出分析结果，包含标题、列表、代码块等格式。"""
        
        user_message = f"请分析以下日志文件内容：\n\n{log_content}"
        
        data = {
            'model': model,
            'messages': [
                {'role': 'system', 'content': system_prompt},
                {'role': 'user', 'content': user_message}
            ],
            'temperature': config.get('temperature', 0.1),
            'max_tokens': config.get('max_tokens', 2048)
        }
        
        # 发送请求
        response = requests.post(
            f"{api_url}/v1/chat/completions",
            headers=headers,
            json=data,
            timeout=60
        )
        
        if response.status_code == 200:
            result = response.json()
            if 'choices' in result and len(result['choices']) > 0:
                return result['choices'][0]['message']['content']
            else:
                raise ValueError("AI响应格式错误")
        else:
            raise ValueError(f"AI请求失败: {response.status_code}")
            
    except Exception as e:
        pass
        return None


# ==================== Log analysis APIs ====================

@logs_bp.route('/analyze', methods=['POST'])
def analyze_log():
    """
    Run AI analysis on an already-collected log file.

    JSON body:
        file_path - path relative to the collector base dir (required)
        provider  - optional AI provider name overriding the configured one

    The markdown report is saved to backend/data/logs/logsanalysis/; the
    analysis text is returned even when saving the report fails.
    """
    try:
        data = request.get_json(silent=True) or {}
        file_path = data.get('file_path', '')
        provider = data.get('provider', '')  # optional model-provider override

        if not file_path:
            return jsonify({'status': 'error', 'message': '缺少文件路径参数'}), 400

        # Resolve and validate the target file (rejects path traversal).
        base_dir = _collector.base_dir
        full_path = _safe_join(base_dir, file_path)

        if not os.path.isfile(full_path):
            return jsonify({'status': 'error', 'message': '文件不存在'}), 404

        # Read at most max_lines lines to keep the AI prompt bounded.
        max_lines = 1000
        log_content = ""
        try:
            with open(full_path, 'r', encoding='utf-8', errors='ignore') as f:
                lines = []
                for i, line in enumerate(f):
                    if i >= max_lines:
                        break
                    lines.append(line.rstrip('\n'))
                log_content = '\n'.join(lines)

                if len(lines) == max_lines:
                    log_content += f"\n\n... (文件超过{max_lines}行，仅显示前{max_lines}行)"
        except Exception as e:
            return jsonify({'status': 'error', 'message': f'读取文件失败: {e}'}), 500

        config = _load_ai_config()
        if not config:
            return jsonify({'status': 'error', 'message': 'AI配置加载失败'}), 500

        # Validate the optional provider override against the config.
        if provider and provider in config.get('providers', {}):
            config['current_provider'] = provider
        elif provider:
            available_providers = list(config.get('providers', {}).keys())
            return jsonify({
                'status': 'error',
                'message': f'指定的提供商 "{provider}" 不存在。可用提供商: {available_providers}'
            }), 400

        analysis_result = _call_ai_model(log_content, config)
        if not analysis_result:
            return jsonify({'status': 'error', 'message': 'AI分析失败'}), 500

        # Save the report; failures here are non-fatal (the analysis text
        # is still returned). Bug fix: analysis_file is initialized up
        # front instead of being probed via `'analysis_file' in locals()`.
        analysis_file = None
        try:
            save_dir = _collector.get_analysis_dir()

            timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
            base_name = os.path.basename(file_path)
            # Bug fix: strip '.log' only as a suffix; str.replace also
            # removed it from the middle of names like 'my.logfile.log'.
            if base_name.endswith('.log'):
                base_name = base_name[:-len('.log')]
            analysis_file = f"{base_name}_analysis_{timestamp}.md"
            analysis_path = os.path.join(save_dir, analysis_file)

            with open(analysis_path, 'w', encoding='utf-8') as f:
                f.write(f"# 日志分析报告\n\n")
                f.write(f"**分析时间**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
                f.write(f"**源文件**: {file_path}\n\n")
                f.write("---\n\n")
                f.write(analysis_result)
        except Exception:
            # Saving is best-effort; don't advertise a file that wasn't written.
            analysis_file = None

        current = config.get('current_provider', 'local')
        return jsonify({
            'status': 'success',
            'analysis_result': analysis_result,
            'analysis_file': analysis_file,
            'provider_used': current,
            'model_info': config.get('providers', {}).get(current, {}).get('model', 'unknown'),
            'storage_info': _collector.get_storage_info()
        })

    except ValueError as e:
        # _safe_join raises ValueError on path traversal.
        return jsonify({'status': 'error', 'message': str(e)}), 400
    except Exception:
        return jsonify({'status': 'error', 'message': '分析失败'}), 500


@logs_bp.route('/models', methods=['GET'])
def get_available_models():
    """List every configured AI provider and flag the currently active one."""
    try:
        config = _load_ai_config()
        if not config:
            return jsonify({'status': 'error', 'message': 'AI配置加载失败'}), 500

        current_provider = config.get('current_provider', 'local')
        models_info = [
            {
                'provider': name,
                'model': cfg.get('model', 'unknown'),
                'type': cfg.get('type', 'unknown'),
                'api_url': cfg.get('api_url', ''),
                'is_current': name == current_provider,
            }
            for name, cfg in config.get('providers', {}).items()
        ]

        return jsonify({
            'status': 'success',
            'current_provider': current_provider,
            'available_models': models_info
        })

    except Exception:
        return jsonify({'status': 'error', 'message': '获取模型列表失败'}), 500


@logs_bp.route('/analysis', methods=['GET'])
def get_analysis_result():
    """
    Return the newest AI analysis report for a collected log file.

    Query params:
        filename - the log file name whose report is requested

    Matches reports named '{base}_analysis_{timestamp}.md' in the
    analysis directory, picks the most recently modified one, and
    returns its body (everything after the first '---' separator).
    """
    try:
        filename = request.args.get('filename', '')
        if not filename:
            return jsonify({'status': 'error', 'message': '缺少filename参数'}), 400

        save_dir = _collector.get_analysis_dir()

        # Bug fix: strip '.log'/'.txt' only as suffixes; str.replace also
        # removed them from the middle of a name, breaking report lookup
        # (kept consistent with the naming used when reports are saved).
        base_filename = filename
        for ext in ('.log', '.txt'):
            if base_filename.endswith(ext):
                base_filename = base_filename[:-len(ext)]

        analysis_files = []
        if os.path.exists(save_dir):
            prefix = base_filename + '_analysis_'
            for file in os.listdir(save_dir):
                if file.endswith('.md') and file.startswith(prefix):
                    analysis_files.append(file)

        if not analysis_files:
            return jsonify({'status': 'error', 'message': '未找到分析报告'}), 404

        # Newest report by modification time.
        latest_file = max(analysis_files, key=lambda f: os.path.getmtime(os.path.join(save_dir, f)))
        analysis_path = os.path.join(save_dir, latest_file)

        with open(analysis_path, 'r', encoding='utf-8') as f:
            content = f.read()

        # Drop the generated header; the report body follows the first '---'.
        lines = content.split('\n')
        analysis_result = []
        skip_header = True
        for line in lines:
            if line.startswith('---'):
                skip_header = False
                continue
            if not skip_header:
                analysis_result.append(line)

        analysis_content = '\n'.join(analysis_result).strip()

        return jsonify({
            'status': 'success',
            'analysis_result': analysis_content,
            'analysis_file': latest_file,
            'created_time': os.path.getctime(analysis_path)
        })

    except Exception:
        return jsonify({'status': 'error', 'message': '获取分析结果失败'}), 500


@logs_bp.route('/analysis-list', methods=['GET'])
def get_analysis_list():
    """List all saved AI analysis reports, newest first."""
    try:
        save_dir = _collector.get_analysis_dir()
        if not os.path.exists(save_dir):
            return jsonify({'status': 'success', 'analysis_files': []})

        reports = []
        for name in os.listdir(save_dir):
            if not name.endswith('.md'):
                continue
            full = os.path.join(save_dir, name)
            reports.append({
                'filename': name,
                'created_time': os.path.getctime(full),
                'size': os.path.getsize(full),
            })

        # Most recently created reports first.
        reports.sort(key=lambda item: item['created_time'], reverse=True)

        return jsonify({
            'status': 'success',
            'analysis_files': reports
        })

    except Exception:
        return jsonify({'status': 'error', 'message': '获取分析报告列表失败'}), 500


@logs_bp.route('/storage-info', methods=['GET'])
def get_storage_info():
    """Expose the collector's storage statistics."""
    try:
        return jsonify({
            'status': 'success',
            'storage_info': _collector.get_storage_info()
        })
    except Exception:
        return jsonify({'status': 'error', 'message': '获取存储信息失败'}), 500


