import os
import requests
import re
import sys
import webbrowser
import base64
from datetime import datetime
from urllib.parse import unquote, urlparse, quote
from flask import Flask, jsonify, render_template, url_for, Response
import json

# Add the project root to sys.path so config_manager can be imported.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from config_manager import get_config

app = Flask(__name__)

# --- Global state (populated by main() at startup) ---
CONFIG = {}   # parsed config values; code below reads 'url', 'token', 'username'
HEADERS = {}  # GitLab auth header dict ({'PRIVATE-TOKEN': ...})
IMAGE_SUBDIR = 'images'
STATIC_IMAGE_PATH = os.path.join('static', IMAGE_SUBDIR)  # download target inside Flask's static dir


# --- Image download helpers ---
def download_image_locally(url, headers, save_path):
    """Fetch an image over HTTP and write it to *save_path*.

    Returns True on success; on any request or file error, prints a
    warning and returns False (best-effort, never raises).
    """
    try:
        resp = requests.get(url, headers=headers, verify=False, timeout=15)
        resp.raise_for_status()
        with open(save_path, 'wb') as fh:
            fh.write(resp.content)
    except Exception as e:
        print(f"  -> [Web服务警告] 无法下载图片 {url}: {e}")
        return False
    return True

# --- GitLab data-fetching logic (migrated/adapted from the original script) ---
def fetch_all_issues(gitlab_url, headers, assignee_username):
    """Page through the GitLab issues API for one assignee.

    Returns the accumulated list of raw issue dicts, or None as soon
    as any request fails.
    """
    collected = []
    page = 1
    while True:
        print(f"Web服务: 正在获取第 {page} 页的所有问题...")
        query = {
            'assignee_username': assignee_username,
            'scope': 'all',
            'per_page': 100,
            'page': page,
        }
        try:
            resp = requests.get(f"{gitlab_url}/api/v4/issues", headers=headers, params=query, verify=False)
            resp.raise_for_status()
            batch = resp.json()
        except requests.exceptions.RequestException as e:
            print(f"Web服务: 请求API时发生错误: {e}")
            return None
        if not batch:
            return collected
        collected.extend(batch)
        page += 1


def process_and_stream_issues():
    """Generator powering the SSE endpoint.

    Yields 'log' events describing each processing step in real time,
    then a single 'data' event whose payload is a JSON object mapping
    priority labels (P0-P3 / 无优先级) to lists of processed issues.
    """
    def stream_log(message):
        """Format *message* as an SSE 'log' event.

        Each line gets its own 'data:' prefix so multi-line payloads
        (e.g. the traceback yielded below) stay valid SSE frames.
        """
        lines = str(message).splitlines() or ['']
        body = "".join(f"data: {line}\n" for line in lines)
        return f"event: log\n{body}\n"

    try:
        yield stream_log("流程开始：准备环境...")

        # Make sure the directory served by Flask's static route exists.
        os.makedirs(STATIC_IMAGE_PATH, exist_ok=True)
        yield stream_log(f"静态图片目录 '{STATIC_IMAGE_PATH}' 已确认。")

        # --- 1. Fetch every raw issue for the configured assignee, page by page ---
        all_issues_raw = []
        page = 1
        while True:
            params = {
                'assignee_username': CONFIG['username'],
                'scope': 'all',
                'per_page': 100,
                'page': page
            }
            yield stream_log(f"正在从 GitLab API 获取第 {page} 页的问题...")
            try:
                response = requests.get(f"{CONFIG['url']}/api/v4/issues", headers=HEADERS, params=params, verify=False)
                response.raise_for_status()
                data = response.json()
                if not data:
                    yield stream_log("所有页面都已获取完毕。")
                    break
                all_issues_raw.extend(data)
                yield stream_log(f"成功获取第 {page} 页，目前共 {len(all_issues_raw)} 条问题。")
                page += 1
            except requests.exceptions.RequestException as e:
                yield stream_log(f"[错误] 请求API时发生错误: {e}")
                return

        # Only open issues are shown on the dashboard.
        issues = [issue for issue in all_issues_raw if issue.get('state') == 'opened']
        yield stream_log(f"数据过滤完成，共有 {len(issues)} 条 'opened' 状态的问题需要处理。")

        # --- 2. Collect every /uploads/ image referenced in a description ---
        images_to_process = []
        for issue in issues:
            description_raw = issue.get('description') or ''
            web_url = issue.get('web_url', '')
            # The project base URL is everything before '/-/issues/<iid>'.
            project_url = web_url.split('/-/issues/')[0] if '/-/issues/' in web_url else ''
            if not project_url:
                continue

            for match in re.finditer(r"!\[.*?\]\((/uploads/[^)]+)\)", description_raw):
                relative_path = match.group(1)
                full_url = f"{project_url}{relative_path}"
                try:
                    original_filename = os.path.basename(unquote(urlparse(full_url).path))
                    local_filename = f"{issue['id']}_{issue['iid']}_{original_filename}"
                except Exception:
                    # Unparsable URL: derive a stable fallback file name.
                    local_filename = f"{issue['id']}_{issue['iid']}_{base64.b64encode(relative_path.encode()).decode('utf-8')[:12]}.png"

                local_image_path = os.path.join(STATIC_IMAGE_PATH, local_filename)
                # URL path for the frontend; quote() protects special chars.
                frontend_path = f"static/{IMAGE_SUBDIR}/{quote(local_filename)}"

                images_to_process.append({
                    'full_url': full_url,
                    'local_path': local_image_path,
                    'frontend_path': frontend_path
                })

        yield stream_log(f"分析完成，共发现 {len(images_to_process)} 张图片需要处理。")

        # --- 3. Download each unique image and map its URL to an <img> tag ---
        url_to_html_map = {}
        total_images = len(images_to_process)
        for i, img_info in enumerate(images_to_process):
            full_url = img_info['full_url']
            if full_url in url_to_html_map:
                continue  # already downloaded (or already failed) once

            yield stream_log(f"({i + 1}/{total_images}) 正在下载图片: ...{full_url[-40:]}")
            if download_image_locally(full_url, HEADERS, img_info['local_path']):
                url_to_html_map[full_url] = f'<img src="{img_info["frontend_path"]}" alt="Embedded Image">'
            else:
                url_to_html_map[full_url] = f'<br>[图片加载失败: {full_url}]<br>'

        yield stream_log("图片处理完成，正在整合最终数据...")

        # --- 4. Rewrite descriptions to HTML and bucket issues by priority ---
        priorities = ['P0', 'P1', 'P2', 'P3']
        categorized_issues = {p: [] for p in priorities}
        categorized_issues['无优先级'] = []

        for issue in issues:
            description_raw = issue.get('description') or ''
            web_url = issue.get('web_url', '')
            project_url = web_url.split('/-/issues/')[0] if '/-/issues/' in web_url else ''

            def replace_callback(match):
                # Swap each markdown image for the HTML produced in step 3.
                if not project_url:
                    return "[图片路径解析失败]"
                relative_path = match.group(1)
                full_url = f"{project_url}{relative_path}"
                return url_to_html_map.get(full_url, f"[图片处理异常: {full_url}]")

            final_description_html = re.sub(r"!\[.*?\]\((/uploads/[^)]+)\)", replace_callback, description_raw)

            # GitLab returns null (not a missing key) for unassigned issues,
            # so guard with `or {}` before the nested .get() to avoid
            # AttributeError on None.
            author_name = (issue.get('author') or {}).get('name', 'N/A')
            assignee_name = (issue.get('assignee') or {}).get('name', 'N/A')
            created_at = datetime.fromisoformat(issue['created_at'].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')
            updated_at = datetime.fromisoformat(issue['updated_at'].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')

            processed_issue = {
                'id': issue['id'], 'iid': issue['iid'], 'title': issue.get('title', 'No Title'),
                'description': final_description_html, 'web_url': issue.get('web_url', ''),
                'state': issue.get('state'), 'labels': issue.get('labels', []),
                'author': author_name, 'assignee': assignee_name,
                'created_at': created_at, 'updated_at': updated_at
            }

            # First matching priority label wins; otherwise default bucket.
            issue_priority = '无优先级'
            for p in priorities:
                if p in processed_issue['labels']:
                    issue_priority = p
                    break
            categorized_issues[issue_priority].append(processed_issue)

        yield stream_log("所有问题处理完毕，正在准备最终数据...")

        # json.dumps with default ensure_ascii keeps the payload on one
        # line, so it is safe inside a single SSE 'data:' field.
        final_data = json.dumps(categorized_issues)
        yield f"event: data\ndata: {final_data}\n\n"

        yield stream_log("流程结束。")

    except Exception as e:
        # Last-resort guard: surface unexpected errors to the client
        # instead of silently terminating the stream.
        yield stream_log(f"[严重错误] 处理过程中发生异常: {e}")
        import traceback
        yield stream_log(f"堆栈跟踪: {traceback.format_exc()}")


# --- API endpoints ---
@app.route('/stream-updates')
def stream_updates():
    """SSE endpoint: streams log/data events from the issue pipeline."""
    event_stream = process_and_stream_issues()
    return Response(event_stream, mimetype='text/event-stream')

@app.route('/api/issues')
def get_issues_api():
    """Synchronous (non-streaming) API: fetch, process and categorize
    all open issues assigned to the configured user.

    Downloads every embedded /uploads/ image into the static folder and
    rewrites the markdown references to local <img> tags. Returns a JSON
    object keyed by priority label (P0-P3 / 无优先级), or a 500 error if
    the GitLab API could not be reached.
    """
    print("Web服务: 收到 /api/issues 请求...")

    # Make sure the directory served by Flask's static route exists.
    os.makedirs(STATIC_IMAGE_PATH, exist_ok=True)

    all_issues = fetch_all_issues(CONFIG['url'], HEADERS, CONFIG['username'])
    if all_issues is None:
        return jsonify({"error": "无法从GitLab获取问题"}), 500

    # Only open issues are shown on the dashboard.
    issues = [issue for issue in all_issues if issue.get('state') == 'opened']

    # Bucket issues by their first matching priority label.
    priorities = ['P0', 'P1', 'P2', 'P3']
    categorized_issues = {p: [] for p in priorities}
    categorized_issues['无优先级'] = []

    for issue in issues:
        description_raw = issue.get('description') or ''
        web_url = issue.get('web_url', '')

        def replace_markdown_with_html_image(match):
            # Download the referenced image and return an <img> tag
            # pointing at the locally saved copy.
            relative_path = match.group(1)
            # The project base URL is everything before '/-/issues/<iid>'.
            project_url = web_url.split('/-/issues/')[0] if '/-/issues/' in web_url else ''

            if not project_url: return "[图片路径解析失败]"

            full_url = f"{project_url}{relative_path}"

            try:
                original_filename = os.path.basename(unquote(urlparse(full_url).path))
                local_filename = f"{issue['id']}_{issue['iid']}_{original_filename}"
            except Exception:
                # Unparsable URL: derive a stable fallback file name.
                local_filename = f"{issue['id']}_{issue['iid']}_{base64.b64encode(relative_path.encode()).decode('utf-8')[:12]}.png"

            local_image_path = os.path.join(STATIC_IMAGE_PATH, local_filename)
            # Join with '/' (URL path, not os.path) and URL-encode the name.
            frontend_path = f"static/{IMAGE_SUBDIR}/{quote(local_filename)}"

            print(f"  -> 正在下载图片: {full_url[-50:]}")
            if download_image_locally(full_url, HEADERS, local_image_path):
                return f'<img src="{frontend_path}" alt="Embedded Image">'
            else:
                return f'<br>[图片加载失败: {full_url}]<br>'

        # Replace every markdown image reference in place via re.sub.
        final_description_html = re.sub(r"!\[.*?\]\((/uploads/[^)]+)\)", replace_markdown_with_html_image, description_raw)

        # GitLab returns null (not a missing key) for unassigned issues,
        # so guard with `or {}` before the nested .get() to avoid
        # AttributeError on None.
        author_name = (issue.get('author') or {}).get('name', 'N/A')
        assignee_name = (issue.get('assignee') or {}).get('name', 'N/A')
        created_at = datetime.fromisoformat(issue['created_at'].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')
        updated_at = datetime.fromisoformat(issue['updated_at'].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')

        processed_issue = {
            'id': issue['id'],
            'iid': issue['iid'],
            'title': issue.get('title', 'No Title'),
            'description': final_description_html,
            'web_url': issue.get('web_url', ''),
            'state': issue.get('state'),
            'labels': issue.get('labels', []),
            'author': author_name,
            'assignee': assignee_name,
            'created_at': created_at,
            'updated_at': updated_at
        }

        # First matching priority label wins; otherwise default bucket.
        issue_priority = '无优先级'
        for p in priorities:
            if p in processed_issue['labels']:
                issue_priority = p
                break
        categorized_issues[issue_priority].append(processed_issue)

    print(f"Web服务: 数据处理完毕，返回 {len(issues)} 条 'opened' 状态的问题。")
    return jsonify(categorized_issues)


# --- Page routes ---
@app.route('/')
def home():
    """Serve the single-page frontend."""
    template_name = 'index.html'
    return render_template(template_name)

def main():
    """Load configuration, silence TLS warnings, open the browser and
    start the Flask development server (blocking)."""
    global CONFIG, HEADERS
    try:
        # Configuration is read via config_manager from the parent directory.
        print("Web服务: 正在加载配置...")
        CONFIG = get_config()
        HEADERS = {'PRIVATE-TOKEN': CONFIG['token']}
        print("Web服务: 配置加载成功。")
    except Exception as e:
        # Without valid config the service cannot talk to GitLab; abort.
        print(f"Web服务: 启动失败，加载配置时出错: {e}")
        sys.exit(1)
        
    # Suppress the InsecureRequestWarning spam caused by verify=False requests.
    import urllib3
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    
    # Open the UI automatically; app.run() blocks after this call.
    webbrowser.open_new("http://127.0.0.1:5000")

    app.run(host='127.0.0.1', port=5000)

# Entry point: start the web service only when executed directly.
if __name__ == '__main__':
    main() 