import subprocess
import sys

from flask import Flask, render_template, request, jsonify, session, redirect, url_for
from flask_cors import CORS
from werkzeug.security import generate_password_hash, check_password_hash
import pymysql
import json
from datetime import datetime, timedelta
import os
import uuid
import jwt
import threading
import time
from functools import wraps
from ai_assistant import create_ai_assistant, JobKnowledgeBase

# Spark (iFlytek) AI credentials.
# NOTE(review): credentials are hard-coded in source — move these to
# environment variables / a secrets store before deployment.
SPARK_APP_ID = '0adb1398'
SPARK_API_SECRET = 'UJnvocAcTLWdATUCivPv:YnsopnQsfkJanwuLoFCV'
SPARK_API_KEY = '4e0ab6929faad9ff81b6b84d0fc8bf76'  # replace with your own API Key

# === Crawler path setup ===
# Absolute directory of this file.
current_dir = os.path.dirname(os.path.abspath(__file__))
# Make the project root importable.
sys.path.append(current_dir)
# Make the crawler package importable if it exists alongside this file.
crawler_dir = os.path.join(current_dir, 'crawler')
if os.path.exists(crawler_dir):
    sys.path.append(crawler_dir)

print(f"当前目录: {current_dir}")
print(f"爬虫目录: {crawler_dir}")
print(f"系统路径: {sys.path}")

app = Flask(__name__)
# NOTE(review): weak secret key — use a strong random key in production.
app.secret_key = 'abc123456'
CORS(app, supports_credentials=True)

# Database connection settings shared by get_db_connection().
# NOTE(review): credentials hard-coded — externalize before deployment.
DB_CONFIG = {
    'host': '127.0.0.1',
    'port': 3306,
    'user': 'root',
    'password': '123456',
    'database': 'recruitment_analysis',
    'charset': 'utf8mb4'
}

# JWT signing settings used by login/register and the login_required decorator.
JWT_SECRET = 'abc123456'
JWT_ALGORITHM = 'HS256'

def get_db_connection():
    """Open and return a new PyMySQL connection built from DB_CONFIG.

    Callers are responsible for closing the returned connection.
    """
    return pymysql.connect(**DB_CONFIG)


def login_required(f):
    """Decorator: require a valid JWT in the ``Authorization`` header.

    On success the decoded user id is stored on ``request.user_id`` for the
    wrapped view; otherwise a 401 JSON response is returned.
    """

    @wraps(f)
    def decorated_function(*args, **kwargs):
        auth_header = request.headers.get('Authorization', '')
        token = auth_header.replace('Bearer ', '')
        if not token:
            return jsonify({'success': False, 'message': '请先登录'}), 401

        try:
            claims = jwt.decode(token, JWT_SECRET, algorithms=[JWT_ALGORITHM])
        except jwt.ExpiredSignatureError:
            return jsonify({'success': False, 'message': '登录已过期'}), 401
        except jwt.InvalidTokenError:
            return jsonify({'success': False, 'message': '令牌无效'}), 401

        user_id = claims.get('user_id')
        if not user_id:
            return jsonify({'success': False, 'message': '令牌无效'}), 401

        # Expose the authenticated user id to the wrapped view.
        request.user_id = user_id
        return f(*args, **kwargs)

    return decorated_function


@app.route('/')
def index():
    """Serve the main dashboard page."""
    template = 'index.html'
    return render_template(template)


@app.route('/login')
def login_page():
    """Serve the login page."""
    template = 'login.html'
    return render_template(template)


@app.route('/register')
def register_page():
    """Serve the registration page."""
    template = 'register.html'
    return render_template(template)


# 用户认证API
@app.route('/api/register', methods=['POST'])
def register():
    try:
        data = request.get_json()
        username = data.get('username', '').strip()
        email = data.get('email', '').strip()
        password = data.get('password', '')

        if not username or not email or not password:
            return jsonify({'success': False, 'message': '请填写所有必填字段'})

        if len(password) < 6:
            return jsonify({'success': False, 'message': '密码长度至少6位'})

        conn = get_db_connection()
        cursor = conn.cursor()

        # 检查用户名和邮箱是否已存在
        cursor.execute("SELECT id FROM users WHERE username = %s OR email = %s", (username, email))
        if cursor.fetchone():
            cursor.close()
            conn.close()
            return jsonify({'success': False, 'message': '用户名或邮箱已存在'})

        # 创建用户
        password_hash = generate_password_hash(password)
        cursor.execute(
            "INSERT INTO users (username, email, password_hash) VALUES (%s, %s, %s)",
            (username, email, password_hash)
        )
        conn.commit()
        user_id = cursor.lastrowid

        # 生成JWT token
        token = jwt.encode({
            'user_id': user_id,
            'username': username,
            'exp': datetime.utcnow() + timedelta(days=7)
        }, JWT_SECRET, algorithm=JWT_ALGORITHM)

        cursor.close()
        conn.close()

        return jsonify({
            'success': True,
            'message': '注册成功',
            'token': token,
            'user': {'id': user_id, 'username': username, 'email': email}
        })

    except Exception as e:
        return jsonify({'success': False, 'message': f'注册失败: {str(e)}'}), 500


@app.route('/api/login', methods=['POST'])
def login():
    """Authenticate a user and return a 7-day JWT token."""
    try:
        # silent=True: a missing/malformed JSON body becomes {} instead of a 500.
        data = request.get_json(silent=True) or {}
        username = data.get('username', '').strip()
        password = data.get('password', '')

        if not username or not password:
            return jsonify({'success': False, 'message': '请填写用户名和密码'})

        conn = get_db_connection()
        cursor = conn.cursor(pymysql.cursors.DictCursor)
        try:
            # Only active accounts may log in.
            cursor.execute(
                "SELECT id, username, email, password_hash FROM users WHERE username = %s AND is_active = 1",
                (username,)
            )
            user = cursor.fetchone()

            # Same message for unknown user and wrong password (avoids user enumeration).
            if not user or not check_password_hash(user['password_hash'], password):
                return jsonify({'success': False, 'message': '用户名或密码错误'})

            cursor.execute("UPDATE users SET last_login = NOW() WHERE id = %s", (user['id'],))
            conn.commit()
        finally:
            # Close on every path (the original leaked the connection on errors).
            cursor.close()
            conn.close()

        token = jwt.encode({
            'user_id': user['id'],
            'username': user['username'],
            'exp': datetime.utcnow() + timedelta(days=7)
        }, JWT_SECRET, algorithm=JWT_ALGORITHM)

        return jsonify({
            'success': True,
            'message': '登录成功',
            'token': token,
            'user': {'id': user['id'], 'username': user['username'], 'email': user['email']}
        })

    except Exception as e:
        return jsonify({'success': False, 'message': f'登录失败: {str(e)}'}), 500


@app.route('/api/user/profile')
@login_required
def get_user_profile():
    """Return the current user's profile plus crawl-task statistics."""
    try:
        user_id = request.user_id
        conn = get_db_connection()
        cursor = conn.cursor(pymysql.cursors.DictCursor)
        try:
            cursor.execute(
                "SELECT id, username, email, created_at, last_login FROM users WHERE id = %s",
                (user_id,)
            )
            user = cursor.fetchone()

            # Aggregate the user's crawl history (SUMs are NULL when no tasks exist).
            cursor.execute("""
                SELECT 
                    COUNT(*) as total_tasks,
                    SUM(CASE WHEN status = 'completed' THEN 1 ELSE 0 END) as completed_tasks,
                    SUM(result_count) as total_jobs
                FROM crawl_tasks 
                WHERE user_id = %s
            """, (user_id,))
            stats = cursor.fetchone()
        finally:
            # Close on every path (the original leaked the connection on errors).
            cursor.close()
            conn.close()

        return jsonify({
            'success': True,
            'data': {
                'user': user,
                'stats': stats
            }
        })

    except Exception as e:
        return jsonify({'success': False, 'message': str(e)}), 500


# 修改初始化函数
# 修改 init_ai_assistant 函数
def init_ai_assistant(use_backup=False):
    """Build the job knowledge base and the AI assistant.

    Args:
        use_backup: when True, create the fallback assistant instead of the
            primary one.

    Returns:
        A ``(JobKnowledgeBase, assistant)`` tuple.
    """
    # Reuse the module-level DB_CONFIG instead of duplicating the credentials
    # here (the original kept a second, identical copy of the dict).
    # Pass a shallow copy so callees cannot mutate the shared config.
    db_config = dict(DB_CONFIG)

    knowledge_base = JobKnowledgeBase(db_config)

    ai_assistant = create_ai_assistant(
        db_config=db_config,
        use_backup=use_backup
    )

    return knowledge_base, ai_assistant


# 数据统计API
@app.route('/api/stats')
@login_required
def get_stats():
    """Aggregate dashboard statistics over the bosszp job table.

    Returns overall counts (jobs / cities / keywords / companies), top-10
    city and keyword distributions, a coarse salary-range distribution and
    the timestamp of the most recent crawl.
    """
    try:
        user_id = request.user_id
        print(f"用户 {user_id} 请求统计信息")

        conn = get_db_connection()
        cursor = conn.cursor(pymysql.cursors.DictCursor)
        try:
            # Basic totals.
            cursor.execute("SELECT COUNT(*) as total_jobs FROM bosszp")
            total_jobs = cursor.fetchone()['total_jobs']

            cursor.execute("SELECT COUNT(DISTINCT company_city) as total_cities FROM bosszp")
            total_cities = cursor.fetchone()['total_cities']

            cursor.execute("SELECT COUNT(DISTINCT keyword) as total_keywords FROM bosszp")
            total_keywords = cursor.fetchone()['total_keywords']

            cursor.execute("SELECT COUNT(DISTINCT job_company) as total_companies FROM bosszp")
            total_companies = cursor.fetchone()['total_companies']

            # Top-10 cities by job count.
            cursor.execute("""
                SELECT company_city, COUNT(*) as count 
                FROM bosszp 
                GROUP BY company_city 
                ORDER BY count DESC 
                LIMIT 10
            """)
            city_distribution = cursor.fetchall()

            # Top-10 search keywords by job count.
            cursor.execute("""
                SELECT keyword, COUNT(*) as count 
                FROM bosszp 
                GROUP BY keyword 
                ORDER BY count DESC 
                LIMIT 10
            """)
            keyword_distribution = cursor.fetchall()

            # Coarse salary buckets parsed from the free-text salary column.
            cursor.execute("""
                SELECT 
                    CASE 
                        WHEN job_salary LIKE '%以下%' THEN '0-5K'
                        WHEN job_salary LIKE '%以上%' THEN '30K+'
                        WHEN job_salary REGEXP '^[0-9]+-[0-9]+K' THEN 
                            CONCAT(
                                SUBSTRING_INDEX(job_salary, '-', 1), 
                                'K-',
                                SUBSTRING_INDEX(SUBSTRING_INDEX(job_salary, 'K', 1), '-', -1),
                                'K'
                            )
                        ELSE '其他'
                    END as salary_range,
                    COUNT(*) as count
                FROM bosszp 
                GROUP BY salary_range 
                ORDER BY COUNT(*) DESC
            """)
            salary_distribution = cursor.fetchall()

            # Timestamp of the most recent crawl (NULL when the table is empty).
            cursor.execute("SELECT MAX(crawl_time) as latest_time FROM bosszp")
            latest_time = cursor.fetchone()['latest_time']
        finally:
            # Close on every path (the original leaked the connection on errors).
            cursor.close()
            conn.close()

        return jsonify({
            'success': True,
            'data': {
                'total_jobs': total_jobs,
                'total_cities': total_cities,
                'total_keywords': total_keywords,
                'total_companies': total_companies,
                'latest_time': latest_time.strftime('%Y-%m-%d %H:%M:%S') if latest_time else None,
                'city_distribution': city_distribution,
                'keyword_distribution': keyword_distribution,
                'salary_distribution': salary_distribution
            }
        })

    except Exception as e:
        return jsonify({'success': False, 'message': str(e)}), 500


@app.route('/api/cities')
@login_required
def get_cities():
    """List the distinct cities among the current user's crawled jobs."""
    try:
        user_id = request.user_id
        conn = get_db_connection()
        cursor = conn.cursor()
        try:
            cursor.execute("SELECT DISTINCT company_city FROM bosszp WHERE user_id = %s ORDER BY company_city", (user_id,))
            cities = [row[0] for row in cursor.fetchall()]
        finally:
            # Close on every path (the original leaked the connection on errors).
            cursor.close()
            conn.close()
        return jsonify({'success': True, 'data': cities})
    except Exception as e:
        return jsonify({'success': False, 'message': str(e)}), 500


@app.route('/api/keywords')
@login_required
def get_keywords():
    """List the distinct search keywords among the current user's crawled jobs."""
    try:
        user_id = request.user_id
        conn = get_db_connection()
        cursor = conn.cursor()
        try:
            cursor.execute("SELECT DISTINCT keyword FROM bosszp WHERE user_id = %s ORDER BY keyword", (user_id,))
            keywords = [row[0] for row in cursor.fetchall()]
        finally:
            # Close on every path (the original leaked the connection on errors).
            cursor.close()
            conn.close()
        return jsonify({'success': True, 'data': keywords})
    except Exception as e:
        return jsonify({'success': False, 'message': str(e)}), 500


# 爬取任务API
@app.route('/api/crawl/start', methods=['POST'])
@login_required
def start_crawl():
    try:
        user_id = request.user_id
        data = request.get_json()

        keyword = data.get('keyword', '').strip()
        city = data.get('city', '').strip()
        pages = min(int(data.get('pages', 5)), 20)  # 限制最大页数
        mode = data.get('mode', 'drissionpage')

        if not keyword or not city:
            return jsonify({'success': False, 'message': '请填写关键词和城市'})

        if len(keyword) < 1 or len(keyword) > 50:
            return jsonify({'success': False, 'message': '关键词长度应在1-50字符之间'})

        conn = get_db_connection()
        cursor = conn.cursor()

        # 检查是否有正在运行的相同任务（防止重复爬取）
        cursor.execute("""
            SELECT id FROM crawl_tasks 
            WHERE user_id = %s AND keyword = %s AND city = %s 
            AND status IN ('pending', 'running')
            LIMIT 1
        """, (user_id, keyword, city))

        if cursor.fetchone():
            cursor.close()
            conn.close()
            return jsonify({'success': False, 'message': '相同关键词和城市的任务正在运行中'})

        # 创建爬取任务
        cursor.execute("""
                   INSERT INTO crawl_tasks (user_id, keyword, city, pages, mode, status) 
                   VALUES (%s, %s, %s, %s, %s, 'pending')
               """, (user_id, keyword, city, pages, mode))
        task_id = cursor.lastrowid
        conn.commit()

        # 在新线程中启动爬取
        thread = threading.Thread(
            target=run_crawl_task,
            args=(task_id, keyword, city, pages, mode, user_id),
            daemon=True
        )
        thread.start()

        cursor.close()
        conn.close()

        return jsonify({
            'success': True,
            'message': '爬取任务已开始，请耐心等待',
            'task_id': task_id,
            'data': {
                'keyword': keyword,
                'city': city,
                'pages': pages
            }
        })

    except Exception as e:
        return jsonify({'success': False, 'message': f'启动爬取失败: {str(e)}'}), 500

def run_crawl_task(task_id, keyword, city, pages, mode, user_id):
    """Background worker: run one crawl task and record its outcome.

    Marks the task 'running', invokes the crawler, then marks it
    'completed' with the result count — or 'failed' with the error message.
    """
    # Pre-bind so the except/finally paths never hit a NameError when
    # get_db_connection() itself fails (a bug in the original version).
    conn = None
    cursor = None
    try:
        conn = get_db_connection()
        cursor = conn.cursor()

        # Mark the task as running.
        cursor.execute(
            "UPDATE crawl_tasks SET status = 'running', started_at = NOW() WHERE id = %s",
            (task_id,)
        )
        conn.commit()

        # Invoke the actual crawler.
        result_count = run_real_crawler(keyword, city, pages, user_id, task_id, mode)

        # Mark the task as completed with its result count.
        cursor.execute(
            "UPDATE crawl_tasks SET status = 'completed', result_count = %s, completed_at = NOW() WHERE id = %s",
            (result_count, task_id)
        )
        conn.commit()

    except Exception as e:
        print(f"爬取任务失败: {str(e)}")
        # Best-effort: record the failure, reconnecting if the original
        # connection was never established or is broken.
        try:
            if conn is None:
                conn = get_db_connection()
                cursor = conn.cursor()
            cursor.execute(
                "UPDATE crawl_tasks SET status = 'failed', error_message = %s WHERE id = %s",
                (str(e), task_id)
            )
            conn.commit()
        except Exception as update_error:
            print(f"更新任务状态失败: {str(update_error)}")
    finally:
        if cursor:
            cursor.close()
        if conn:
            conn.close()


def run_real_crawler(keyword, city, pages, user_id, task_id, mode='drissionpage'):
    """Invoke the in-process crawler module and return the record count.

    Any failure (including a missing crawler module) is logged and reported
    as 0 so the caller can still finalize the task.
    """
    try:
        # Deferred import: the crawler package may not exist in every deployment.
        from crawler.simple_spider import run_crawl

        return run_crawl(
            keyword=keyword,
            city=city,
            pages=pages,
            user_id=user_id,
            task_id=task_id
        )
    except Exception as e:
        print(f"调用爬虫程序失败: {str(e)}")
        return 0


def run_crawler_via_subprocess(keyword, city, pages, user_id, task_id, mode):
    """Run the visual crawler as a child process and return the record count.

    Returns 0 on any failure or timeout. On success the count is parsed
    from the crawler's stdout, falling back to a default when parsing fails.
    """
    import re  # hoisted: the original re-imported this inside the parsing loop

    try:
        crawler_script = os.path.join(os.path.dirname(__file__), 'crawler', 'visual_spider.py')

        cmd = [
            sys.executable,
            crawler_script,
            '--keyword', keyword,
            '--city', city,
            '--pages', str(pages),
            '--user_id', str(user_id),
            '--task_id', str(task_id),
            '--mode', mode,
            '--headless'
        ]

        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            timeout=3600,  # 1-hour hard limit
            cwd=os.path.dirname(__file__)
        )

        if result.returncode != 0:
            print(f"爬虫子进程错误: {result.stderr}")
            return 0

        # Parse a "... N 条数据 / N 个职位" line from the crawler's log output.
        if '成功爬取' in result.stdout or '保存数据' in result.stdout:
            for line in result.stdout.split('\n'):
                if '条数据' in line or '个职位' in line:
                    numbers = re.findall(r'\d+', line)
                    if numbers:
                        return int(numbers[0])
        # NOTE(review): fabricated fallback kept for compatibility — confirm
        # whether 10 is really the desired default when parsing fails.
        return 10

    except subprocess.TimeoutExpired:
        print("爬虫执行超时")
        return 0
    except Exception as e:
        print(f"子进程执行错误: {str(e)}")
        return 0


def simulate_crawl(keyword, city, pages, user_id, task_id):
    """Simulate a crawl by inserting random placeholder rows.

    Development stand-in for the real crawler; returns the number of fake
    records inserted into bosszp.
    """
    import random
    import time

    # Pretend each page takes a while to fetch.
    total_pages = min(pages, 10)
    for page in range(1, total_pages + 1):
        time.sleep(2)

    result_count = random.randint(5, 50)

    # Build all fake rows up front so they can be inserted in one batch.
    rows = [
        (
            f"{keyword}开发工程师-模拟{i}",
            f"{random.randint(10, 40)}K-{random.randint(20, 60)}K",
            f"模拟公司{i}",
            city,
            keyword,
            user_id,
            task_id
        )
        for i in range(result_count)
    ]

    conn = get_db_connection()
    cursor = conn.cursor()
    try:
        # executemany: one batched round trip instead of N single inserts.
        cursor.executemany("""
            INSERT INTO bosszp (job_title, job_salary, job_company, company_city, keyword, user_id, task_id)
            VALUES (%s, %s, %s, %s, %s, %s, %s)
        """, rows)
        conn.commit()
    finally:
        # Close on every path (the original leaked the connection on errors).
        cursor.close()
        conn.close()

    return result_count


@app.route('/api/crawl/tasks')
@login_required
def get_crawl_tasks():
    """Paginated list of the current user's crawl tasks, newest first."""
    try:
        user_id = request.user_id
        page = int(request.args.get('page', 1))
        per_page = int(request.args.get('per_page', 10))
        offset = (page - 1) * per_page

        conn = get_db_connection()
        cursor = conn.cursor(pymysql.cursors.DictCursor)
        try:
            cursor.execute("""
                SELECT id, keyword, city, pages, mode, status, result_count, 
                       error_message, created_at, started_at, completed_at
                FROM crawl_tasks 
                WHERE user_id = %s 
                ORDER BY created_at DESC 
                LIMIT %s OFFSET %s
            """, (user_id, per_page, offset))
            tasks = cursor.fetchall()

            cursor.execute("SELECT COUNT(*) as total FROM crawl_tasks WHERE user_id = %s", (user_id,))
            total = cursor.fetchone()['total']
        finally:
            # Close on every path (the original leaked the connection on errors).
            cursor.close()
            conn.close()

        return jsonify({
            'success': True,
            'data': tasks,
            'pagination': {
                'page': page,
                'per_page': per_page,
                'total': total,
                'pages': (total + per_page - 1) // per_page
            }
        })

    except Exception as e:
        return jsonify({'success': False, 'message': str(e)}), 500

# 修改原有的数据API，加入用户权限控制
# 修复数据库查询，确保能正确获取数据
@app.route('/api/jobs')
@login_required
def get_jobs():
    """Paginated job listing for the current user with optional filters.

    Query params: ``page``, ``per_page``, ``keyword`` (matches job title or
    crawl keyword), ``city``, ``salary_range`` ('0-10' | '10-20' | '20-30'
    | '30+', in K, matched against the lower bound of 'NK-MK' salaries).
    """
    try:
        user_id = request.user_id
        page = int(request.args.get('page', 1))
        per_page = int(request.args.get('per_page', 20))
        keyword = request.args.get('keyword', '')
        city = request.args.get('city', '')
        salary_range = request.args.get('salary_range', '')

        offset = (page - 1) * per_page

        conn = get_db_connection()
        cursor = conn.cursor(pymysql.cursors.DictCursor)
        try:
            # Restrict results to the requesting user, consistent with
            # /api/export (the original temporarily disabled this filter
            # "for testing", which leaked all users' data).
            where_conditions = ["user_id = %s"]
            params = [user_id]

            if keyword:
                where_conditions.append("(job_title LIKE %s OR keyword LIKE %s)")
                params.extend([f'%{keyword}%', f'%{keyword}%'])

            if city:
                where_conditions.append("company_city = %s")
                params.append(city)
            if salary_range:
                # Buckets compare the lower bound of a 'N-MK'-style salary string.
                if salary_range == '0-10':
                    where_conditions.append(
                        "(REPLACE(job_salary, 'K', '') REGEXP '^[0-9]+-[0-9]+$' AND CAST(SUBSTRING_INDEX(REPLACE(job_salary, 'K', ''), '-', 1) AS UNSIGNED) < 10)")
                elif salary_range == '10-20':
                    where_conditions.append(
                        "(REPLACE(job_salary, 'K', '') REGEXP '^[0-9]+-[0-9]+$' AND CAST(SUBSTRING_INDEX(REPLACE(job_salary, 'K', ''), '-', 1) AS UNSIGNED) >= 10 AND CAST(SUBSTRING_INDEX(REPLACE(job_salary, 'K', ''), '-', 1) AS UNSIGNED) < 20)")
                elif salary_range == '20-30':
                    where_conditions.append(
                        "(REPLACE(job_salary, 'K', '') REGEXP '^[0-9]+-[0-9]+$' AND CAST(SUBSTRING_INDEX(REPLACE(job_salary, 'K', ''), '-', 1) AS UNSIGNED) >= 20 AND CAST(SUBSTRING_INDEX(REPLACE(job_salary, 'K', ''), '-', 1) AS UNSIGNED) < 30)")
                elif salary_range == '30+':
                    where_conditions.append(
                        "(REPLACE(job_salary, 'K', '') REGEXP '^[0-9]+-[0-9]+$' AND CAST(SUBSTRING_INDEX(REPLACE(job_salary, 'K', ''), '-', 1) AS UNSIGNED) >= 30)")

            where_clause = " AND ".join(where_conditions)

            # %% escapes the literal % for pymysql's format-style params.
            sql = f"""
            SELECT 
                id, job_title, job_salary, job_lable, job_company,
                job_company_tag, company_city, job_skill, keyword,
                DATE_FORMAT(crawl_time, '%%Y-%%m-%%d %%H:%%i:%%s') as crawl_time
            FROM bosszp 
            WHERE {where_clause}
            ORDER BY crawl_time DESC 
            LIMIT %s OFFSET %s
            """

            cursor.execute(sql, params + [per_page, offset])
            jobs = cursor.fetchall()

            # Total count reuses the same filters (without the LIMIT params).
            count_sql = f"SELECT COUNT(*) as total FROM bosszp WHERE {where_clause}"
            cursor.execute(count_sql, params)
            total_result = cursor.fetchone()
            total = total_result['total'] if total_result else 0
        finally:
            # Close on every path (the original leaked the connection on errors).
            cursor.close()
            conn.close()

        return jsonify({
            'success': True,
            'data': jobs,
            'pagination': {
                'page': page,
                'per_page': per_page,
                'total': total,
                'pages': (total + per_page - 1) // per_page if total > 0 else 1
            }
        })

    except Exception as e:
        print(f"API错误: {str(e)}")
        return jsonify({'success': False, 'message': str(e)}), 500


@app.route('/api/export')
@login_required
def export_data():
    """Export the current user's filtered job rows as a CSV download.

    Accepts the same ``keyword`` / ``city`` / ``salary_range`` filters as
    /api/jobs and always scopes rows to the requesting user.
    """
    try:
        user_id = request.user_id
        keyword = request.args.get('keyword', '')
        city = request.args.get('city', '')
        salary_range = request.args.get('salary_range', '')

        from flask import make_response
        import csv
        import io

        conn = get_db_connection()
        cursor = conn.cursor(pymysql.cursors.DictCursor)
        try:
            # Always scope the export to the requesting user.
            where_conditions = ["user_id = %s"]
            params = [user_id]

            if keyword:
                where_conditions.append("(job_title LIKE %s OR keyword LIKE %s)")
                params.extend([f'%{keyword}%', f'%{keyword}%'])

            if city:
                where_conditions.append("company_city = %s")
                params.append(city)

            if salary_range:
                # Buckets compare the lower bound of a 'N-MK'-style salary string.
                if salary_range == '0-10':
                    where_conditions.append(
                        "(REPLACE(job_salary, 'K', '') REGEXP '^[0-9]+-[0-9]+$' AND CAST(SUBSTRING_INDEX(REPLACE(job_salary, 'K', ''), '-', 1) AS UNSIGNED) < 10)")
                elif salary_range == '10-20':
                    where_conditions.append(
                        "(REPLACE(job_salary, 'K', '') REGEXP '^[0-9]+-[0-9]+$' AND CAST(SUBSTRING_INDEX(REPLACE(job_salary, 'K', ''), '-', 1) AS UNSIGNED) >= 10 AND CAST(SUBSTRING_INDEX(REPLACE(job_salary, 'K', ''), '-', 1) AS UNSIGNED) < 20)")
                elif salary_range == '20-30':
                    where_conditions.append(
                        "(REPLACE(job_salary, 'K', '') REGEXP '^[0-9]+-[0-9]+$' AND CAST(SUBSTRING_INDEX(REPLACE(job_salary, 'K', ''), '-', 1) AS UNSIGNED) >= 20 AND CAST(SUBSTRING_INDEX(REPLACE(job_salary, 'K', ''), '-', 1) AS UNSIGNED) < 30)")
                elif salary_range == '30+':
                    where_conditions.append(
                        "(REPLACE(job_salary, 'K', '') REGEXP '^[0-9]+-[0-9]+$' AND CAST(SUBSTRING_INDEX(REPLACE(job_salary, 'K', ''), '-', 1) AS UNSIGNED) >= 30)")

            where_clause = " AND ".join(where_conditions)

            sql = f"""
            SELECT 
                job_title, job_salary, job_lable, job_company,
                job_company_tag, company_city, job_skill, keyword,
                crawl_time
            FROM bosszp 
            WHERE {where_clause}
            ORDER BY crawl_time DESC
            """

            cursor.execute(sql, params)
            jobs = cursor.fetchall()
        finally:
            # Close on every path (the original leaked the connection on errors).
            cursor.close()
            conn.close()

        output = io.StringIO()
        # UTF-8 BOM so Excel detects the encoding of the Chinese headers.
        output.write('\ufeff')
        writer = csv.writer(output)

        writer.writerow(['职位名称', '薪资', '地区', '公司名称', '公司标签', '城市', '技能要求', '关键词', '爬取时间'])

        for job in jobs:
            writer.writerow([
                job['job_title'],
                job['job_salary'],
                job['job_lable'],
                job['job_company'],
                job['job_company_tag'],
                job['company_city'],
                job['job_skill'],
                job['keyword'],
                job['crawl_time'].strftime('%Y-%m-%d %H:%M:%S') if job['crawl_time'] else ''
            ])

        response = make_response(output.getvalue())
        response.headers['Content-Disposition'] = 'attachment; filename=bosszp_data.csv'
        # Declare the charset explicitly for clients that honour it.
        response.headers['Content-type'] = 'text/csv; charset=utf-8'

        return response

    except Exception as e:
        return jsonify({'success': False, 'message': str(e)}), 500

# Sanity-check the AI assistant configuration at import time.
print("=== AI助手配置检查 ===")
print(f"SPARK_APP_ID: {SPARK_APP_ID}")
# Mask secrets so credentials never land in plain-text logs (the original
# printed SPARK_API_KEY in clear text).
print(f"SPARK_API_KEY: {'*' * len(SPARK_API_KEY)}")
print(f"SPARK_API_SECRET: {'*' * len(SPARK_API_SECRET)}")
print("=====================")
# 修改AI助手API路由
# 修改AI助手API路由
@app.route('/api/ai/ask', methods=['POST'])
@login_required
def ai_assistant():
    """Answer a user question via the AI assistant.

    Falls back to the backup assistant if the primary one fails to
    initialise, and logs the Q&A to ai_conversations on a best-effort basis.
    """
    try:
        # silent=True: a missing/malformed JSON body becomes {} instead of a 500.
        data = request.get_json(silent=True) or {}
        question = data.get('question', '').strip()
        if not question:
            return jsonify({'success': False, 'message': '问题不能为空'})

        # Prefer the primary assistant; fall back to the backup on failure.
        try:
            knowledge_base, ai_assistant_obj = init_ai_assistant(use_backup=False)
        except Exception as e:
            print(f"主AI初始化失败: {e}")
            knowledge_base, ai_assistant_obj = init_ai_assistant(use_backup=True)

        result = ai_assistant_obj.ask(question)

        # Normalise the assistant's reply into (answer, is_recruitment).
        if isinstance(result, dict):
            answer = result.get('answer', '')
            is_recruitment = result.get('is_recruitment', False)
        else:
            answer = str(result) if result else "抱歉，暂时无法回答这个问题"
            is_recruitment = False

        # Logging is best-effort: a failure here must not break the reply,
        # but the connection must still be closed (the original leaked it
        # when the INSERT raised).
        try:
            conn = get_db_connection()
            try:
                cursor = conn.cursor()
                cursor.execute("""
                    INSERT INTO ai_conversations (user_id, question, answer, created_at) 
                    VALUES (%s, %s, %s, NOW())
                """, (request.user_id, question, answer))
                conn.commit()
                cursor.close()
            finally:
                conn.close()
        except Exception as e:
            print(f"记录AI对话失败: {e}")

        return jsonify({
            'success': True,
            'answer': answer,
            'is_recruitment': is_recruitment
        })

    except Exception as e:
        print(f"AI助手API错误: {e}")
        return jsonify({'success': False, 'message': f'AI助手暂时不可用: {str(e)}'}), 500
# 添加AI对话历史API
# AI conversation history API
@app.route('/api/ai/conversations')
@login_required
def get_ai_conversations():
    """Paginated AI Q&A history for the current user, newest first."""
    try:
        user_id = request.user_id
        page = int(request.args.get('page', 1))
        per_page = int(request.args.get('per_page', 10))
        offset = (page - 1) * per_page

        conn = get_db_connection()
        cursor = conn.cursor(pymysql.cursors.DictCursor)
        try:
            cursor.execute("""
                SELECT id, question, answer, created_at 
                FROM ai_conversations 
                WHERE user_id = %s 
                ORDER BY created_at DESC 
                LIMIT %s OFFSET %s
            """, (user_id, per_page, offset))
            conversations = cursor.fetchall()

            cursor.execute("SELECT COUNT(*) as total FROM ai_conversations WHERE user_id = %s", (user_id,))
            total = cursor.fetchone()['total']
        finally:
            # Close on every path (the original leaked the connection on errors).
            cursor.close()
            conn.close()

        return jsonify({
            'success': True,
            'data': conversations,
            'pagination': {
                'page': page,
                'per_page': per_page,
                'total': total,
                'pages': (total + per_page - 1) // per_page
            }
        })

    except Exception as e:
        return jsonify({'success': False, 'message': str(e)}), 500

# 健康检查路由
@app.route('/api/health')
def health_check():
    return jsonify({'status': 'ok', 'message': 'Server is running'})


if __name__ == '__main__':
    # Ensure Flask's template/static directories exist before start-up.
    os.makedirs('templates', exist_ok=True)
    os.makedirs('static', exist_ok=True)

    print("启动Boss直聘数据展示系统...")
    print("访问地址: http://localhost:5000")
    print("API文档: http://localhost:5000/api/health")

    # NOTE(review): debug=True combined with host 0.0.0.0 exposes the
    # Werkzeug debugger to the whole network — disable debug in production.
    app.run(debug=True, host='0.0.0.0', port=5000)