"""
JH子系统 - 职位数据服务模块
提供职位数据查询、统计和分析功能
"""

import sqlite3
import asyncio
import random
import os
from datetime import datetime, timedelta
from typing import List, Dict, Any, Optional
from dataclasses import dataclass
from pathlib import Path
import json

@dataclass
class JobDataFilters:
    """Filter criteria for job queries; every field is optional.

    A ``None`` value means "do not filter on this field".  String fields
    are applied with SQL ``LIKE`` (substring) semantics by the query
    builders in ``JobDataService``.
    """
    location: Optional[str] = None            # substring of job_location
    company_name: Optional[str] = None        # substring of company_name
    job_title: Optional[str] = None           # substring of job_title or job_title_short
    job_type: Optional[str] = None            # substring of job_title_short
    country: Optional[str] = None             # substring of job_country
    schedule_type: Optional[str] = None       # substring of job_schedule_type
    work_from_home: Optional[bool] = None     # exact match on job_work_from_home
    search_query: Optional[str] = None        # free text over title/company/skills
    salary_min: Optional[float] = None        # lower bound on salary_year_avg
    salary_max: Optional[float] = None        # upper bound on salary_year_avg
    has_health_insurance: Optional[bool] = None  # exact match on job_health_insurance
    skills: Optional[List[str]] = None        # each entry matched against job_skills

class JobDataService:
    """职位数据服务类"""
    
    def __init__(self, db_path: str = "jh_job_analytics.db"):
        self.db_path = db_path
        self.project_root = Path(__file__).parent.parent.parent
        self.job_data_file = self.project_root / "data" / "job_info" / "job_data.json"
        self._ensure_db_exists()
    
    def _ensure_db_exists(self):
        """确保数据库存在并创建表结构"""
        with sqlite3.connect(self.db_path) as conn:
            # 创建jobs表
            conn.execute('''
            CREATE TABLE IF NOT EXISTS jobs (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                job_title_short TEXT,
                job_title TEXT,
                job_location TEXT,
                job_via TEXT,
                job_schedule_type TEXT,
                job_work_from_home BOOLEAN,
                search_location TEXT,
                job_posted_date TEXT,
                job_no_degree_mention BOOLEAN,
                job_health_insurance BOOLEAN,
                job_country TEXT,
                salary_rate TEXT,
                salary_year_avg REAL,
                salary_hour_avg REAL,
                company_name TEXT,
                job_skills TEXT,
                job_type_skills TEXT,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
            ''')
            
            # 创建统计表
            conn.execute('''
            CREATE TABLE IF NOT EXISTS job_statistics (
                id INTEGER PRIMARY KEY,
                total_jobs INTEGER,
                avg_salary REAL,
                max_salary REAL,
                min_salary REAL,
                remote_jobs INTEGER,
                total_companies INTEGER,
                updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
            ''')
            
            conn.commit()

    async def initialize_database(self, use_real_data: bool = True):
        """Populate the database on first run.

        When the jobs table is empty, import the real JSON export (if
        requested and present) or fall back to the mock generator;
        otherwise existing rows are left untouched.  Aggregate statistics
        are refreshed in every case.
        """
        with sqlite3.connect(self.db_path) as conn:
            row_count = conn.execute('SELECT COUNT(*) FROM jobs').fetchone()[0]

            if row_count:
                print(f"数据库已包含 {row_count} 条职位记录")
            elif use_real_data and self.job_data_file.exists():
                print(f"导入真实职位数据: {self.job_data_file}")
                await self._import_real_job_data()
            else:
                print("使用虚拟数据")
                await self._generate_mock_data()

        # Refresh derived statistics regardless of how the data got there.
        await self._update_statistics()

    async def _import_real_job_data(self):
        """导入真实的job_data.json数据"""
        try:
            print(f"开始导入数据文件: {self.job_data_file}")
            
            # 分批读取大文件，避免内存溢出
            batch_size = 1000
            total_imported = 0
            
            with open(self.job_data_file, 'r', encoding='utf-8') as f:
                # 逐行读取JSON数组
                content = f.read()
                jobs_data = json.loads(content)
                
                print(f"发现 {len(jobs_data)} 条职位记录，开始导入...")
                
                with sqlite3.connect(self.db_path) as conn:
                    # 清空现有数据
                    conn.execute('DELETE FROM jobs')
                    
                    # 批量插入数据
                    for i in range(0, len(jobs_data), batch_size):
                        batch = jobs_data[i:i + batch_size]
                        batch_jobs = []
                        
                        for job in batch:
                            # 处理数据类型转换
                            job_data = (
                                job.get('job_title_short'),
                                job.get('job_title'),
                                job.get('job_location'),
                                job.get('job_via'),
                                job.get('job_schedule_type'),
                                bool(job.get('job_work_from_home', False)),
                                job.get('search_location'),
                                job.get('job_posted_date'),
                                bool(job.get('job_no_degree_mention', False)),
                                bool(job.get('job_health_insurance', False)),
                                job.get('job_country'),
                                job.get('salary_rate'),
                                float(job.get('salary_year_avg', 0)) if job.get('salary_year_avg') else None,
                                float(job.get('salary_hour_avg', 0)) if job.get('salary_hour_avg') else None,
                                job.get('company_name'),
                                job.get('job_skills'),
                                job.get('job_type_skills')
                            )
                            batch_jobs.append(job_data)
                        
                        # 插入批次数据
                        conn.executemany('''
                        INSERT INTO jobs (
                            job_title_short, job_title, job_location, job_via,
                            job_schedule_type, job_work_from_home, search_location,
                            job_posted_date, job_no_degree_mention, job_health_insurance,
                            job_country, salary_rate, salary_year_avg, salary_hour_avg,
                            company_name, job_skills, job_type_skills
                        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                        ''', batch_jobs)
                        
                        total_imported += len(batch_jobs)
                        if total_imported % 10000 == 0:
                            print(f"已导入 {total_imported} 条记录...")
                    
                    conn.commit()
                    print(f"✅ 成功导入 {total_imported} 条真实职位数据")
                    
        except Exception as e:
            print(f"❌ 导入真实数据失败: {e}")
            print("降级到虚拟数据模式")
            await self._generate_mock_data()
    
    async def _generate_mock_data(self):
        """生成虚拟职位数据"""
        # 不再生成虚拟数据，直接使用真实数据
        print("⚠️ 跳过虚拟数据生成，建议使用真实数据")
        print("提示：数据库中已有真实职位数据，无需生成虚拟数据")
        return
    
    def _create_mock_jobs(self, count: int) -> List[tuple]:
        """创建虚拟职位数据"""
        
        # 定义基础数据
        job_titles_short = [
            "Python开发工程师", "数据科学家", "AI工程师", "前端工程师", "后端工程师",
            "全栈工程师", "数据分析师", "机器学习工程师", "DevOps工程师", "产品经理",
            "UI/UX设计师", "测试工程师", "算法工程师", "架构师", "技术总监"
        ]
        
        job_titles_full = {
            "Python开发工程师": ["Python后端开发工程师", "Python全栈开发工程师", "高级Python工程师"],
            "数据科学家": ["高级数据科学家", "数据科学专家", "数据科学总监"],
            "AI工程师": ["人工智能工程师", "深度学习工程师", "NLP算法工程师"],
            "前端工程师": ["React前端工程师", "Vue.js前端工程师", "高级前端工程师"],
            "后端工程师": ["Java后端工程师", "Go后端工程师", "Node.js后端工程师"],
            "全栈工程师": ["全栈开发工程师", "高级全栈工程师", "全栈技术专家"],
            "数据分析师": ["高级数据分析师", "业务数据分析师", "产品数据分析师"],
            "机器学习工程师": ["深度学习工程师", "计算机视觉工程师", "推荐算法工程师"],
            "DevOps工程师": ["云平台工程师", "运维开发工程师", "基础设施工程师"],
            "产品经理": ["高级产品经理", "AI产品经理", "数据产品经理"],
            "UI/UX设计师": ["交互设计师", "视觉设计师", "用户体验设计师"],
            "测试工程师": ["自动化测试工程师", "性能测试工程师", "质量保证工程师"],
            "算法工程师": ["推荐算法工程师", "搜索算法工程师", "风控算法工程师"],
            "架构师": ["系统架构师", "技术架构师", "解决方案架构师"],
            "技术总监": ["研发总监", "技术副总裁", "首席技术官"]
        }
        
        locations = [
            "北京", "上海", "深圳", "杭州", "广州", "成都", "武汉", "西安",
            "南京", "苏州", "天津", "重庆", "青岛", "大连", "厦门"
        ]
        
        companies = [
            "阿里巴巴", "腾讯", "百度", "字节跳动", "美团", "京东", "滴滴出行",
            "华为", "小米", "网易", "新浪", "搜狐", "360", "快手", "拼多多",
            "蚂蚁金服", "携程", "去哪儿", "爱奇艺", "优酷", "B站", "知乎"
        ]
        
        job_vias = ["拉勾网", "BOSS直聘", "智联招聘", "前程无忧", "猎聘网", "LinkedIn"]
        
        schedule_types = ["全职", "兼职", "实习", "合同工"]
        
        skills_by_role = {
            "Python开发工程师": ["Python", "Django", "Flask", "MySQL", "Redis", "Docker"],
            "数据科学家": ["Python", "R", "SQL", "机器学习", "深度学习", "TensorFlow", "PyTorch"],
            "AI工程师": ["Python", "TensorFlow", "PyTorch", "深度学习", "计算机视觉", "NLP"],
            "前端工程师": ["JavaScript", "React", "Vue.js", "HTML", "CSS", "TypeScript"],
            "后端工程师": ["Java", "Spring", "MySQL", "Redis", "Kafka", "微服务"],
            "全栈工程师": ["JavaScript", "Python", "React", "Node.js", "MySQL", "MongoDB"],
            "数据分析师": ["SQL", "Python", "Excel", "Tableau", "Power BI", "统计学"],
            "机器学习工程师": ["Python", "机器学习", "深度学习", "TensorFlow", "Scikit-learn"],
            "DevOps工程师": ["Docker", "Kubernetes", "AWS", "Jenkins", "Git", "Linux"],
            "产品经理": ["产品设计", "需求分析", "项目管理", "用户研究", "数据分析"],
            "UI/UX设计师": ["Sketch", "Figma", "Photoshop", "Illustrator", "用户体验设计"],
            "测试工程师": ["自动化测试", "Selenium", "Appium", "性能测试", "接口测试"],
            "算法工程师": ["Python", "机器学习", "推荐算法", "数据挖掘", "分布式计算"],
            "架构师": ["系统设计", "微服务", "分布式系统", "高并发", "性能优化"],
            "技术总监": ["团队管理", "技术规划", "架构设计", "项目管理", "技术选型"]
        }
        
        # 生成虚拟数据
        jobs = []
        for i in range(count):
            job_title_short = random.choice(job_titles_short)
            job_title = random.choice(job_titles_full[job_title_short])
            location = random.choice(locations)
            company = random.choice(companies)
            
            # 根据职位类型生成薪资
            salary_ranges = {
                "Python开发工程师": (15000, 35000),
                "数据科学家": (20000, 45000),
                "AI工程师": (25000, 50000),
                "前端工程师": (12000, 30000),
                "后端工程师": (15000, 35000),
                "全栈工程师": (18000, 40000),
                "数据分析师": (12000, 28000),
                "机器学习工程师": (20000, 45000),
                "DevOps工程师": (18000, 38000),
                "产品经理": (15000, 35000),
                "UI/UX设计师": (10000, 25000),
                "测试工程师": (10000, 25000),
                "算法工程师": (20000, 45000),
                "架构师": (30000, 60000),
                "技术总监": (40000, 80000)
            }
            
            min_salary, max_salary = salary_ranges.get(job_title_short, (10000, 30000))
            salary_year_avg = random.uniform(min_salary, max_salary)
            salary_hour_avg = salary_year_avg / (22 * 8)  # 假设每月22个工作日，每天8小时
            
            # 生成其他字段
            posted_date = (datetime.now() - timedelta(days=random.randint(1, 90))).strftime('%Y-%m-%d')
            skills = random.sample(skills_by_role.get(job_title_short, ["Python", "SQL"]), 
                                 random.randint(3, 6))
            
            job_data = (
                job_title_short,  # job_title_short
                job_title,        # job_title
                location,         # job_location
                random.choice(job_vias),  # job_via
                random.choice(schedule_types),  # job_schedule_type
                random.choice([True, False]),   # job_work_from_home
                location,         # search_location
                posted_date,      # job_posted_date
                random.choice([True, False]),   # job_no_degree_mention
                random.choice([True, False]),   # job_health_insurance
                "中国",           # job_country
                "年薪",           # salary_rate
                salary_year_avg,  # salary_year_avg
                salary_hour_avg,  # salary_hour_avg
                company,          # company_name
                ",".join(skills), # job_skills
                job_title_short,  # job_type_skills
            )
            
            jobs.append(job_data)
        
        return jobs
    
    async def _update_statistics(self):
        """更新统计数据"""
        with sqlite3.connect(self.db_path) as conn:
            # 创建统计表如果不存在
            conn.execute('''
            CREATE TABLE IF NOT EXISTS skill_stats (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                skill_name TEXT UNIQUE,
                job_count INTEGER,
                avg_salary REAL,
                created_date TEXT DEFAULT CURRENT_TIMESTAMP
            )
            ''')
            
            conn.execute('''
            CREATE TABLE IF NOT EXISTS location_stats (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                location TEXT UNIQUE,
                job_count INTEGER,
                avg_salary REAL,
                created_date TEXT DEFAULT CURRENT_TIMESTAMP
            )
            ''')
            
            # 更新技能统计 - 简化版，避免JSON函数问题
            conn.execute("DELETE FROM skill_stats")
            
            # 直接从技能字段统计 (假设技能用逗号分隔)
            skill_query = '''
            WITH skill_split AS (
                SELECT 
                    TRIM(
                        SUBSTR(
                            job_skills, 
                            CASE WHEN INSTR(job_skills, ',') = 0 THEN 1 
                                 ELSE 1 END,
                            CASE WHEN INSTR(job_skills, ',') = 0 THEN LENGTH(job_skills)
                                 ELSE INSTR(job_skills, ',') - 1 END
                        )
                    ) as skill_name,
                    salary_year_avg
                FROM jobs 
                WHERE job_skills IS NOT NULL AND job_skills != ''
                UNION ALL
                SELECT 
                    TRIM(
                        SUBSTR(
                            job_skills, 
                            INSTR(job_skills, ',') + 1
                        )
                    ) as skill_name,
                    salary_year_avg
                FROM jobs 
                WHERE job_skills IS NOT NULL 
                AND INSTR(job_skills, ',') > 0
                AND LENGTH(TRIM(SUBSTR(job_skills, INSTR(job_skills, ',') + 1))) > 0
            )
            INSERT INTO skill_stats (skill_name, job_count, avg_salary)
            SELECT 
                skill_name,
                COUNT(*) as job_count,
                AVG(salary_year_avg) as avg_salary
            FROM skill_split
            WHERE skill_name != '' AND skill_name IS NOT NULL
            GROUP BY skill_name
            ORDER BY job_count DESC
            '''
            
            # 更简单的技能统计方法
            conn.execute('''
            INSERT INTO skill_stats (skill_name, job_count, avg_salary)
            SELECT 
                'Python' as skill_name,
                COUNT(*) as job_count,
                AVG(salary_year_avg) as avg_salary
            FROM jobs 
            WHERE job_skills LIKE '%Python%' OR job_title LIKE '%Python%'
            UNION
            SELECT 
                'Java' as skill_name,
                COUNT(*) as job_count,
                AVG(salary_year_avg) as avg_salary
            FROM jobs 
            WHERE job_skills LIKE '%Java%' OR job_title LIKE '%Java%'
            UNION
            SELECT 
                'JavaScript' as skill_name,
                COUNT(*) as job_count,
                AVG(salary_year_avg) as avg_salary
            FROM jobs 
            WHERE job_skills LIKE '%JavaScript%' OR job_title LIKE '%JavaScript%'
            UNION
            SELECT 
                '机器学习' as skill_name,
                COUNT(*) as job_count,
                AVG(salary_year_avg) as avg_salary
            FROM jobs 
            WHERE job_skills LIKE '%机器学习%' OR job_title LIKE '%机器学习%'
            UNION
            SELECT 
                '数据分析' as skill_name,
                COUNT(*) as job_count,
                AVG(salary_year_avg) as avg_salary
            FROM jobs 
            WHERE job_skills LIKE '%数据分析%' OR job_title LIKE '%数据分析%'
            ''')
            
            # 更新地区统计
            conn.execute("DELETE FROM location_stats")
            conn.execute('''
            INSERT INTO location_stats (job_location, job_count, avg_salary)
            SELECT 
                job_location,
                COUNT(*) as job_count,
                AVG(salary_year_avg) as avg_salary
            FROM jobs
            WHERE job_location IS NOT NULL AND job_location != ''
            GROUP BY job_location
            ORDER BY job_count DESC
            ''')
            
            conn.commit()
    
    async def get_overview_statistics(self) -> Dict[str, Any]:
        """获取概览统计信息"""
        # 确保统计表已创建并有数据
        try:
            with sqlite3.connect(self.db_path) as conn:
                cursor = conn.execute("SELECT COUNT(*) FROM skill_stats")
                skill_count = cursor.fetchone()[0]
                if skill_count == 0:
                    await self._update_statistics()
        except:
            await self._update_statistics()
        
        with sqlite3.connect(self.db_path) as conn:
            # 基础统计
            cursor = conn.execute('''
            SELECT 
                COUNT(*) as total_jobs,
                AVG(salary_year_avg) as avg_salary,
                MIN(salary_year_avg) as min_salary,
                MAX(salary_year_avg) as max_salary,
                COUNT(CASE WHEN job_work_from_home = 1 THEN 1 END) as remote_jobs
            FROM jobs
            WHERE salary_year_avg IS NOT NULL
            ''')
            basic_stats = cursor.fetchone()
            
            # 获取公司总数
            cursor = conn.execute('SELECT COUNT(DISTINCT company_name) FROM jobs WHERE company_name IS NOT NULL')
            total_companies = cursor.fetchone()[0]
            
            # 热门技能
            try:
                cursor = conn.execute('''
                SELECT skill_name, job_count, avg_salary
                FROM skill_stats
                ORDER BY job_count DESC
                LIMIT 10
                ''')
                top_skills = cursor.fetchall()
            except:
                top_skills = []
            
            # 热门地区
            try:
                cursor = conn.execute('''
                SELECT job_location, job_count, avg_salary
                FROM location_stats
                ORDER BY job_count DESC
                LIMIT 10
                ''')
                top_locations = cursor.fetchall()
            except:
                top_locations = []
            
            # 薪资分布
            cursor = conn.execute('''
            SELECT 
                CASE 
                    WHEN salary_year_avg < 15000 THEN '15K以下'
                    WHEN salary_year_avg < 25000 THEN '15K-25K'
                    WHEN salary_year_avg < 35000 THEN '25K-35K'
                    WHEN salary_year_avg < 50000 THEN '35K-50K'
                    ELSE '50K以上'
                END as salary_range,
                COUNT(*) as count
            FROM jobs
            GROUP BY salary_range
            ORDER BY MIN(salary_year_avg)
            ''')
            salary_distribution = cursor.fetchall()
            
            return {
                "total_jobs": basic_stats[0],
                "total_companies": total_companies,
                "top_location": top_locations[0][0] if top_locations else "N/A",
                "avg_salary": round(basic_stats[1], 2) if basic_stats[1] else 0,
                "min_salary": round(basic_stats[2], 2) if basic_stats[2] else 0,
                "max_salary": round(basic_stats[3], 2) if basic_stats[3] else 0,
                "remote_jobs": basic_stats[4],
                "remote_percentage": round((basic_stats[4] / basic_stats[0]) * 100, 1) if basic_stats[0] > 0 else 0,
                "top_skills": [
                    {
                        "skill": skill[0],
                        "job_count": skill[1],
                        "avg_salary": round(skill[2], 2) if skill[2] else 0
                    }
                    for skill in top_skills
                ],
                "top_locations": [
                    {
                        "location": loc[0],
                        "job_count": loc[1],
                        "avg_salary": round(loc[2], 2) if loc[2] else 0
                    }
                    for loc in top_locations
                ],
                "salary_distribution": [
                    {
                        "range": dist[0],
                        "count": dist[1]
                    }
                    for dist in salary_distribution
                ]
            }
    
    async def get_jobs_paginated(
        self, 
        page: int = 1, 
        page_size: int = 20, 
        filters: Optional[JobDataFilters] = None
    ) -> Dict[str, Any]:
        """Return one page of jobs plus pagination metadata.

        All string filters use LIKE substring matching; results are ordered
        by job_posted_date descending.  Each job's job_skills field is
        normalized to a list of strings.
        """
        import ast  # hoisted here once instead of per-row in the loop below

        if filters is None:
            filters = JobDataFilters()

        # The same WHERE clause drives both the count and the data query.
        base_query = "SELECT * FROM jobs WHERE 1=1"
        count_query = "SELECT COUNT(*) FROM jobs WHERE 1=1"
        params = []
        filter_conditions = ""

        if filters.job_type:
            filter_conditions += " AND job_title_short LIKE ?"
            params.append(f"%{filters.job_type}%")

        if filters.location:
            filter_conditions += " AND job_location LIKE ?"
            params.append(f"%{filters.location}%")

        if filters.country:
            filter_conditions += " AND job_country LIKE ?"
            params.append(f"%{filters.country}%")

        if filters.schedule_type:
            filter_conditions += " AND job_schedule_type LIKE ?"
            params.append(f"%{filters.schedule_type}%")

        if filters.work_from_home is not None:
            filter_conditions += " AND job_work_from_home = ?"
            params.append(filters.work_from_home)

        if filters.search_query:
            filter_conditions += " AND (job_title LIKE ? OR job_title_short LIKE ? OR company_name LIKE ? OR job_skills LIKE ?)"
            search_param = f"%{filters.search_query}%"
            params.extend([search_param, search_param, search_param, search_param])

        # BUG FIX: compare against None so a legitimate bound of 0 is not
        # silently ignored by truthiness.
        if filters.salary_min is not None:
            filter_conditions += " AND salary_year_avg >= ?"
            params.append(filters.salary_min)

        if filters.salary_max is not None:
            filter_conditions += " AND salary_year_avg <= ?"
            params.append(filters.salary_max)

        if filters.company_name:
            filter_conditions += " AND company_name LIKE ?"
            params.append(f"%{filters.company_name}%")

        if filters.has_health_insurance is not None:
            filter_conditions += " AND job_health_insurance = ?"
            params.append(filters.has_health_insurance)

        if filters.skills:
            for skill in filters.skills:
                filter_conditions += " AND job_skills LIKE ?"
                params.append(f"%{skill}%")

        base_query += filter_conditions
        count_query += filter_conditions

        with sqlite3.connect(self.db_path) as conn:
            conn.row_factory = sqlite3.Row

            # Total matching rows, then derive page bounds from it.
            cursor = conn.execute(count_query, params)
            total_items = cursor.fetchone()[0]

            total_pages = (total_items + page_size - 1) // page_size
            offset = (page - 1) * page_size

            paginated_query = base_query + " ORDER BY job_posted_date DESC LIMIT ? OFFSET ?"
            cursor = conn.execute(paginated_query, params + [page_size, offset])

            jobs = []
            for row in cursor.fetchall():
                job = dict(row)
                # job_skills may be a Python-literal list string or a
                # comma-separated string; normalize both to list[str].
                if job['job_skills']:
                    try:
                        if job['job_skills'].startswith('['):
                            job['job_skills'] = ast.literal_eval(job['job_skills'])
                        else:
                            job['job_skills'] = [skill.strip() for skill in job['job_skills'].split(',')]
                    except (ValueError, SyntaxError):
                        job['job_skills'] = [job['job_skills']] if job['job_skills'] else []
                else:
                    job['job_skills'] = []
                jobs.append(job)

            return {
                "jobs": jobs,
                "pagination": {
                    "page": page,
                    "page_size": page_size,
                    "total_items": total_items,
                    "total_pages": total_pages,
                    "has_next": page < total_pages,
                    "has_prev": page > 1
                }
            }

    async def get_job_by_id(self, job_id: int) -> Optional[Dict[str, Any]]:
        """根据ID获取单个岗位详细信息"""
        query = "SELECT * FROM jobs WHERE id = ?"
        
        with sqlite3.connect(self.db_path) as conn:
            conn.row_factory = sqlite3.Row
            cursor = conn.execute(query, [job_id])
            row = cursor.fetchone()
            
            if row:
                job = dict(row)
                # 解析技能字符串为列表
                if job['job_skills']:
                    try:
                        # 尝试解析JSON格式的技能
                        import json
                        job['job_skills'] = json.loads(job['job_skills'])
                    except (json.JSONDecodeError, TypeError):
                        # 如果不是JSON格式，按逗号分割
                        job['job_skills'] = [skill.strip() for skill in job['job_skills'].split(',') if skill.strip()]
                else:
                    job['job_skills'] = []
                
                # 格式化日期
                if job['job_posted_date']:
                    try:
                        from datetime import datetime
                        # 假设日期格式为 YYYY-MM-DD 或 YYYY-MM-DD HH:MM:SS
                        if ' ' in job['job_posted_date']:
                            job['job_posted_date_formatted'] = datetime.strptime(
                                job['job_posted_date'], '%Y-%m-%d %H:%M:%S'
                            ).strftime('%Y年%m月%d日')
                        else:
                            job['job_posted_date_formatted'] = datetime.strptime(
                                job['job_posted_date'], '%Y-%m-%d'
                            ).strftime('%Y年%m月%d日')
                    except:
                        job['job_posted_date_formatted'] = job['job_posted_date']
                
                return job
            
            return None

    async def search_jobs(self, filters: JobDataFilters, limit: int = 50) -> List[Dict[str, Any]]:
        """Search jobs matching the filters, newest first, capped at ``limit``.

        String filters use LIKE substring matching.  job_skills is returned
        as a list of strings (split on commas).
        """
        query = "SELECT * FROM jobs WHERE 1=1"
        params = []

        if filters.location:
            query += " AND job_location LIKE ?"
            params.append(f"%{filters.location}%")

        if filters.company_name:
            query += " AND company_name LIKE ?"
            params.append(f"%{filters.company_name}%")

        if filters.job_title:
            query += " AND (job_title LIKE ? OR job_title_short LIKE ?)"
            params.extend([f"%{filters.job_title}%", f"%{filters.job_title}%"])

        # BUG FIX: compare against None so a legitimate 0 bound is applied.
        if filters.salary_min is not None:
            query += " AND salary_year_avg >= ?"
            params.append(filters.salary_min)

        if filters.salary_max is not None:
            query += " AND salary_year_avg <= ?"
            params.append(filters.salary_max)

        if filters.work_from_home is not None:
            query += " AND job_work_from_home = ?"
            params.append(filters.work_from_home)

        # has_health_insurance is a declared dataclass field, so the old
        # hasattr guard was redundant.
        if filters.has_health_insurance is not None:
            query += " AND job_health_insurance = ?"
            params.append(filters.has_health_insurance)

        if filters.skills:
            for skill in filters.skills:
                query += " AND job_skills LIKE ?"
                params.append(f"%{skill}%")

        # BUG FIX: bind LIMIT as a parameter instead of f-string
        # interpolation into the SQL text.
        query += " ORDER BY job_posted_date DESC LIMIT ?"
        params.append(limit)

        with sqlite3.connect(self.db_path) as conn:
            conn.row_factory = sqlite3.Row
            cursor = conn.execute(query, params)

            jobs = []
            for row in cursor.fetchall():
                job = dict(row)
                # Normalize the comma-separated skills string to a list.
                if job['job_skills']:
                    job['job_skills'] = [skill.strip() for skill in job['job_skills'].split(',')]
                else:
                    job['job_skills'] = []
                jobs.append(job)

            return jobs
    
    async def get_salary_trends(self, skill: str = None, location: str = None) -> Dict[str, Any]:
        """获取薪资趋势数据"""
        query = '''
        SELECT 
            job_title_short,
            AVG(salary_year_avg) as avg_salary,
            COUNT(*) as job_count
        FROM jobs
        WHERE 1=1
        '''
        params = []
        
        if skill:
            query += " AND job_skills LIKE ?"
            params.append(f"%{skill}%")
        
        if location:
            query += " AND job_location LIKE ?"
            params.append(f"%{location}%")
        
        query += " GROUP BY job_title_short ORDER BY avg_salary DESC"
        
        with sqlite3.connect(self.db_path) as conn:
            cursor = conn.execute(query, params)
            trends = [
                {
                    "job_title": row[0],
                    "avg_salary": round(row[1], 2),
                    "job_count": row[2]
                }
                for row in cursor.fetchall()
            ]
            
            return {
                "trends": trends,
                "filter_skill": skill,
                "filter_location": location
            }
    
    async def get_market_insights(self) -> Dict[str, Any]:
        """Aggregate market-level insights from the jobs table.

        Returns:
            Dict with:
              - popular_skill_combinations: exact ``job_skills`` strings seen
                at least twice, with frequency and average yearly salary.
              - high_salary_features: average salary / job count per
                combination of job perks (remote, insurance, no-degree).
              - monthly_job_trends: posting counts per month for the last
                six months (based on ``job_posted_date``).
              - generated_at: ISO timestamp of when this was computed.
        """
        insights = {}
        
        with sqlite3.connect(self.db_path) as conn:
            # 1. Most popular exact skill combinations (the raw comma-separated
            #    job_skills string is the grouping key).
            cursor = conn.execute('''
            SELECT job_skills, COUNT(*) as freq, AVG(salary_year_avg) as avg_salary
            FROM jobs
            WHERE job_skills IS NOT NULL
            GROUP BY job_skills
            HAVING COUNT(*) >= 2
            ORDER BY freq DESC
            LIMIT 10
            ''')
            
            skill_combinations = [
                {
                    "skills": row[0].split(',') if row[0] else [],
                    "frequency": row[1],
                    # AVG() is NULL when every salary in the group is NULL.
                    "avg_salary": round(row[2], 2) if row[2] is not None else 0
                }
                for row in cursor.fetchall()
            ]
            
            # 2. Salary profile per combination of job perks.
            cursor = conn.execute('''
            SELECT 
                job_work_from_home,
                job_health_insurance,
                job_no_degree_mention,
                AVG(salary_year_avg) as avg_salary,
                COUNT(*) as count
            FROM jobs
            GROUP BY job_work_from_home, job_health_insurance, job_no_degree_mention
            ORDER BY avg_salary DESC
            ''')
            
            high_salary_features = []
            for row in cursor.fetchall():
                features = []
                if row[0]: features.append("远程工作")
                if row[1]: features.append("包含健康保险")
                if row[2]: features.append("不要求学历")
                
                high_salary_features.append({
                    "features": features,
                    # Guard against round(None, 2) raising TypeError when the
                    # whole group has NULL salaries (AVG() returns NULL then).
                    "avg_salary": round(row[3], 2) if row[3] is not None else 0,
                    "job_count": row[4]
                })
            
            # 3. Posting volume per month over the last six months
            #    (text comparison works because dates are ISO-formatted).
            cursor = conn.execute('''
            SELECT 
                SUBSTR(job_posted_date, 1, 7) as month,
                COUNT(*) as job_count
            FROM jobs
            WHERE job_posted_date >= date('now', '-6 months')
            GROUP BY month
            ORDER BY month
            ''')
            
            monthly_trends = [
                {
                    "month": row[0],
                    "job_count": row[1]
                }
                for row in cursor.fetchall()
            ]
            
            insights = {
                "popular_skill_combinations": skill_combinations,
                "high_salary_features": high_salary_features,
                "monthly_job_trends": monthly_trends,
                "generated_at": datetime.now().isoformat()
            }
        
        return insights
    
    async def get_chart_data(self, chart_type: str, dimension: str = None, limit: int = None) -> Dict[str, Any]:
        """Dispatch to the chart-specific data builder for *chart_type*.

        Args:
            chart_type: One of jobTypeChart / locationChart / salaryChart /
                timeChart / skillsChart / companyChart.
            dimension: Time granularity, only used by timeChart.
            limit: Optional row cap, used by the remaining chart types.

        Raises:
            ValueError: if *chart_type* is not a supported chart.
        """
        # Lazily build the database on first use.
        if not os.path.exists(self.db_path):
            await self.initialize_database()
        
        with sqlite3.connect(self.db_path) as conn:
            # timeChart is the only chart driven by `dimension`; every other
            # chart takes the optional row `limit`.
            if chart_type == "timeChart":
                return await self._get_time_chart_data(conn, dimension)
            
            builder_names = {
                "jobTypeChart": "_get_job_type_chart_data",
                "locationChart": "_get_location_chart_data",
                "salaryChart": "_get_salary_chart_data",
                "skillsChart": "_get_skills_chart_data",
                "companyChart": "_get_company_chart_data",
            }
            if chart_type not in builder_names:
                raise ValueError(f"不支持的图表类型: {chart_type}")
            builder = getattr(self, builder_names[chart_type])
            return await builder(conn, limit)
    
    async def _get_job_type_chart_data(self, conn, limit: int = None) -> Dict[str, Any]:
        """获取职位类型分布数据"""
        query = '''
        SELECT job_title_short, COUNT(*) as count
        FROM jobs
        GROUP BY job_title_short
        ORDER BY count DESC
        '''
        if limit:
            query += f" LIMIT {limit}"
        
        cursor = conn.execute(query)
        results = cursor.fetchall()
        
        return {
            "labels": [row[0] for row in results],
            "values": [row[1] for row in results],
            "type": "职位类型分布"
        }
    
    async def _get_location_chart_data(self, conn, limit: int = None) -> Dict[str, Any]:
        """获取地区分布数据"""
        query = '''
        SELECT job_location, COUNT(*) as count
        FROM jobs
        GROUP BY job_location
        ORDER BY count DESC
        '''
        if limit:
            query += f" LIMIT {limit}"
        
        cursor = conn.execute(query)
        results = cursor.fetchall()
        
        return {
            "labels": [row[0] for row in results],
            "values": [row[1] for row in results],
            "type": "地区分布"
        }
    
    async def _get_salary_chart_data(self, conn, limit: int = None) -> Dict[str, Any]:
        """获取薪资分布数据"""
        query = '''
        SELECT 
            CASE 
                WHEN salary_year_avg < 15000 THEN '0-1.5万'
                WHEN salary_year_avg < 25000 THEN '1.5-2.5万'
                WHEN salary_year_avg < 35000 THEN '2.5-3.5万'
                WHEN salary_year_avg < 50000 THEN '3.5-5万'
                ELSE '5万以上'
            END as salary_range,
            COUNT(*) as count
        FROM jobs
        WHERE salary_year_avg IS NOT NULL
        GROUP BY 1
        ORDER BY MIN(salary_year_avg)
        '''
        
        cursor = conn.execute(query)
        results = cursor.fetchall()
        
        return {
            "labels": [row[0] for row in results],
            "values": [row[1] for row in results],
            "type": "薪资分布"
        }
    
    async def _get_time_chart_data(self, conn, dimension: str = "daily") -> Dict[str, Any]:
        """获取时间趋势数据"""
        if dimension == "daily":
            # 获取2023年最后30天的数据
            query = '''
            SELECT date(job_posted_date) as day, COUNT(*) as count
            FROM jobs
            WHERE date(job_posted_date) >= '2023-12-01'
            GROUP BY day
            ORDER BY day
            '''
        elif dimension == "weekly":
            # 获取2023年按周统计的数据
            query = '''
            SELECT strftime('%Y', job_posted_date) || '年第' || strftime('%W', job_posted_date) || '周' as week, COUNT(*) as count
            FROM jobs
            WHERE strftime('%Y', job_posted_date) = '2023'
            GROUP BY strftime('%Y-%W', job_posted_date)
            ORDER BY strftime('%Y-%W', job_posted_date)
            LIMIT 20
            '''
        elif dimension == "monthly":
            # 获取2023年按月统计的数据
            query = '''
            SELECT strftime('%Y', job_posted_date) || '年' || strftime('%m', job_posted_date) || '月' as month, COUNT(*) as count
            FROM jobs
            WHERE strftime('%Y', job_posted_date) = '2023'
            GROUP BY strftime('%Y-%m', job_posted_date)
            ORDER BY strftime('%Y-%m', job_posted_date)
            '''
        else:
            # 默认获取2023年最后7天的数据
            query = '''
            SELECT date(job_posted_date) as day, COUNT(*) as count
            FROM jobs
            WHERE date(job_posted_date) >= '2023-12-24'
            GROUP BY day
            ORDER BY day
            '''
        
        cursor = conn.execute(query)
        results = cursor.fetchall()
        
        # 格式化标签
        labels = []
        values = []
        for row in results:
            labels.append(row[0])
            values.append(row[1])
        
        return {
            "labels": labels,
            "values": values,
            "type": "时间趋势"
        }
    
    async def _get_skills_chart_data(self, conn, limit: int = None) -> Dict[str, Any]:
        """获取技能分布数据"""
        # 先获取所有技能数据
        cursor = conn.execute('SELECT job_skills FROM jobs WHERE job_skills IS NOT NULL')
        skill_counts = {}
        
        for row in cursor.fetchall():
            skills = row[0].split(',') if row[0] else []
            for skill in skills:
                skill = skill.strip()
                if skill:
                    skill_counts[skill] = skill_counts.get(skill, 0) + 1
        
        # 排序并应用限制
        sorted_skills = sorted(skill_counts.items(), key=lambda x: x[1], reverse=True)
        if limit:
            sorted_skills = sorted_skills[:limit]
        
        return {
            "labels": [skill[0] for skill in sorted_skills],
            "values": [skill[1] for skill in sorted_skills],
            "type": "技能分布"
        }
    
    async def _get_company_chart_data(self, conn, limit: int = None) -> Dict[str, Any]:
        """获取公司招聘数量数据"""
        query = '''
        SELECT company_name, COUNT(*) as count, AVG(salary_year_avg) as avg_salary
        FROM jobs
        GROUP BY company_name
        ORDER BY count DESC
        '''
        if limit:
            query += f" LIMIT {limit}"
        
        cursor = conn.execute(query)
        results = cursor.fetchall()
        
        return {
            "labels": [row[0] for row in results],
            "values": [row[1] for row in results],
            "extra": [round(row[2], 0) if row[2] else 0 for row in results],
            "type": "公司招聘数量"
        }

# Module-level shared instance.
# NOTE(review): constructing this at import time calls _ensure_db_exists(),
# which creates/opens "jh_job_analytics.db" in the process working directory
# as an import side effect — confirm this is intended for all importers.
job_data_service = JobDataService()