"""
JH子系统 API路由
提供职位数据可视化相关的API接口
"""

from fastapi import APIRouter, HTTPException, Query, Depends, File, UploadFile, Form
from typing import Dict, List, Any, Optional
from pydantic import BaseModel
import io
import PyPDF2
import docx
from bs4 import BeautifulSoup

from ....jh_subsystem.job_data_service import job_data_service, JobDataFilters

# Sub-router for the JH subsystem; every route below is served under the "/jh" prefix.
router = APIRouter(prefix="/jh", tags=["JH职位数据分析"])

class JobSearchRequest(BaseModel):
    """Request body for the advanced job search endpoint (POST /jh/jobs/search).

    All filter fields are optional; omitted fields are passed through as None
    and (presumably) ignored by JobDataFilters — TODO confirm in the service.
    """
    query: Optional[str] = None            # free-text search keyword
    job_type: Optional[str] = None         # job category filter
    location: Optional[str] = None         # work location filter
    country: Optional[str] = None          # country filter
    schedule_type: Optional[str] = None    # schedule filter (e.g. full/part time)
    work_from_home: Optional[bool] = None  # remote-work filter
    salary_min: Optional[float] = None     # lower salary bound
    salary_max: Optional[float] = None     # upper salary bound
    page: int = 1                          # 1-based page number
    page_size: int = 20                    # results per page

class ManualResumeInput(BaseModel):
    """Resume details typed in by the user (POST /jh/resume/manual).

    The fields are flattened into a plain-text blob before analysis, so all
    values are free-form text.
    """
    name: str                              # candidate name (only required field)
    email: Optional[str] = None
    phone: Optional[str] = None
    education: Optional[str] = None        # free-text education background
    experience: Optional[str] = None       # free-text work experience
    skills: List[str] = []                 # NOTE(review): mutable default — pydantic copies defaults per instance, so safe here
    summary: Optional[str] = None          # personal summary / bio
    target_position: Optional[str] = None  # desired job title
    expected_salary: Optional[str] = None  # free text (e.g. a range)

class ResumeAnalysisResult(BaseModel):
    """Result payload produced by analyze_resume_content()."""
    skill_matches: Dict[str, float]              # per-skill match score, 0-100
    competitiveness_score: float                 # overall competitiveness, 0-100
    skill_match_score: float                     # mean of skill_matches values (0-100)
    salary_competitiveness: float                # skill_match_score * 1.2, capped at 100
    career_suggestions: List[str]                # human-readable development tips
    recommended_positions: List[Dict[str, Any]]  # jobs ranked by match_score
    missing_skills: List[str]                    # important skills absent from the resume
    strength_areas: List[str]                    # broad domains the candidate is strong in

@router.on_event("startup")
async def startup_event():
    """Initialize the JH job database when the application starts up.

    Initialization errors are logged and deliberately swallowed: a broken JH
    subsystem should degrade gracefully rather than block application boot.
    """
    import logging  # local import so this fix is self-contained

    try:
        await job_data_service.initialize_database()
    except Exception as e:
        # Route the failure through the logging framework instead of print()
        # so it reaches the application's configured log handlers.
        logging.getLogger(__name__).error("JH子系统数据库初始化失败: %s", e)

@router.get("/overview")
async def get_overview_statistics():
    """Return aggregate overview statistics for the job dataset."""
    try:
        return await job_data_service.get_overview_statistics()
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取统计信息失败: {str(e)}")

@router.get("/jobs")
async def get_jobs_paginated(
    page: int = Query(1, ge=1, description="页码"),
    page_size: int = Query(20, ge=1, le=100, description="每页大小"),
    job_type: Optional[str] = Query(None, description="职位类型"),
    location: Optional[str] = Query(None, description="工作地点"),
    country: Optional[str] = Query(None, description="国家"),
    schedule_type: Optional[str] = Query(None, description="工作类型"),
    work_from_home: Optional[bool] = Query(None, description="是否远程"),
    search_query: Optional[str] = Query(None, description="搜索关键词"),
    salary_min: Optional[float] = Query(None, description="最低薪资"),
    salary_max: Optional[float] = Query(None, description="最高薪资")
):
    """Return one page of job postings filtered by the optional query params."""
    try:
        # Bundle every filter param into the service-layer filter object.
        job_filters = JobDataFilters(
            job_type=job_type,
            location=location,
            country=country,
            schedule_type=schedule_type,
            work_from_home=work_from_home,
            search_query=search_query,
            salary_min=salary_min,
            salary_max=salary_max,
        )
        return await job_data_service.get_jobs_paginated(
            page=page,
            page_size=page_size,
            filters=job_filters,
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取职位数据失败: {str(e)}")

@router.get("/chart-data/{chart_type}")
async def get_chart_data(chart_type: str):
    """Return the data series backing one of the dashboard charts.

    Rejects unknown chart types with a 400 before touching the service layer.
    """
    supported_charts = (
        "job_type_distribution",
        "location_distribution",
        "salary_distribution",
        "time_trend",
        "skills_analysis",
        "company_ranking",
    )

    if chart_type not in supported_charts:
        raise HTTPException(
            status_code=400,
            detail=f"无效的图表类型。支持的类型: {', '.join(supported_charts)}"
        )

    try:
        return await job_data_service.get_chart_data(chart_type)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取图表数据失败: {str(e)}")

@router.get("/search")
async def search_jobs(
    query: str = Query(..., description="搜索关键词"),
    limit: int = Query(50, ge=1, le=100, description="结果数量限制")
):
    """Keyword search over job postings, capped at `limit` results."""
    try:
        matches = await job_data_service.search_jobs(query, limit)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"搜索职位失败: {str(e)}")
    return {"jobs": matches, "total": len(matches)}

@router.post("/jobs/search")
async def advanced_search_jobs(request: JobSearchRequest):
    """Advanced search: same pagination as GET /jobs, driven by a JSON body."""
    try:
        # Map the request model onto the service-layer filter object.
        criteria = JobDataFilters(
            job_type=request.job_type,
            location=request.location,
            country=request.country,
            schedule_type=request.schedule_type,
            work_from_home=request.work_from_home,
            search_query=request.query,
            salary_min=request.salary_min,
            salary_max=request.salary_max,
        )
        return await job_data_service.get_jobs_paginated(
            page=request.page,
            page_size=request.page_size,
            filters=criteria,
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"高级搜索失败: {str(e)}")

@router.post("/resume/upload")
async def upload_resume(file: UploadFile = File(...)):
    """Accept a resume upload, extract its text and run the analysis pipeline.

    Supported formats: PDF, DOC/DOCX, HTML and plain text. Returns file
    metadata (name, type, sizes) plus the ResumeAnalysisResult.
    """
    # Validate the declared content type BEFORE the generic handler below.
    # Previously the 400 raised here sat inside the try block, was caught by
    # the blanket `except Exception`, and was re-surfaced as a 500.
    allowed_types = ['application/pdf', 'application/msword',
                     'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
                     'text/html', 'text/plain']

    if file.content_type not in allowed_types:
        raise HTTPException(
            status_code=400,
            detail=f"不支持的文件类型: {file.content_type}。支持的类型: PDF, DOC, DOCX, HTML, TXT"
        )

    try:
        # Read the raw bytes once; each extractor works on the in-memory buffer.
        content = await file.read()

        # Dispatch to the extractor matching the declared content type.
        if file.content_type == 'application/pdf':
            text_content = extract_text_from_pdf(content)
        elif file.content_type in ['application/msword', 'application/vnd.openxmlformats-officedocument.wordprocessingml.document']:
            text_content = extract_text_from_docx(content)
        elif file.content_type == 'text/html':
            text_content = extract_text_from_html(content)
        else:
            # text/plain — assumes UTF-8 encoding; TODO confirm that is acceptable
            text_content = content.decode('utf-8')

        analysis_result = await analyze_resume_content(text_content)

        return {
            "filename": file.filename,
            "content_type": file.content_type,
            "file_size": len(content),
            "extracted_text_length": len(text_content),
            "analysis": analysis_result
        }

    except HTTPException:
        # Preserve deliberate HTTP errors instead of re-wrapping them as 500s.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"简历分析失败: {str(e)}")

@router.post("/resume/manual")
async def analyze_manual_resume(resume_data: ManualResumeInput):
    """Analyze resume details submitted as structured JSON fields."""
    try:
        # Flatten the structured fields into the same free-text form the
        # file-upload path produces, then reuse the shared analysis pipeline.
        text_content = f"""
        姓名: {resume_data.name}
        邮箱: {resume_data.email or ''}
        电话: {resume_data.phone or ''}
        教育背景: {resume_data.education or ''}
        工作经验: {resume_data.experience or ''}
        技能: {', '.join(resume_data.skills)}
        个人简介: {resume_data.summary or ''}
        目标职位: {resume_data.target_position or ''}
        期望薪资: {resume_data.expected_salary or ''}
        """

        analysis = await analyze_resume_content(text_content)

        return {
            "input_type": "manual",
            "analysis": analysis,
        }

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"简历分析失败: {str(e)}")

@router.get("/resume/skills-benchmark")
async def get_skills_benchmark():
    """Return market benchmark data for the top in-demand skills."""
    try:
        # Top-30 market skills; the chart payload exposes parallel
        # "labels" / "values" arrays (matches job_data_service's shape).
        chart_data = await job_data_service.get_chart_data("skillsChart", limit=30)
        labels = chart_data.get("labels", [])
        values = chart_data.get("values", [])
        total = sum(values) if values else 1

        skills_benchmark = {}
        for rank, skill in enumerate(labels):
            demand = values[rank] if rank < len(values) else 0
            skills_benchmark[skill] = {
                "demand_score": round(demand / total * 100, 2) if total else 0,
                "avg_salary": 80000 + (rank * 5000),  # synthetic placeholder salary
                "growth_trend": "high" if rank < 5 else ("medium" if rank < 15 else "low"),
            }

        return {
            "skills_benchmark": skills_benchmark,
            "market_trends": {
                "hot_skills": labels[:5],
                "emerging_skills": labels[5:10] if len(labels) > 10 else labels[5:],
                "declining_skills": labels[-5:][::-1] if len(labels) >= 5 else []
            }
        }

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取技能基准失败: {str(e)}")

@router.get("/health")
async def health_check():
    """Simple liveness endpoint for monitoring."""
    payload = {"status": "healthy", "service": "JH职位数据分析服务"}
    return payload

def extract_text_from_pdf(content: bytes) -> str:
    """Extract plain text from a PDF document given as raw bytes.

    Raises a generic Exception with a Chinese message on any parse failure.
    """
    try:
        reader = PyPDF2.PdfReader(io.BytesIO(content))
        # One "\n"-terminated chunk per page, concatenated then trimmed.
        pages = [page.extract_text() + "\n" for page in reader.pages]
        return "".join(pages).strip()
    except Exception as e:
        raise Exception(f"PDF解析失败: {str(e)}")

def extract_text_from_docx(content: bytes) -> str:
    """Extract plain text (one line per paragraph) from DOC/DOCX bytes.

    Raises a generic Exception with a Chinese message on any parse failure.
    """
    try:
        document = docx.Document(io.BytesIO(content))
        joined = "".join(paragraph.text + "\n" for paragraph in document.paragraphs)
        return joined.strip()
    except Exception as e:
        raise Exception(f"DOCX解析失败: {str(e)}")

def extract_text_from_html(content: bytes) -> str:
    """Extract visible text from an HTML document, dropping script/style tags.

    Raises a generic Exception with a Chinese message on any parse failure.
    """
    try:
        soup = BeautifulSoup(content, 'html.parser')
        # Strip non-visible content before pulling the text.
        for hidden in soup(["script", "style"]):
            hidden.decompose()
        return soup.get_text().strip()
    except Exception as e:
        raise Exception(f"HTML解析失败: {str(e)}")

async def analyze_resume_content(text_content: str) -> ResumeAnalysisResult:
    """Run the full resume analysis pipeline over extracted resume text.

    Combines market skill data, per-skill matching, heuristic scoring, and
    job recommendations into one ResumeAnalysisResult. Any internal failure
    is re-raised as a generic Exception with a Chinese message.
    """
    try:
        # Market skill benchmark (chart name matches job_data_service's naming).
        market_skills = await job_data_service.get_chart_data("skillsChart")

        # Per-skill match scores for the candidate.
        skill_matches = analyze_skills_in_text(text_content, market_skills)

        # Overall competitiveness (skills plus experience/education signals).
        competitiveness_score = calculate_competitiveness_score(skill_matches, text_content)

        # Mean of per-skill scores; each score is already on a 0-100 scale,
        # so no extra scaling is applied.
        skill_match_score = sum(skill_matches.values()) / max(len(skill_matches), 1)
        salary_competitiveness = min(skill_match_score * 1.2, 100)

        return ResumeAnalysisResult(
            skill_matches=skill_matches,
            competitiveness_score=competitiveness_score,
            skill_match_score=skill_match_score,
            salary_competitiveness=salary_competitiveness,
            career_suggestions=generate_career_suggestions(skill_matches, text_content),
            recommended_positions=await get_recommended_positions(skill_matches),
            missing_skills=identify_missing_skills(skill_matches),
            strength_areas=identify_strength_areas(skill_matches),
        )

    except Exception as e:
        raise Exception(f"简历内容分析失败: {str(e)}")

def analyze_skills_in_text(text: str, chart_data: dict) -> Dict[str, float]:
    """Score how strongly each known skill appears in free-form resume text.

    Returns a mapping of skill name -> score in (0, 100]. The score grows
    with the number of occurrences (20 points each, capped at a base of 80)
    and is jittered by a random factor in [0.8, 1.2] to avoid flat values.

    `chart_data` is currently unused; kept for interface compatibility with
    callers that pass market chart data — TODO wire it into the scoring.
    """
    import random
    import re

    # Curated skill vocabulary (all lowercase).
    skills_list = [
        "python", "java", "javascript", "sql", "r", "scala", "c++", "c#",
        "machine learning", "deep learning", "data analysis", "statistics",
        "tensorflow", "pytorch", "scikit-learn", "pandas", "numpy",
        "hadoop", "spark", "kafka", "docker", "kubernetes",
        "aws", "azure", "gcp", "git", "linux", "excel"
    ]

    skill_matches: Dict[str, float] = {}
    text_lower = text.lower()

    for skill in skills_list:
        # Boundary-aware matching fixes the old substring bug where "java"
        # matched inside "javascript" and the single-letter skill "r"
        # matched any text containing the letter r. The boundary classes
        # include '+' and '#' so "c++" / "c#" are treated as whole tokens.
        pattern = re.compile(
            r"(?<![a-z0-9+#])" + re.escape(skill) + r"(?![a-z0-9+#])"
        )
        count = len(pattern.findall(text_lower))
        if count:
            base_score = min(count * 20, 80)
            variation = random.uniform(0.8, 1.2)  # jitter, as in the original heuristic
            skill_matches[skill] = round(min(base_score * variation, 100), 1)

    return skill_matches

def calculate_competitiveness_score(skill_matches: Dict[str, float], text: str) -> float:
    """Combine skill scores with experience/education signals into 0-100.

    Returns a neutral 50.0 when no skills were matched at all.
    """
    if not skill_matches:
        return 50.0

    text_lower = text.lower()
    avg_skill = sum(skill_matches.values()) / len(skill_matches)

    # Crude experience signal: any mention of years (Chinese or English).
    bonus = 10 if ("年" in text or "year" in text_lower) else 0

    # Crude education signal: any degree/institution keyword.
    degree_terms = ("学士", "硕士", "博士", "bachelor", "master", "phd", "university", "college")
    if any(term in text_lower for term in degree_terms):
        bonus += 5

    return round(min(avg_skill + bonus, 100), 1)

def generate_career_suggestions(skill_matches: Dict[str, float], text: str) -> List[str]:
    """Build a list of career-development tips from the per-skill scores.

    Up to two data-driven tips (strengths > 70, weaknesses < 50) followed by
    four always-on generic recommendations.
    """
    strong = [name for name, score in skill_matches.items() if score > 70]
    weak = [name for name, score in skill_matches.items() if score < 50]

    tips: List[str] = []
    if strong:
        tips.append(f"您在 {', '.join(strong[:3])} 方面表现突出，建议继续深化这些技能")
    if weak:
        tips.append(f"建议加强 {', '.join(weak[:3])} 相关技能的学习")

    # Generic advice appended regardless of the skill profile.
    tips += [
        "考虑获得相关技术认证来提升竞争力",
        "建议参与开源项目来展示实际技能",
        "定期更新技能以跟上行业发展趋势",
        "考虑建立个人技术博客或作品集",
    ]
    return tips

async def get_recommended_positions(skill_matches: Dict[str, float]) -> List[Dict[str, Any]]:
    """Rank jobs by how well their required skills match the candidate's scores.

    Fetches up to 80 postings, scores each one as the mean of the candidate's
    scores over the posting's skill list, and returns the top 30 descending.
    Best-effort: any failure yields an empty list instead of propagating.
    """

    def _skill_list(raw: Any) -> List[str]:
        # A posting's skills may arrive as a list or a comma-separated string.
        if isinstance(raw, str):
            try:
                return [part.strip() for part in raw.split(',') if part.strip()]
            except Exception:
                return []
        return raw or []

    try:
        # Pull a generous batch so there are enough candidates to rank.
        page = await job_data_service.get_jobs_paginated(page=1, page_size=80)

        ranked: List[Dict[str, Any]] = []
        for job in page.get('jobs', []):
            skills = _skill_list(job.get('job_skills'))
            if skills:
                hits = [float(skill_matches.get(name, 0)) for name in skills if name]
                score = sum(hits) / max(len(skills), 1)
            else:
                score = 0.0

            avg_salary = job.get('salary_year_avg')
            ranked.append({
                "id": job.get("id"),
                "title": job.get('job_title_short') or job.get('job_title') or "",
                "company": job.get('company_name') or "",
                "location": job.get('job_location') or "",
                "match_score": round(float(score), 1),
                "salary_range": f"${avg_salary:,.0f}" if avg_salary else "待议",
                "type": job.get('job_schedule_type') or "全职",
                "tags": skills[:6],
                "url": job.get('job_posted_url') or job.get('job_apply_url') or "",
            })

        ranked.sort(key=lambda item: item['match_score'], reverse=True)
        return ranked[:30]
    except Exception:
        # Recommendations must never fail the overall analysis.
        return []

def identify_missing_skills(skill_matches: Dict[str, float]) -> List[str]:
    """Return up to five high-value skills absent from the candidate's set.

    Comparison is case-insensitive; at most the first five misses are kept.
    """
    core_skills = ["Python", "SQL", "Machine Learning", "Data Analysis", "Git", "Docker"]
    owned = {name.lower() for name in skill_matches}
    return [skill for skill in core_skills if skill.lower() not in owned][:5]

def identify_strength_areas(skill_matches: Dict[str, float]) -> List[str]:
    """Map the candidate's strong skills (score > 70) onto broad domains.

    A domain is reported when at least one of its representative skills is
    strong; domains keep a fixed presentation order.
    """
    strong = {name for name, score in skill_matches.items() if score > 70}

    # (domain label, representative lowercase skill names)
    domains = [
        ("编程开发", {"python", "java", "javascript", "c++"}),
        ("机器学习", {"machine learning", "deep learning", "tensorflow"}),
        ("数据分析", {"data analysis", "statistics", "sql"}),
        ("云计算与DevOps", {"aws", "azure", "docker", "kubernetes"}),
    ]

    return [label for label, members in domains if strong & members]