import os
import re
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

def scrape_resume(url):
    """Scrape a resume page and return its content as a nested dict.

    Args:
        url: Absolute URL of the resume page to fetch.

    Returns:
        dict with:
            'basic_info': name, job_intention, local photo path, and a
                'contact' dict with email/phone (each "N/A" when absent);
            'education': list of education entries;
            'skills': list of skill entries.

    Raises:
        Exception: wraps any network or parsing failure; the original
            exception is preserved as ``__cause__`` via ``from e``.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0',
        'Accept-Language': 'en-US,en;q=0.9'
    }

    try:
        # 1. Fetch and parse the page (timeout guards against hangs)
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')

        # 2. Extract the structured fields via the helpers below
        data = {
            'basic_info': {
                'name': extract_text(soup, ['h1.name', 'h1']),
                'job_intention': extract_text(soup, ['p.position', 'h2.title']),
                'photo': download_photo(soup, url),
                'contact': {
                    'email': extract_contact(soup, r'[\w\.-]+@[\w\.-]+'),
                    'phone': extract_contact(soup, r'1[3-9]\d{9}')
                }
            },
            'education': extract_section(soup, '教育背景'),
            'skills': extract_list(soup, '技能')
        }
        return data

    except Exception as e:
        # Chain the original exception so the real cause (DNS error,
        # HTTP status, parse failure, ...) is not lost in the traceback.
        raise Exception(f"爬取失败: {str(e)}") from e

# ---------- 辅助函数 ----------
def extract_text(soup, selectors):
    """Return the first non-empty text found by trying CSS selectors in order.

    Each selector is tried with ``select_one``; the first element whose
    stripped text is non-empty wins. Falls back to "N/A" when no selector
    matches or every match is empty.
    """
    for css in selectors:
        node = soup.select_one(css)
        if node is None:
            continue
        text = node.get_text(strip=True)
        if text:
            return text
    return "N/A"

def extract_contact(soup, pattern):
    """Extract a contact detail from the page's full text with a regex.

    Args:
        soup: Parsed document exposing ``get_text()``.
        pattern: Regex string (e.g. an email or phone pattern).

    Returns:
        The first match, or "N/A" when nothing matches.
    """
    # `re` is imported at module level instead of inside the function,
    # so the (cached) import machinery is not hit on every call.
    match = re.search(pattern, soup.get_text())
    return match.group(0) if match else "N/A"

def extract_section(soup, title):
    """Collect the list-item texts that follow a matching section heading.

    Finds the first <h2>/<h3> whose text contains ``title``, then takes the
    next <div class="content"> (or, failing that, the next <ul>) and returns
    the stripped text of every <li> inside it. Returns [] when either the
    heading or a content block is absent.
    """
    heading = soup.find(lambda tag: tag.name in ['h2', 'h3'] and title in tag.text)
    if heading is None:
        return []
    block = heading.find_next('div', class_='content')
    if block is None:
        block = heading.find_next('ul')
    if block is None:
        return []
    return [item.get_text(strip=True) for item in block.find_all('li')]

def extract_list(soup, title):
    """Alias for :func:`extract_section`: list items under the given heading."""
    return extract_section(soup, title)

def download_photo(soup, base_url):
    """Download the profile photo, best effort.

    Looks for an ``<img class="avatar">`` or ``<img class="profile">``,
    resolves its ``src`` against ``base_url``, and saves the bytes to
    ``resume_data/images/profile.jpg``.

    Returns:
        The local file path on success, "" when no image tag/src exists or
        the download fails (failures are swallowed by design — the photo
        is optional).
    """
    img = soup.select_one('img.avatar, img.profile')
    if not (img and img.get('src')):
        return ""
    img_url = urljoin(base_url, img['src'])
    try:
        # timeout was missing: a stalled server would hang the scrape forever
        resp = requests.get(img_url, timeout=10)
        # don't save an HTML error page (404/500 body) as the "photo"
        resp.raise_for_status()
        os.makedirs('resume_data/images', exist_ok=True)
        local_path = 'resume_data/images/profile.jpg'
        with open(local_path, 'wb') as f:
            f.write(resp.content)
        return local_path
    except (requests.RequestException, OSError):
        # Narrowed from a bare `except:` which also trapped
        # KeyboardInterrupt/SystemExit; best-effort semantics preserved.
        return ""