import os
import requests
import json
from bs4 import BeautifulSoup
from urllib.parse import urljoin

url = "http://120.76.96.109/"

def _section_for(soup, keyword):
    """Return the <section> whose <h2 class="section-title"> contains *keyword*, or None.

    The `t and ...` guard matters: BeautifulSoup may call the string matcher
    with None (tag without a single string child), and `keyword in None`
    would raise TypeError.
    """
    title = soup.find('h2', class_='section-title', string=lambda t: t and keyword in t)
    return title.find_parent('section') if title else None


def _text(parent, tag, cls, strip_prefix=''):
    """Stripped text of the first <tag class=cls> under *parent*, '' if absent.

    If *strip_prefix* is given (e.g. a "电话: " label), it is removed from the text.
    """
    node = parent.find(tag, class_=cls)
    if not node:
        return ''
    text = node.get_text(strip=True)
    return text.replace(strip_prefix, '') if strip_prefix else text


def scrape_resume(target_url=url):
    """Scrape a single-page resume from *target_url* (defaults to the module-level url).

    Downloads the avatar into resume_data/images/, extracts basic info,
    education, practice experience, skills and self-evaluation, writes the
    result via save_data(), and returns the assembled dict.

    Returns:
        dict with keys basic_info/education/practice/skills/self_evaluation,
        or None if the request or parsing failed.
    """
    # Create output folders (exist_ok avoids the check-then-create race)
    os.makedirs('resume_data/images', exist_ok=True)

    try:
        headers = {'User-Agent': 'Mozilla/5.0'}  # minimal UA so trivial bot filters let us through
        response = requests.get(target_url, headers=headers, timeout=10)
        response.raise_for_status()
        response.encoding = 'utf-8'
        soup = BeautifulSoup(response.text, 'html.parser')

        resume_data = {
            'basic_info': {},
            'education': [],
            'practice': [],
            'skills': [],
            'self_evaluation': ''
        }

        # 1. Basic info (avatar + contact fields from the page header)
        header = soup.find('header', class_='header')
        if header:
            img_tag = header.find('img', class_='avatar')
            if img_tag and img_tag.get('src'):
                img_url = urljoin(target_url, img_tag['src'])
                download_image(img_url, 'profile.jpg')
                resume_data['basic_info']['photo'] = 'resume_data/images/profile.jpg'

            resume_data['basic_info'].update({
                'name': _text(header, 'h1', 'name'),
                'job_intention': _text(header, 'p', 'job-intention', '求职意向: '),
                'age': _text(header, 'p', 'age', '年龄: '),
                'phone': _text(header, 'p', 'phone', '电话: '),
                'address': _text(header, 'p', 'address', '现居: '),
                'email': _text(header, 'p', 'email', '邮箱: '),
            })

        # 2. Education background
        section = _section_for(soup, '教育背景')
        if section:
            for item in section.find_all('div', class_='education-item'):
                resume_data['education'].append({
                    'period': _text(item, 'span', 'date'),
                    'school': _text(item, 'span', 'school'),
                    'major': _text(item, 'span', 'major'),
                    'degree': _text(item, 'span', 'degree'),
                })

        # 3. Practice / work experience
        section = _section_for(soup, '实践经历')
        if section:
            for item in section.find_all('div', class_='experience-item'):
                resume_data['practice'].append({
                    'period': _text(item, 'span', 'date'),
                    'company': _text(item, 'span', 'company'),
                    'position': _text(item, 'span', 'position'),
                    'details': [li.get_text(strip=True) for li in item.find_all('li')],
                })

        # 4. Skills / certificates
        section = _section_for(soup, '技能')
        if section:
            skills_list = section.find('ul', class_='skills-list')
            if skills_list:
                resume_data['skills'] = [
                    li.get_text(strip=True).replace(';', '')
                    for li in skills_list.find_all('li')
                ]

        # 5. Self evaluation (bullet list joined into one newline-separated string)
        section = _section_for(soup, '自我评价')
        if section:
            eval_list = section.find('ul', class_='evaluation-list')
            if eval_list:
                resume_data['self_evaluation'] = '\n'.join(
                    li.get_text(strip=True) for li in eval_list.find_all('li')
                )

        save_data(resume_data)
        print("爬取完成！数据已保存到 resume_data 文件夹")
        return resume_data

    except Exception as e:
        # Top-level boundary for this script: report and signal failure to the caller.
        print(f"发生错误: {str(e)}")
        return None

def download_image(img_url, filename):
    """Download *img_url* and save it as resume_data/images/<filename>.

    Bug fix: the original ignored *filename* and wrote every image to the
    same hard-coded path. Failures are logged and swallowed so a missing
    avatar does not abort the whole scrape.
    """
    try:
        resp = requests.get(img_url, timeout=10)
        resp.raise_for_status()  # don't save an HTML error page as the image
        with open(f'resume_data/images/{filename}', 'wb') as f:
            f.write(resp.content)
        print(f"下载图片成功: {filename}")
    except Exception as e:
        print(f"下载图片失败: {str(e)}")

def save_data(data):
    """Serialize *data* to resume_data/resume.json as pretty-printed UTF-8 JSON."""
    payload = json.dumps(data, ensure_ascii=False, indent=4)
    with open('resume_data/resume.json', 'w', encoding='utf-8') as out:
        out.write(payload)

if __name__ == '__main__':
    data = scrape_resume()
    if data:
        # Print a short summary of what was extracted.
        # Use .get() throughout: if the page lacked a <header>, basic_info is
        # empty and the original direct indexing raised KeyError here.
        print("\n=== 提取结果示例 ===")
        basic = data.get('basic_info', {})
        print(f"姓名: {basic.get('name', '')}")
        print(f"电话: {basic.get('phone', '')}")
        print("教育背景:")
        for edu in data.get('education', []):
            print(f"- {edu.get('school', '')} ({edu.get('period', '')})")
        print("主要技能:")
        for skill in data.get('skills', []):
            print(f"- {skill}")