import requests
from bs4 import BeautifulSoup
import os
import json
from urllib.parse import urljoin
import chardet

def scrape_resume_website(url, save_dir='resume_data'):
    """
    Scrape a personal-resume website: structured text sections plus the
    profile image.

    The page is assumed to follow a fixed layout: an <h1> name, a header
    <p> job title, a div.contact-info block, and five section.section
    blocks in order (education, skills, projects, awards,
    self-evaluation).  # NOTE(review): layout assumption — verify against the target site

    Parameters:
        url: root URL of the target resume site
        save_dir: directory where resume_data.json and images/ are written

    Returns:
        The scraped data dict on success, or None on any failure
        (errors are reported via print; no exception propagates,
        matching the original best-effort behaviour).
    """
    os.makedirs(save_dir, exist_ok=True)
    os.makedirs(os.path.join(save_dir, 'images'), exist_ok=True)

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }

    try:
        soup = _fetch_soup(url, headers)
        data = _parse_resume(soup)
        _download_profile_image(soup, url, headers, save_dir)

        with open(os.path.join(save_dir, 'resume_data.json'), 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=4)

        print("爬取完成！数据已保存到", save_dir)
        return data
    except Exception as e:
        # Top-level best-effort boundary for a simple CLI script:
        # report and swallow so the process exits cleanly.
        print("爬取过程中出现错误:", e)
        return None


def _fetch_soup(url, headers):
    """Fetch *url* and return a BeautifulSoup tree with detected encoding."""
    # timeout= guards against an unresponsive host hanging the script
    # (the original call could block forever).
    response = requests.get(url, headers=headers, timeout=15)
    response.raise_for_status()
    # Detect the charset from raw bytes; fall back to utf-8 when
    # detection yields nothing.
    encoding = chardet.detect(response.content)['encoding']
    response.encoding = encoding if encoding else 'utf-8'
    return BeautifulSoup(response.text, 'html.parser')


def _parse_resume(soup):
    """Extract every resume section from the parsed page into one dict."""
    # Hoisted: the original re-ran this query for each of five sections.
    sections = soup.find_all('section', class_='section')

    contact = soup.find('div', class_='contact-info')
    contact_ps = contact.find_all('p')
    personal_info = {
        'name': soup.find('h1').text.strip(),
        'job_title': soup.find('header').find('p').text.strip(),
        'contact_info': {
            'phone': contact_ps[0].text.strip(),
            'email': contact_ps[1].text.strip(),
            'github': contact.find('a')['href'].strip(),
        },
    }

    # Section 0: education background.
    edu_ps = sections[0].find_all('p')
    education = {
        'school': sections[0].find('strong').text.strip(),
        'period': edu_ps[1].text.strip(),
        'courses': edu_ps[2].text.strip(),
    }

    # Section 1: skills list.
    skills = [li.text.strip() for li in sections[1].find_all('li')]

    # Section 2: project experience — each div.content holds title +
    # four <p> fields in a fixed order.
    projects = []
    for project in sections[2].find_all('div', class_='content'):
        ps = project.find_all('p')
        projects.append({
            'title': project.find('h3').text.strip(),
            'time': ps[0].text.strip(),
            'description': ps[1].text.strip(),
            'responsibilities': ps[2].text.strip(),
            'technologies': ps[3].text.strip(),
        })

    # Section 3: awards list; section 4: self evaluation.
    awards = [li.text.strip() for li in sections[3].find_all('li')]
    self_evaluation = sections[4].find('p').text.strip()

    return {
        'personal_info': personal_info,
        'education': education,
        'skills': skills,
        'projects': projects,
        'awards': awards,
        'self_evaluation': self_evaluation,
    }


def _download_profile_image(soup, base_url, headers, save_dir):
    """Download img.profile-img (if present) to save_dir/images/profile.jpg."""
    img_tag = soup.find('img', class_='profile-img')
    if img_tag and 'src' in img_tag.attrs:
        img_url = urljoin(base_url, img_tag['src'])
        img_response = requests.get(img_url, headers=headers, timeout=15)
        # Bug fix: the original wrote the body to disk even on HTTP
        # errors, silently saving an error page as the profile image.
        img_response.raise_for_status()
        img_path = os.path.join(save_dir, 'images', 'profile.jpg')
        with open(img_path, 'wb') as f:
            f.write(img_response.content)

# 使用示例
# Script entry point: scrape the demo resume site into ./resume_data.
if __name__ == "__main__":
    scrape_resume_website("http://112.74.106.152/")