import os
import time
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import urllib3
import json

# Suppress urllib3's InsecureRequestWarning: the requests below intentionally
# use verify=False (self-signed / unverified target), which would otherwise
# print a warning for every request.
urllib3.disable_warnings()

def _text_of(parent, tag, cls=None):
    """Return the stripped text of the first matching descendant, or ""."""
    el = parent.find(tag, class_=cls) if cls else parent.find(tag)
    return el.get_text(strip=True) if el else ""


def _list_after_heading(parent, heading_tag, heading_text):
    """Return the <li> texts of the first <ul> following a heading whose
    string contains ``heading_text``; [] when heading or list is missing."""
    heading = parent.find(heading_tag,
                          string=lambda t: heading_text in t if t else False)
    if heading:
        ul = heading.find_next('ul')
        if ul:
            return [li.get_text(strip=True) for li in ul.find_all('li')]
    return []


def _style_percent(el, prefix):
    """Extract the numeric percentage that follows ``prefix`` in an element's
    inline style attribute (e.g. "width: 85%" -> "85"); "" when el is None."""
    if not el:
        return ""
    return el.get('style', '').split(prefix)[-1].split('%')[0].strip()


def _find_section_by_heading(soup, heading_text):
    """Locate a <section> via its <h2> heading text.

    NOTE: the original code first tried
    ``soup.find('section', class_='card', string=...)`` — that branch can
    never match (BeautifulSoup's ``string=`` only matches a tag whose sole
    child is a string, which a populated <section> never is), so only the
    <h2> lookup is kept.
    """
    h2 = soup.find('h2', string=lambda t: heading_text in t if t else False)
    return h2.find_parent('section') if h2 else None


def _extract_basic_info(soup, base_url, session, headers):
    """Extract photo, name, job title and contact details from the hero
    header; returns {} when no <header class="hero"> is present."""
    info = {}
    header = soup.find('header', class_='hero')
    if not header:
        return info

    # Download the profile photo; failure is non-fatal (logged, scrape continues).
    profile_img = header.find('img', class_='profile-img')
    if profile_img and profile_img.get('src'):
        img_url = urljoin(base_url, profile_img['src'])
        try:
            img_data = session.get(img_url, headers=headers,
                                   timeout=10, verify=False).content
            with open('resume_data/images/profile.jpg', 'wb') as f:
                f.write(img_data)
            info['photo'] = 'resume_data/images/profile.jpg'
        except Exception as e:
            print(f"下载图片失败: {e}")

    # Map Font Awesome icon classes to contact-info fields; the first
    # matching icon class wins (mirrors the original if/elif chain order).
    icon_fields = {
        'fa-phone-alt': 'phone',
        'fa-envelope': 'email',
        'fa-git-alt': 'gitee',
        'fa-map-marker-alt': 'location',
    }
    contact = {}
    for item in header.find_all('div', class_='contact-item'):
        icon = item.find('i')
        if not icon:
            continue
        classes = icon.get('class', [])
        for icon_cls, field in icon_fields.items():
            if icon_cls in classes:
                # Gitee entries prefer the link href over the visible text.
                if field == 'gitee' and item.find('a'):
                    contact[field] = item.find('a')['href']
                else:
                    contact[field] = item.get_text(strip=True)
                break

    info.update({
        'name': _text_of(header, 'h1', 'name'),
        'job_title': _text_of(header, 'p', 'job-title'),
        'contact': contact,
    })
    return info


def _extract_personality(soup):
    """Extract the MBTI personality card; {} when the section is absent."""
    section = soup.find('section', class_='personality-card')
    if not section:
        return {}
    return {
        'mbti_type': _text_of(section, 'span', 'mbti-badge'),
        'mbti_title': _text_of(section, 'span', 'mbti-title'),
        'quote': _text_of(section, 'blockquote', 'mbti-quote'),
        'core_traits': _list_after_heading(section, 'h3', '核心特质'),
        'development_advantages': _list_after_heading(section, 'h3', '开发优势'),
    }


def _extract_education(soup):
    """Extract the (single) education entry as a list; [] when absent."""
    section = soup.find('section', class_='education-card')
    if not section:
        return []
    edu_item = section.find('div', class_='edu-item')
    if not edu_item:
        return []
    education = {
        'school': _text_of(edu_item, 'h3'),
        'period': _text_of(edu_item, 'span', 'edu-date'),
        'major': _text_of(edu_item, 'p', 'edu-major'),
        'gpa': _text_of(edu_item, 'span', 'gpa-badge'),
        'rank': _text_of(edu_item, 'span', 'gpa-rank'),
        'courses': _list_after_heading(edu_item, 'h4', '核心课程'),
    }
    # Per-skill proficiency encoded as a CSS width percentage on the fill bar.
    skill_progress = edu_item.find('div', class_='skill-progress')
    if skill_progress:
        education['skills'] = [
            {
                'name': _text_of(item, 'span'),
                'level': _style_percent(item.find('div', class_='progress-fill'),
                                        'width:'),
            }
            for item in skill_progress.find_all('div', class_='progress-item')
        ]
    return [education]


def _extract_skills(soup):
    """Extract the technical-skills section as {category: [skill, ...]}."""
    section = _find_section_by_heading(soup, '技术能力')
    skills = {}
    if section:
        for item in section.find_all('div', class_='skill-item'):
            h3 = item.find('h3')
            category = h3.get_text(strip=True) if h3 else "其他技能"
            skills[category] = [li.get_text(strip=True)
                                for li in item.find_all('li')]
    return skills


def _extract_projects(soup):
    """Extract all project cards; [] when the section is absent."""
    section = _find_section_by_heading(soup, '项目经验')
    if not section:
        return []
    return [
        {
            'name': _text_of(card, 'h3'),
            'time': _text_of(card, 'p', 'project-time'),
            'tech_stack': [tag.get_text(strip=True)
                           for tag in card.find_all('span', class_='tech-tag')],
            'details': [li.get_text(strip=True)
                        for li in card.find_all('li', class_='project-detail')],
        }
        for card in section.find_all('article', class_='project-card')
    ]


def _extract_awards(soup):
    """Extract award name/description pairs; [] when the section is absent."""
    section = soup.find('section', class_='awards-card')
    if not section:
        return []
    return [
        {'name': _text_of(item, 'h3'), 'description': _text_of(item, 'p')}
        for item in section.find_all('div', class_='award-item')
    ]


def _extract_self_evaluation(soup):
    """Extract the self-evaluation card; {} when the section is absent."""
    section = soup.find('section', class_='self-eval-card')
    if not section:
        return {}
    # BUGFIX: the original called .find('p').get_text() guarded only on the
    # outer eval-quote div, crashing with AttributeError when the div exists
    # but contains no <p>.
    quote_div = section.find('div', class_='eval-quote')
    quote_p = quote_div.find('p') if quote_div else None
    return {
        'content': _text_of(section, 'div', 'zwpj'),
        'quote': quote_p.get_text(strip=True) if quote_p else "",
        'skill_meters': [
            {
                'name': _text_of(item, 'label'),
                # Level is stored in a custom CSS property, e.g. "--width: 90%".
                'level': _style_percent(item.find('div', class_='meter-bar'),
                                        '--width:'),
            }
            for item in section.find_all('div', class_='meter-item')
        ],
    }


def scrape_resume(target_url):
    """Scrape a personal-resume web page and persist the extracted data.

    Fetches ``target_url`` (SSL verification intentionally disabled), parses
    the known resume sections, saves the profile photo under
    resume_data/images/ and the structured data to resume_data/resume.json.

    Args:
        target_url: URL of the resume page to scrape.

    Returns:
        dict with keys 'basic_info', 'personality', 'education', 'skills',
        'projects', 'awards', 'self_evaluation' on success; None on any
        request or parsing error (the error is printed, not raised).
    """
    # Ensure the output folders exist before any download is attempted.
    os.makedirs('resume_data/images', exist_ok=True)

    try:
        # Browser-like headers to reduce the chance of being blocked.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
            'Connection': 'keep-alive'
        }

        # Reuse one session for page + image requests; brief delay to be polite.
        session = requests.Session()
        time.sleep(2)

        response = session.get(target_url, headers=headers,
                               timeout=30, verify=False)
        response.raise_for_status()
        response.encoding = 'utf-8'

        soup = BeautifulSoup(response.text, 'html.parser')

        # A resume page without any <header> means we were likely served an
        # anti-scraping placeholder instead of real content.
        if not soup.find('header'):
            raise ValueError("未能获取到有效的简历内容，可能是反爬机制阻止了访问")

        resume_data = {
            'basic_info': _extract_basic_info(soup, target_url, session, headers),
            'personality': _extract_personality(soup),
            'education': _extract_education(soup),
            'skills': _extract_skills(soup),
            'projects': _extract_projects(soup),
            'awards': _extract_awards(soup),
            'self_evaluation': _extract_self_evaluation(soup),
        }

        with open('resume_data/resume.json', 'w', encoding='utf-8') as f:
            json.dump(resume_data, f, ensure_ascii=False, indent=4)

        print("爬取完成！数据已保存到 resume_data 文件夹")
        return resume_data

    except requests.exceptions.RequestException as e:
        print(f"请求出错: {str(e)}")
        return None

    except Exception as e:
        print(f"发生错误: {str(e)}")
        return None

if __name__ == '__main__':
    # Demo target URL.
    test_url = "http://120.24.189.244/"

    result = scrape_resume(test_url)

    if result:
        print("爬取成功！")
        # BUGFIX: use .get() instead of direct indexing — when the page has a
        # <header> but not one with class "hero", basic_info stays {} and the
        # original code crashed with KeyError right after reporting success.
        basic = result.get('basic_info', {})
        print("姓名:", basic.get('name', ''))
        print("职位:", basic.get('job_title', ''))
        print("教育背景:", result.get('education', []))
    else:
        print("爬取失败，请检查以上错误信息")