import os
import requests
import time
from bs4 import BeautifulSoup
import pandas as pd
import re
from urllib.parse import urljoin, urlparse
from openpyxl import Workbook
from openpyxl.styles import Alignment, Font
from openpyxl.utils import get_column_letter
from concurrent.futures import ThreadPoolExecutor

class SmartResumeScraper:
    """Scrape resume-style web pages and export the extracted fields to Excel.

    Each scraped URL yields one record (dict) in ``self.data`` — or an error
    record on failure — and ``save_to_excel`` flattens those records into a
    formatted spreadsheet.  Intended for one batch per instance: the summary
    in ``scrape_multiple`` counts everything accumulated in ``self.data``.
    """

    def __init__(self):
        # Desktop-browser User-Agent so naive bot filters still serve the page.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        # One dict per scraped URL.  Appended from worker threads; CPython's
        # list.append is atomic, so no explicit lock is required here.
        self.data = []
        # Canonical output column -> heading keywords used to classify sections.
        self.category_map = {
            '姓名': ['姓名', '名字', 'Name', '个人简介'],
            '电话': ['电话', '手机', 'Tel', 'Phone'],
            '邮箱': ['邮箱', 'Email', 'E-mail'],
            '教育背景': ['教育背景', '学历', 'Education', '学术经历'],
            '工作经历': ['工作经历', '工作经验', 'Employment', '职业经历'],
            '技能': ['技能', '专长', 'Skills', '技术能力'],
            '项目': ['项目经验', '项目经历', 'Projects'],
            '荣誉': ['荣誉', '奖项', 'Awards', 'Achievements']
        }

    def clean_text(self, text):
        """Collapse whitespace runs to single spaces and strip the ends.

        Returns "" for falsy input (None or empty string).
        """
        if not text:
            return ""
        return re.sub(r'\s+', ' ', text).strip()

    def detect_category(self, text):
        """Map a section heading to a canonical category name.

        Returns the first category whose keyword list has a substring match
        in the cleaned *text*, or "其他信息" (miscellaneous) otherwise.
        """
        text = self.clean_text(text)
        for category, keywords in self.category_map.items():
            if any(keyword in text for keyword in keywords):
                return category
        return "其他信息"

    def extract_contact_info(self, soup):
        """Deep-search the page for a phone number and an e-mail address.

        Scans common text-bearing tags and returns a dict keyed with the
        canonical column names '电话' / '邮箱' so the values line up with
        the predefined Excel columns and with the already-filled-category
        skip in scrape_resume.  (The original keyed on 'phone'/'email',
        which matched neither.)
        """
        contact = {}
        for tag in soup.find_all(['p', 'div', 'span', 'li']):
            text = self.clean_text(tag.get_text())
            if not text:
                continue

            # Mainland-China mobile number preceded by a recognised label.
            phone_match = re.search(r'(电话|手机|Tel|Phone)[：:\s]*(1[3-9]\d{9})', text)
            if phone_match and '电话' not in contact:
                contact['电话'] = phone_match.group(2)

            # Labelled e-mail address.
            email_match = re.search(r'(邮箱|Email|E-mail)[：:\s]*([a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,})', text)
            if email_match and '邮箱' not in contact:
                contact['邮箱'] = email_match.group(2)

            # Stop early once both pieces of contact info are found.
            if len(contact) >= 2:
                break

        return contact

    def extract_section_content(self, soup, section_title):
        """Return the text between a matching <h2>/<h3> and the next heading.

        Tries an exact string match first, then a substring match on the
        heading text.  Returns "" when no heading matches.
        """
        section = (soup.find('h2', string=section_title) or
                   soup.find('h3', string=section_title) or
                   soup.find(lambda tag: tag.name in ['h2', 'h3'] and section_title in tag.get_text()))

        if not section:
            return ""

        content = []
        next_node = section.find_next_sibling()
        while next_node and next_node.name not in ['h2', 'h3']:
            if next_node.get_text(strip=True):
                content.append(self.clean_text(next_node.get_text()))
            next_node = next_node.find_next_sibling()

        return "\n".join(content)

    def scrape_resume(self, url):
        """Fetch one resume page, parse it, and append a record to self.data.

        On any exception an error record ({'来源网址', '错误信息'}) is
        appended instead so the batch summary can count failures.
        Returns True on success, False on failure.
        """
        try:
            print(f"🔍 正在解析简历: {url}")
            response = requests.get(url, headers=self.headers, timeout=15)
            # Let requests guess the real charset (resume pages are often GBK).
            response.encoding = response.apparent_encoding
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'html.parser')

            # 1. Provenance fields.
            resume_info = {
                '来源网址': url,
                '域名': urlparse(url).netloc
            }

            # 2. Name: try <h1>, then class="name", then id="name".
            name = (soup.find('h1') or soup.find(class_='name') or
                    soup.find(attrs={'id': 'name'}))
            if name:
                resume_info['姓名'] = self.clean_text(name.get_text())

            # 3. Contact info (keys already match the output columns).
            contact_info = self.extract_contact_info(soup)
            resume_info.update(contact_info)

            # 4. Walk every heading and collect the content below it.
            sections = soup.find_all(['h2', 'h3'])
            for section in sections:
                section_title = self.clean_text(section.get_text())
                category = self.detect_category(section_title)

                # Skip categories already filled (name / contact info above).
                if category in resume_info:
                    continue

                content = []
                next_node = section.next_sibling
                # next_sibling can yield NavigableStrings without .name /
                # .find_all, hence the getattr guards below.
                while next_node and getattr(next_node, 'name', None) not in ['h2', 'h3']:
                    if getattr(next_node, 'name', None) in ['ul', 'ol']:
                        # Render list items as bullet lines.
                        items = [f"• {li.get_text(strip=True)}" for li in next_node.find_all('li')]
                        content.extend(items)
                    elif getattr(next_node, 'get_text', None):
                        text = next_node.get_text(strip=True)
                        if text:
                            content.append(text)
                    next_node = next_node.next_sibling

                if content:
                    resume_info[category] = "\n".join(content)

            self.data.append(resume_info)
            print(f"✅ 成功解析: {resume_info.get('姓名', '无名简历')}")
            return True

        except Exception as e:
            # Best-effort batch scraping: record the failure and keep going.
            print(f"❌ 解析失败 {url}: {str(e)}")
            self.data.append({
                '来源网址': url,
                '错误信息': str(e)
            })
            return False

    def save_to_excel(self, filename='简历数据.xlsx'):
        """Write all collected records to *filename* with readable formatting.

        Known columns come first in a fixed order; extra categories follow.
        Column widths are sized by UTF-8 byte length (capped at 50), body
        cells wrap, and the header row is bold and frozen.
        Returns True when a file was written, False when there is no data.
        """
        if not self.data:
            print("⚠️ 没有数据可保存")
            return False

        df = pd.DataFrame(self.data)

        # Fixed column order for the well-known fields; extras appended after.
        predefined_columns = ['来源网址', '域名', '姓名', '电话', '邮箱',
                              '教育背景', '工作经历', '技能', '项目', '荣誉']
        other_columns = [col for col in df.columns if col not in predefined_columns]

        # Keep only columns that actually exist in the scraped data.
        available_columns = [col for col in predefined_columns + other_columns if col in df.columns]
        df = df[available_columns]

        with pd.ExcelWriter(filename, engine='openpyxl') as writer:
            df.to_excel(writer, index=False, sheet_name='简历信息')

            worksheet = writer.sheets['简历信息']

            # Auto-size columns; UTF-8 byte length approximates the display
            # width of CJK text better than len() does.
            for column in df.columns:
                max_length = max(
                    df[column].astype(str).map(lambda x: len(x.encode('utf-8'))).max(),
                    len(column.encode('utf-8'))
                ) + 2
                col_letter = get_column_letter(df.columns.get_loc(column) + 1)
                worksheet.column_dimensions[col_letter].width = min(max_length, 50)

            # Wrap long cell text and align it to the top of the cell.
            for row in worksheet.iter_rows(min_row=2, max_row=worksheet.max_row):
                for cell in row:
                    cell.alignment = Alignment(wrap_text=True, vertical='top')

            # Bold header row.
            for cell in worksheet[1]:
                cell.font = Font(bold=True)

            # Keep the header visible while scrolling.
            worksheet.freeze_panes = 'A2'

        # Fixed: the original printed the literal "(unknown)" instead of
        # interpolating the output filename.
        print(f"\n💾 数据已保存到 {filename}")
        return True

    def scrape_multiple(self, urls, output_file='简历汇总.xlsx', max_workers=3):
        """Scrape many resume URLs concurrently and save one Excel file.

        Blank entries are dropped and URLs without a scheme get 'http://'
        prepended.  *max_workers* bounds the thread-pool size.
        """
        start_time = time.time()
        print(f"🛠️ 开始批量爬取 {len(urls)} 份简历...")

        # Normalise the URL list: drop blanks, default the scheme to http.
        valid_urls = []
        for url in urls:
            url = url.strip()
            if not url:
                continue
            if not url.startswith(('http://', 'https://')):
                url = 'http://' + url
            valid_urls.append(url)

        # Concurrent fetch; scrape_resume handles its own errors, so map()
        # results can be ignored (the with-block waits for completion).
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            executor.map(self.scrape_resume, valid_urls)

        self.save_to_excel(output_file)

        # Summary.  NOTE: counts cover everything in self.data, so reuse the
        # instance for one batch only.
        success_count = sum(1 for item in self.data if '错误信息' not in item)
        print(f"\n📊 爬取完成！用时: {time.time()-start_time:.2f}秒")
        print(f"✅ 成功爬取: {success_count} 份简历")
        print(f"❌ 失败爬取: {len(valid_urls) - success_count} 份")

# Usage example
if __name__ == "__main__":
    # 1. Target resume hosts to crawl.  Bare IPs are fine: scrape_multiple
    #    prepends "http://" to any entry that lacks a scheme.
    target_urls = [
        '120.24.189.244', '120.76.242.204', '112.74.56.2', '120.79.188.49',
        '120.77.168.121', '120.76.141.247', '39.108.85.95', '39.108.165.103',
        '120.24.224.176', '112.74.96.250', '120.24.179.107', '112.74.96.96',
        '39.108.85.67', '120.76.248.64', '120.24.163.199', '39.108.86.199',
        '120.76.96.109', '39.108.122.181', '120.76.42.139', '120.77.205.166',
        '119.23.65.113', '119.23.146.144', '120.79.161.204', '120.26.16.17',
        '112.74.96.206', '120.76.243.45', '120.24.184.187', '112.74.163.99',
        '39.108.179.242', '120.76.250.150', '120.24.63.157', '120.79.176.186',
        '120.79.96.49', '119.23.146.116', '39.108.214.128', '120.76.243.36',
        '120.76.200.219', '120.26.16.17',
        # 'http://120.26.16.17/site2'
        # 'http://120.26.16.17/site1'
        # Add more resume URLs here...
    ]

    # 2. Build the scraper and run the whole batch with 5 concurrent workers.
    resume_scraper = SmartResumeScraper()
    resume_scraper.scrape_multiple(urls=target_urls,
                                   output_file='智能简历汇总.xlsx',
                                   max_workers=5)