import requests
import pandas as pd
from datetime import datetime
import time
import random
from bs4 import BeautifulSoup
import logging
import os
import re
from fake_useragent import UserAgent
from urllib.parse import quote

# Selenium imports for browser-based crawling (BOSS Zhipin needs JS rendering)
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.chrome import ChromeDriverManager

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class JobCrawler:
    """职位数据爬取类"""
    
    def __init__(self, config):
        """Initialize the crawler.

        Args:
            config (dict): Crawler configuration (e.g. API keys).
        """
        self.config = config
        # Shared session so cookies and pooled connections persist across requests.
        self.session = requests.Session()
        # Try to initialize fake_useragent; fall back to a built-in UA list
        # (see get_random_user_agent) when it fails, e.g. if its remote
        # data source is unreachable.
        try:
            self.ua = UserAgent()
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            logger.warning("无法初始化fake_useragent，将使用备用User-Agent")
            self.ua = None
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
            # "Authorization": f"Bearer {self.config['boss_api_key']}"
        }
    
    def get_random_user_agent(self):
        """Return a browser User-Agent string.

        Prefers a fake_useragent-generated value when available; otherwise
        picks one at random from a small built-in fallback pool.
        """
        if self.ua:
            return self.ua.random

        # Fallback pool of common desktop browser User-Agent strings,
        # used when fake_useragent failed to initialize.
        fallback_pool = (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15",
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36",
        )
        return random.choice(fallback_pool)
    
    def crawl_jobs(self, keywords, pages=3):
        """Crawl job postings, trying each source in turn.

        Tries Indeed first, then CSDN; if neither yields data, falls back to
        the built-in offline dataset. The combined result is written to a
        timestamped CSV under data/.

        Args:
            keywords (list): Search keywords.
            pages (int): Number of result pages to crawl per keyword.

        Returns:
            pd.DataFrame: Combined job data (empty if every source failed).
        """
        all_jobs = []

        logger.info("开始使用requests和BeautifulSoup爬取职位数据...")

        # Each source is attempted independently so one failure does not
        # abort the whole run.
        try:
            indeed_jobs = self.crawl_indeed_jobs(keywords, pages)
            if not indeed_jobs.empty:
                all_jobs.append(indeed_jobs)
                logger.info(f"成功从Indeed爬取了{len(indeed_jobs)}条数据")
        except Exception as e:
            logger.error(f"从Indeed爬取数据失败: {str(e)}")

        try:
            csdn_jobs = self.crawl_csdn_jobs(keywords, pages)
            if not csdn_jobs.empty:
                all_jobs.append(csdn_jobs)
                logger.info(f"成功从CSDN爬取了{len(csdn_jobs)}条数据")
        except Exception as e:
            logger.error(f"从CSDN爬取数据失败: {str(e)}")

        # Last resort: the static offline dataset, so callers always get
        # something to work with during development/testing.
        if not all_jobs:
            try:
                offline_jobs = self.load_job_dataset(keywords=keywords)
                if not offline_jobs.empty:
                    all_jobs.append(offline_jobs)
                    logger.info(f"成功从离线数据集加载了{len(offline_jobs)}条数据")
            except Exception as e:
                logger.error(f"加载离线数据集失败: {str(e)}")

        if not all_jobs:
            logger.error("所有爬取方法都失败了，无法获取职位数据")
            return pd.DataFrame()

        jobs_df = pd.concat(all_jobs, ignore_index=True)

        # Make sure the output directory exists before writing.
        os.makedirs("data", exist_ok=True)

        timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
        filename = f"data/jobs_{timestamp}.csv"
        jobs_df.to_csv(filename, index=False, encoding='utf-8')

        # BUG FIX: this log line previously printed a literal "(unknown)"
        # placeholder instead of the actual output path.
        logger.info(f"共爬取 {len(jobs_df)} 条职位数据，已保存到 {filename}")
        return jobs_df
    
    def crawl_indeed_jobs(self, keywords, pages=3):
        """Crawl job postings from Indeed (international site).

        Args:
            keywords (list): Search keywords.
            pages (int): Number of result pages to crawl per keyword.

        Returns:
            pd.DataFrame: Crawled job data (empty if nothing was found).
        """
        all_jobs = []

        logger.info("开始从Indeed爬取职位数据...")

        for keyword in keywords:
            logger.info(f"正在爬取关键词: {keyword}")

            for page in range(pages):
                # Indeed paginates with a 0-based "start" offset, 10 results per page.
                start_index = page * 10
                url = f"https://www.indeed.com/jobs?q={quote(keyword)}&start={start_index}"

                # Rotate the User-Agent per request to reduce the risk of blocking.
                headers = {
                    "User-Agent": self.get_random_user_agent(),
                    "Accept-Language": "en-US,en;q=0.9",
                    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
                    "Referer": "https://www.indeed.com/",
                    "DNT": "1"
                }

                logger.info(f"请求页面: {url}")

                try:
                    # BUG FIX: use the shared session (was module-level requests.get)
                    # so cookies and pooled connections persist across requests.
                    response = self.session.get(url, headers=headers, timeout=15)
                    response.raise_for_status()

                    soup = BeautifulSoup(response.text, 'html.parser')

                    # Job cards inside the search-result list.
                    job_cards = soup.select(".jobsearch-ResultsList .tapItem")

                    if not job_cards:
                        # Was an f-string with no placeholders; plain string is equivalent.
                        logger.warning("页面未找到职位卡片，可能需要调整选择器")
                        # Dump the raw HTML so the selectors can be debugged offline.
                        with open(f"debug_indeed_{keyword}_{page}.html", "w", encoding="utf-8") as f:
                            f.write(response.text)

                    logger.info(f"找到 {len(job_cards)} 个职位")

                    for card in job_cards:
                        try:
                            title_elem = card.select_one("h2.jobTitle span")
                            title = title_elem.text.strip() if title_elem else ""

                            company_elem = card.select_one(".companyName")
                            company = company_elem.text.strip() if company_elem else ""

                            location_elem = card.select_one(".companyLocation")
                            location = location_elem.text.strip() if location_elem else ""

                            # Indeed shows either an exact salary or an estimate.
                            salary_elem = card.select_one(".salary-snippet") or card.select_one(".estimated-salary")
                            salary = salary_elem.text.strip() if salary_elem else "面议"

                            summary_elem = card.select_one(".job-snippet")
                            requirements = summary_elem.text.strip() if summary_elem else ""

                            job_data = {
                                'title': title,
                                'company': company,
                                'salary': salary,
                                'location': location,
                                'experience': "根据职位描述判断",
                                'education': "根据职位描述判断",
                                'requirements': requirements,
                                'industry': "IT/互联网",
                                'keyword': keyword,
                                'crawl_date': datetime.now().strftime('%Y-%m-%d')
                            }

                            # Only keep rows with at least a title and a company.
                            if job_data['title'] and job_data['company']:
                                all_jobs.append(job_data)
                                logger.debug(f"成功爬取职位: {job_data['title']} at {job_data['company']}")

                        except Exception as e:
                            logger.error(f"解析职位卡片失败: {str(e)}")

                    # Random delay between pages to avoid being rate-limited.
                    delay = random.uniform(2, 5)
                    logger.info(f"页面处理完成，等待 {delay:.1f} 秒...")
                    time.sleep(delay)

                except requests.exceptions.RequestException as e:
                    logger.error(f"请求页面失败: {str(e)}")
                    time.sleep(3)
                except Exception as e:
                    logger.error(f"处理页面时出错: {str(e)}")
                    time.sleep(3)

        if not all_jobs:
            logger.warning("没有从Indeed爬取到任何职位数据")
            return pd.DataFrame()

        return pd.DataFrame(all_jobs)
    
    def crawl_csdn_jobs(self, keywords, pages=3):
        """Crawl job postings from CSDN's job board.

        Args:
            keywords (list): Search keywords.
            pages (int): Number of result pages to crawl per keyword.

        Returns:
            pd.DataFrame: Crawled job data (empty if nothing was found).
        """
        all_jobs = []

        logger.info("开始从CSDN招聘爬取职位数据...")

        for keyword in keywords:
            logger.info(f"正在爬取关键词: {keyword}")

            # CSDN pages are 1-based.
            for page in range(1, pages + 1):
                url = f"https://job.csdn.net/Jobs/index.html?keyWord={quote(keyword)}&page={page}"

                # Rotate the User-Agent per request to reduce the risk of blocking.
                headers = {
                    "User-Agent": self.get_random_user_agent(),
                    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
                    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
                    "Referer": "https://job.csdn.net/",
                }

                logger.info(f"请求页面: {url}")

                try:
                    # BUG FIX: use the shared session (was module-level requests.get)
                    # so cookies and pooled connections persist across requests.
                    response = self.session.get(url, headers=headers, timeout=15)
                    response.raise_for_status()

                    soup = BeautifulSoup(response.text, 'html.parser')

                    # Job cards inside the position list.
                    job_cards = soup.select(".position-list .position-item")

                    if not job_cards:
                        # Was an f-string with no placeholders; plain string is equivalent.
                        logger.warning("页面未找到职位卡片，可能需要调整选择器")
                        # Dump the raw HTML so the selectors can be debugged offline.
                        with open(f"debug_csdn_{keyword}_{page}.html", "w", encoding="utf-8") as f:
                            f.write(response.text)

                    logger.info(f"找到 {len(job_cards)} 个职位")

                    for card in job_cards:
                        try:
                            title_elem = card.select_one(".position-title")
                            title = title_elem.text.strip() if title_elem else ""

                            company_elem = card.select_one(".company-name")
                            company = company_elem.text.strip() if company_elem else ""

                            # The .job-info spans are, in order: location,
                            # experience requirement, education requirement.
                            job_info = card.select(".job-info span")
                            location = job_info[0].text.strip() if len(job_info) > 0 else ""
                            experience = job_info[1].text.strip() if len(job_info) > 1 else ""
                            education = job_info[2].text.strip() if len(job_info) > 2 else ""

                            salary_elem = card.select_one(".job-salary")
                            salary = salary_elem.text.strip() if salary_elem else "面议"

                            desc_elem = card.select_one(".job-desc")
                            requirements = desc_elem.text.strip() if desc_elem else ""

                            industry_elem = card.select_one(".company-type")
                            industry = industry_elem.text.strip() if industry_elem else "IT/互联网"

                            job_data = {
                                'title': title,
                                'company': company,
                                'salary': salary,
                                'location': location,
                                'experience': experience,
                                'education': education,
                                'requirements': requirements,
                                'industry': industry,
                                'keyword': keyword,
                                'crawl_date': datetime.now().strftime('%Y-%m-%d')
                            }

                            # Only keep rows with at least a title and a company.
                            if job_data['title'] and job_data['company']:
                                all_jobs.append(job_data)
                                logger.debug(f"成功爬取职位: {job_data['title']} at {job_data['company']}")

                        except Exception as e:
                            logger.error(f"解析职位卡片失败: {str(e)}")

                    # Random delay between pages to avoid being rate-limited.
                    delay = random.uniform(2, 5)
                    logger.info(f"页面处理完成，等待 {delay:.1f} 秒...")
                    time.sleep(delay)

                except requests.exceptions.RequestException as e:
                    logger.error(f"请求页面失败: {str(e)}")
                    time.sleep(3)
                except Exception as e:
                    logger.error(f"处理页面时出错: {str(e)}")
                    time.sleep(3)

        if not all_jobs:
            logger.warning("没有从CSDN爬取到任何职位数据")
            return pd.DataFrame()

        return pd.DataFrame(all_jobs)
    
    def load_job_dataset(self, keywords=None):
        """Build a small static job dataset for offline use and testing.

        Serves as the last-resort data source when all live crawlers fail
        (see crawl_jobs). The records are hand-curated and replicated x4 to
        give downstream code a reasonable row count.

        Args:
            keywords (list): Optional keywords; when given, rows are filtered
                to those whose title/requirements/industry/keyword fields
                contain a keyword (case-insensitive).

        Returns:
            pd.DataFrame: Offline job data.
        """
        logger.info("使用静态数据集...")

        # Accumulator for all hand-curated records; groups are merged below.
        jobs_data = []

        # Data-science related positions.
        data_science_jobs = [
            {
                "title": "数据科学家",
                "company": "腾讯",
                "salary": "25K-40K",
                "location": "上海",
                "experience": "3-5年",
                "education": "硕士及以上",
                "requirements": "精通Python, SQL, 机器学习, 深度学习, 具有数据挖掘经验, 熟悉常见算法框架如TensorFlow, PyTorch",
                "industry": "互联网",
                "keyword": "数据科学",
                "crawl_date": datetime.now().strftime('%Y-%m-%d')
            },
            {
                "title": "高级数据分析师",
                "company": "阿里巴巴",
                "salary": "30K-45K",
                "location": "杭州",
                "experience": "5-7年",
                "education": "本科及以上",
                "requirements": "熟悉数据仓库设计, 精通SQL和Python, 熟悉Hadoop生态系统, 有大数据处理经验",
                "industry": "互联网/电子商务",
                "keyword": "数据分析",
                "crawl_date": datetime.now().strftime('%Y-%m-%d')
            },
            {
                "title": "机器学习工程师",
                "company": "字节跳动",
                "salary": "35K-50K",
                "location": "北京",
                "experience": "3-5年",
                "education": "硕士及以上",
                "requirements": "熟悉机器学习算法，精通Python，有NLP或计算机视觉项目经验，熟悉PyTorch或TensorFlow",
                "industry": "互联网",
                "keyword": "机器学习",
                "crawl_date": datetime.now().strftime('%Y-%m-%d')
            }
        ]
        
        # Software-development related positions.
        dev_jobs = [
            {
                "title": "高级后端开发工程师",
                "company": "美团",
                "salary": "30K-45K",
                "location": "北京",
                "experience": "5-10年",
                "education": "本科及以上",
                "requirements": "精通Java/Go, 熟悉分布式系统设计, 微服务架构, 高并发编程, 有大型互联网平台开发经验",
                "industry": "互联网",
                "keyword": "后端开发",
                "crawl_date": datetime.now().strftime('%Y-%m-%d')
            },
            {
                "title": "前端开发工程师",
                "company": "百度",
                "salary": "20K-35K",
                "location": "北京",
                "experience": "3-5年",
                "education": "本科及以上",
                "requirements": "精通JavaScript, HTML5, CSS3, 熟悉React/Vue等前端框架, 有移动端适配经验",
                "industry": "互联网",
                "keyword": "前端开发",
                "crawl_date": datetime.now().strftime('%Y-%m-%d')
            },
            {
                "title": "全栈工程师",
                "company": "网易",
                "salary": "25K-40K",
                "location": "杭州",
                "experience": "3-7年",
                "education": "本科及以上",
                "requirements": "前后端技术全面, 熟悉Node.js, React, Java等技术栈, 有完整项目经验",
                "industry": "互联网",
                "keyword": "全栈开发",
                "crawl_date": datetime.now().strftime('%Y-%m-%d')
            }
        ]
        
        # Artificial-intelligence related positions.
        ai_jobs = [
            {
                "title": "AI研究员",
                "company": "华为",
                "salary": "40K-60K",
                "location": "深圳",
                "experience": "5-10年",
                "education": "博士",
                "requirements": "在NLP/CV领域有深入研究, 有顶会论文发表, 熟悉深度学习框架, 有带领团队经验",
                "industry": "通信/人工智能",
                "keyword": "人工智能",
                "crawl_date": datetime.now().strftime('%Y-%m-%d')
            },
            {
                "title": "计算机视觉算法工程师",
                "company": "商汤科技",
                "salary": "30K-50K",
                "location": "上海",
                "experience": "3-5年",
                "education": "硕士及以上",
                "requirements": "熟悉目标检测、图像分割等CV算法, 有相关项目经验, 熟悉PyTorch/TensorFlow",
                "industry": "人工智能",
                "keyword": "计算机视觉",
                "crawl_date": datetime.now().strftime('%Y-%m-%d')
            },
            {
                "title": "NLP工程师",
                "company": "科大讯飞",
                "salary": "25K-45K",
                "location": "合肥",
                "experience": "3-6年",
                "education": "硕士及以上",
                "requirements": "熟悉自然语言处理算法, 有文本分类、情感分析、对话系统等项目经验",
                "industry": "人工智能/语音技术",
                "keyword": "自然语言处理",
                "crawl_date": datetime.now().strftime('%Y-%m-%d')
            }
        ]
        
        # Product and operations positions.
        product_jobs = [
            {
                "title": "产品经理",
                "company": "京东",
                "salary": "20K-35K",
                "location": "北京",
                "experience": "3-5年",
                "education": "本科及以上",
                "requirements": "有电商/O2O产品经验, 熟悉产品设计流程, 有较强的数据分析能力和用户体验意识",
                "industry": "电子商务",
                "keyword": "产品经理",
                "crawl_date": datetime.now().strftime('%Y-%m-%d')
            },
            {
                "title": "用户运营专员",
                "company": "小红书",
                "salary": "15K-25K",
                "location": "上海",
                "experience": "2-4年",
                "education": "本科及以上",
                "requirements": "有社区/内容平台运营经验, 了解用户增长机制, 有较强的数据分析能力",
                "industry": "互联网/内容社区",
                "keyword": "运营",
                "crawl_date": datetime.now().strftime('%Y-%m-%d')
            }
        ]
        
        # Merge all record groups.
        jobs_data.extend(data_science_jobs)
        jobs_data.extend(dev_jobs)
        jobs_data.extend(ai_jobs)
        jobs_data.extend(product_jobs)
        
        # Replicate to inflate the row count. NOTE(review): list multiplication
        # repeats references to the same dicts; safe here because the data is
        # only read into a DataFrame below, never mutated.
        jobs_data = jobs_data * 4
        
        # Convert to a DataFrame for filtering/return.
        jobs_df = pd.DataFrame(jobs_data)
        
        # When keywords are given, keep rows matching any keyword. A row can
        # match several keywords and then appears once per match in the result.
        if keywords:
            filtered_jobs = []
            for keyword in keywords:
                mask = (
                    jobs_df['title'].str.contains(keyword, case=False, na=False) | 
                    jobs_df['requirements'].str.contains(keyword, case=False, na=False) |
                    jobs_df['industry'].str.contains(keyword, case=False, na=False) |
                    jobs_df['keyword'].str.contains(keyword, case=False, na=False)
                )
                filtered_jobs.append(jobs_df[mask].copy())
                # Re-tag the copied rows with the query keyword that matched them.
                filtered_jobs[-1]['keyword'] = keyword
            
            if filtered_jobs:
                jobs_df = pd.concat(filtered_jobs, ignore_index=True)
        
        logger.info(f"创建了静态数据集，共{len(jobs_df)}条记录")
        return jobs_df

    def crawl_boss_jobs(self, keywords, pages=5):
        """Crawl job postings from BOSS Zhipin with a Selenium-driven Chrome.

        Args:
            keywords (list): Search keywords.
            pages (int): Number of result pages to crawl per keyword.

        Returns:
            pd.DataFrame: Crawled job data, also saved to a timestamped CSV
            under data/. Empty DataFrame if the browser failed to start or
            nothing was found.
        """
        all_jobs = []

        # Configure Chrome. Headless mode is intentionally left disabled to
        # make the anti-bot flow observable while debugging.
        chrome_options = Options()
        # chrome_options.add_argument('--headless')
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-dev-shm-usage')
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument(f'user-agent={self.headers["User-Agent"]}')

        # Reduce obvious automation fingerprints.
        chrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])
        chrome_options.add_experimental_option('useAutomationExtension', False)

        logger.info("初始化Chrome浏览器...")
        try:
            # An argument-less Service lets Selenium (4.6+) resolve the
            # driver binary automatically.
            service = Service()
            driver = webdriver.Chrome(service=service, options=chrome_options)
            driver.set_window_size(1920, 1080)

            # Hide navigator.webdriver before any page script can read it.
            driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
                "source": """
                    Object.defineProperty(navigator, 'webdriver', {
                        get: () => undefined
                    })
                """
            })

        except Exception as e:
            logger.error(f"ChromeDriver初始化失败: {str(e)}")
            return pd.DataFrame()

        try:
            for keyword in keywords:
                logger.info(f"正在爬取关键词: {keyword}")

                for page in range(1, pages + 1):
                    try:
                        url = f"https://www.zhipin.com/web/geek/job?query={keyword}&page={page}"
                        logger.info(f"访问页面: {url}")
                        driver.get(url)

                        # Random wait so page scripts (and anti-bot checks) settle.
                        time.sleep(random.uniform(3, 5))

                        # Wait for the job list container to appear.
                        try:
                            WebDriverWait(driver, 15).until(
                                EC.presence_of_element_located((By.CSS_SELECTOR, ".job-list-box"))
                            )
                        except Exception:  # narrowed from bare except (kept Ctrl-C working)
                            logger.warning("无法找到职位列表容器，尝试其他选择器")
                            continue

                        # Scroll to the bottom to trigger lazily-loaded cards.
                        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                        time.sleep(2)

                        job_cards = driver.find_elements(By.CSS_SELECTOR, ".job-card-wrapper")

                        if not job_cards:
                            logger.warning(f"页面 {page} 未找到职位卡片")
                            continue

                        logger.info(f"找到 {len(job_cards)} 个职位卡片")

                        for card in job_cards:
                            try:
                                # _safe_get_text returns "" for missing elements,
                                # so every field lookup is failure-tolerant.
                                job_data = {
                                    'title': self._safe_get_text(card, ".job-name"),
                                    'company': self._safe_get_text(card, ".company-name"),
                                    'salary': self._safe_get_text(card, ".salary"),
                                    'location': self._safe_get_text(card, ".tag-list li:first-child"),
                                    'experience': self._safe_get_text(card, ".tag-list li:nth-child(2)"),
                                    'education': self._safe_get_text(card, ".tag-list li:nth-child(3)"),
                                    'requirements': self._safe_get_text(card, ".job-info-desc"),
                                    'industry': self._safe_get_text(card, ".company-tag-list li:first-child"),
                                    'keyword': keyword,
                                    'crawl_date': datetime.now().strftime('%Y-%m-%d')
                                }

                                # Only keep rows with at least a title and a company.
                                if job_data['title'] and job_data['company']:
                                    all_jobs.append(job_data)
                                    logger.debug(f"成功提取职位: {job_data['title']}")

                            except Exception as e:
                                logger.error(f"解析职位卡片错误: {str(e)}")
                                continue

                        # Random delay between pages to avoid being rate-limited.
                        delay_time = random.uniform(5, 8)
                        logger.info(f"页面 {page} 爬取完成，延时 {delay_time:.1f} 秒")
                        time.sleep(delay_time)

                    except Exception as e:
                        logger.error(f"爬取页面出错: {str(e)}")
                        time.sleep(5)
                        continue

        finally:
            # Always release the browser, even on unexpected errors.
            driver.quit()

        if not all_jobs:
            logger.warning("没有爬取到任何职位数据!")
            return pd.DataFrame()

        jobs_df = pd.DataFrame(all_jobs)

        # Make sure the output directory exists before writing.
        os.makedirs("data", exist_ok=True)

        timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
        filename = f"data/jobs_{timestamp}.csv"
        jobs_df.to_csv(filename, index=False, encoding='utf-8')

        # BUG FIX: this log line previously printed a literal "(unknown)"
        # placeholder instead of the actual output path.
        logger.info(f"共爬取 {len(jobs_df)} 条职位数据，已保存到 {filename}")
        return jobs_df
    
    def _safe_get_text(self, element, selector):
        """Safely read the text of a sub-element.

        Args:
            element: Selenium WebElement to search within.
            selector: CSS selector for the target sub-element.

        Returns:
            str: Stripped element text, or "" when the lookup fails for
            any reason (element missing, stale reference, ...).
        """
        try:
            return element.find_element(By.CSS_SELECTOR, selector).text.strip()
        except Exception:  # narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
            return ""
    
    def crawl_boss_jobs_bs4(self, keywords, pages=5):
        """Crawl BOSS Zhipin's web UI with requests + BeautifulSoup.

        Args:
            keywords (list): Search keywords.
            pages (int): Number of result pages to crawl per keyword.

        Returns:
            pd.DataFrame: Crawled job data, also saved to a timestamped CSV
            under data/. Empty DataFrame if nothing was found.
        """
        all_jobs = []

        # Small User-Agent pool for per-request rotation.
        user_agents = [
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15",
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0"
        ]

        for keyword in keywords:
            logger.info(f"正在爬取关键词: {keyword}")

            for page in range(1, pages + 1):
                try:
                    url = f"https://www.zhipin.com/web/geek/job?query={keyword}&page={page}"

                    headers = {
                        "User-Agent": random.choice(user_agents),
                        "Accept": "text/html,application/xhtml+xml,application/xml",
                        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
                        "Referer": "https://www.zhipin.com/",
                        "Connection": "keep-alive",
                        "Cache-Control": "max-age=0"
                    }

                    logger.info(f"请求URL: {url}")
                    # BUG FIX: added a timeout so a stalled connection cannot
                    # hang the whole crawl indefinitely.
                    response = requests.get(url, headers=headers, timeout=15)

                    logger.info(f"响应状态码: {response.status_code}")
                    if response.status_code == 200:
                        soup = BeautifulSoup(response.text, 'html.parser')

                        # Job cards inside the result list container.
                        job_list = soup.select('.job-list-box .job-card-wrapper')
                        logger.info(f"找到 {len(job_list)} 个职位卡片")

                        for job_card in job_list:
                            try:
                                title = job_card.select_one('.job-name').text.strip() if job_card.select_one('.job-name') else ""
                                company = job_card.select_one('.company-name').text.strip() if job_card.select_one('.company-name') else ""
                                salary = job_card.select_one('.salary').text.strip() if job_card.select_one('.salary') else ""

                                # The tag list is, in order: location,
                                # experience requirement, education requirement.
                                tags = job_card.select('.tag-list li')
                                location = tags[0].text.strip() if len(tags) > 0 else ""
                                experience = tags[1].text.strip() if len(tags) > 1 else ""
                                education = tags[2].text.strip() if len(tags) > 2 else ""

                                requirements = job_card.select_one('.job-info-desc').text.strip() if job_card.select_one('.job-info-desc') else ""

                                industry_tags = job_card.select('.company-tag-list li')
                                industry = industry_tags[0].text.strip() if industry_tags else ""

                                job_data = {
                                    'title': title,
                                    'company': company,
                                    'salary': salary,
                                    'requirements': requirements,
                                    'location': location,
                                    'education': education,
                                    'experience': experience,
                                    'industry': industry,
                                    'keyword': keyword,
                                    'crawl_date': datetime.now().strftime('%Y-%m-%d')
                                }
                                all_jobs.append(job_data)

                            except Exception as e:
                                logger.error(f"解析职位卡片错误: {str(e)}")

                        # Random delay between pages to avoid being rate-limited.
                        delay_time = random.uniform(3, 7)
                        logger.info(f"页面 {page} 处理完成，延时 {delay_time:.1f} 秒")
                        time.sleep(delay_time)
                    else:
                        # BUG FIX: non-200 responses used to be dropped silently.
                        logger.warning(f"请求失败，状态码: {response.status_code}")

                except Exception as e:
                    logger.error(f"爬取页面出错: {str(e)}")
                    time.sleep(5)

        # Make sure the output directory exists before writing.
        os.makedirs("data", exist_ok=True)

        if not all_jobs:
            logger.warning("没有爬取到任何职位数据!")
            return pd.DataFrame()

        jobs_df = pd.DataFrame(all_jobs)

        timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
        filename = f"data/jobs_{timestamp}.csv"
        jobs_df.to_csv(filename, index=False, encoding='utf-8')

        # BUG FIX: this log line previously printed a literal "(unknown)"
        # placeholder instead of the actual output path.
        logger.info(f"共爬取 {len(jobs_df)} 条职位数据，已保存到 {filename}")
        return jobs_df
    
    def extract_job_skills(self, job_description):
        """Extract skill requirements from a job description.

        Uses simple case-insensitive keyword matching against a fixed
        skill list; a real project should use proper NLP techniques.

        Args:
            job_description (str): Job description text.

        Returns:
            list: Skills found, in the order of the known-skills list.
        """
        common_skills = [
            "Python", "Java", "C++", "JavaScript", "SQL", "数据分析",
            "机器学习", "深度学习", "自然语言处理", "计算机视觉",
            "大数据", "云计算", "DevOps", "敏捷开发", "项目管理"
        ]

        # Normalize the (possibly long) description once, instead of
        # re-lowercasing it for every skill inside the loop.
        description_lower = job_description.lower()
        return [skill for skill in common_skills
                if skill.lower() in description_lower]

    def crawl_zhilian_jobs(self, keywords, pages=5):
        """Crawl job postings from Zhilian (sou.zhaopin.com) with Selenium.

        Drives a real Chrome browser because the listing pages are rendered
        client-side. For each keyword it pages through search results, opens
        each job card's detail view to pull the full description and company
        industry, then saves everything to a timestamped CSV under data/.

        Args:
            keywords (list): Search keywords.
            pages (int): Number of result pages to crawl per keyword.

        Returns:
            pd.DataFrame: Crawled job rows; empty DataFrame if the driver
                failed to start or nothing was scraped.
        """
        all_jobs = []

        # Configure Chrome; these flags keep it stable in containers/CI.
        chrome_options = Options()
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-dev-shm-usage')
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument(f'user-agent={self.headers["User-Agent"]}')

        logger.info("初始化Chrome浏览器...")
        try:
            service = Service()
            driver = webdriver.Chrome(service=service, options=chrome_options)
            driver.set_window_size(1920, 1080)

        except Exception as e:
            logger.error(f"ChromeDriver初始化失败: {str(e)}")
            return pd.DataFrame()

        try:
            for keyword in keywords:
                logger.info(f"正在爬取关键词: {keyword}")

                for page in range(1, pages + 1):
                    try:
                        # Zhilian search URL: kw = keyword, p = page number.
                        url = f"https://sou.zhaopin.com/?jl=&kw={keyword}&p={page}"
                        logger.info(f"访问页面: {url}")
                        driver.get(url)

                        # Give the page time to load before polling the DOM.
                        time.sleep(random.uniform(3, 5))

                        # Wait for the job-list container to appear.
                        try:
                            WebDriverWait(driver, 15).until(
                                EC.presence_of_element_located((By.CLASS_NAME, "positionlist"))
                            )
                        except:
                            logger.warning("无法找到职位列表容器")
                            continue

                        # Scroll to trigger lazy-loading of more results.
                        self._scroll_page(driver)

                        # Collect all job cards on the page.
                        job_cards = driver.find_elements(By.CLASS_NAME, "joblist-box__item")

                        if not job_cards:
                            logger.warning(f"页面 {page} 未找到职位卡片")
                            continue

                        logger.info(f"找到 {len(job_cards)} 个职位卡片")

                        for card in job_cards:
                            try:
                                # Extract the basics from the listing card.
                                title = self._safe_get_text(card, ".joblist-box__iteminfo .zp-joblist-job-name")
                                company = self._safe_get_text(card, ".joblist-box__iteminfo .company-name")
                                salary = self._safe_get_text(card, ".joblist-box__iteminfo .salary")

                                # Tag list carries location / experience / education.
                                tags = card.find_elements(By.CSS_SELECTOR, ".joblist-box__iteminfo .tag-list__item")
                                tag_texts = [tag.text.strip() for tag in tags if tag.text.strip()]

                                # Positional parsing of the tag texts.
                                location = tag_texts[0] if len(tag_texts) > 0 else ""
                                experience = tag_texts[1] if len(tag_texts) > 1 else ""
                                education = tag_texts[2] if len(tag_texts) > 2 else ""

                                # Click through for the full detail page.
                                try:
                                    card.click()
                                    time.sleep(random.uniform(1, 2))

                                    # NOTE(review): assumes the click opened a NEW
                                    # tab; if the site navigates in the same tab,
                                    # windows[-1] is the listing window and the
                                    # driver.close() below would close it — verify.
                                    windows = driver.window_handles
                                    driver.switch_to.window(windows[-1])

                                    # Wait for the detail content to render.
                                    WebDriverWait(driver, 10).until(
                                        EC.presence_of_element_located((By.CLASS_NAME, "describtion__detail-content"))
                                    )

                                    # Full requirements text from the detail page.
                                    requirements = self._safe_get_text(driver, ".describtion__detail-content")

                                    # Company industry from the detail page.
                                    industry = self._safe_get_text(driver, ".company__industry")

                                    # Close the detail tab and return to the listing.
                                    driver.close()
                                    driver.switch_to.window(windows[0])

                                except Exception as e:
                                    logger.error(f"获取职位详情失败: {str(e)}")
                                    requirements = ""
                                    industry = ""

                                job_data = {
                                    'title': title,
                                    'company': company,
                                    'salary': salary,
                                    'location': location,
                                    'experience': experience,
                                    'education': education,
                                    'requirements': requirements,
                                    'industry': industry,
                                    'keyword': keyword,
                                    'crawl_date': datetime.now().strftime('%Y-%m-%d')
                                }

                                # Keep only rows with at least a title and company.
                                if job_data['title'] and job_data['company']:
                                    all_jobs.append(job_data)
                                    logger.debug(f"成功提取职位: {job_data['title']}")

                            except Exception as e:
                                logger.error(f"解析职位卡片错误: {str(e)}")
                                continue

                        # Random delay between pages to look less bot-like.
                        delay_time = random.uniform(5, 8)
                        logger.info(f"页面 {page} 爬取完成，延时 {delay_time:.1f} 秒")
                        time.sleep(delay_time)

                    except Exception as e:
                        logger.error(f"爬取页面出错: {str(e)}")
                        time.sleep(5)
                        continue

        finally:
            # Always release the browser, even on unexpected errors.
            driver.quit()

        # Bail out early if nothing was scraped.
        if not all_jobs:
            logger.warning("没有爬取到任何职位数据!")
            return pd.DataFrame()

        # Convert to DataFrame for saving and downstream use.
        jobs_df = pd.DataFrame(all_jobs)

        # Make sure the data directory exists.
        os.makedirs("data", exist_ok=True)

        # Persist to a timestamped CSV.
        timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
        filename = f"data/jobs_{timestamp}.csv"
        jobs_df.to_csv(filename, index=False, encoding='utf-8')

        # Bug fix: the log line previously never interpolated the filename.
        logger.info(f"共爬取 {len(jobs_df)} 条职位数据，已保存到 {filename}")
        return jobs_df

    def _scroll_page(self, driver):
        """Scroll to the bottom of the page repeatedly until its height
        stops growing, so lazily-loaded content gets rendered.
        """
        previous_height = driver.execute_script("return document.body.scrollHeight")
        reached_bottom = False

        while not reached_bottom:
            # Jump to the current bottom, then give the page a moment to
            # lazy-load more results before re-measuring.
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(random.uniform(1, 2))

            # Height unchanged after the scroll means nothing more loaded.
            current_height = driver.execute_script("return document.body.scrollHeight")
            reached_bottom = current_height == previous_height
            previous_height = current_height

    def crawl_51job(self, keywords, pages=5):
        """Crawl job postings from 51job (we.51job.com) with Selenium.

        Same browser-driven approach as the Zhilian crawler: page through
        the search results per keyword, open each detail page in a new tab
        for the full description, then save everything to a timestamped CSV
        under data/.

        Args:
            keywords (list): Search keywords.
            pages (int): Number of result pages to crawl per keyword.

        Returns:
            pd.DataFrame: Crawled job rows; empty DataFrame if the driver
                failed to start or nothing was scraped.
        """
        all_jobs = []

        # Configure Chrome; these flags keep it stable in containers/CI.
        chrome_options = Options()
        # chrome_options.add_argument('--headless')  # headless disabled while debugging
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-dev-shm-usage')
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument(f'user-agent={self.headers["User-Agent"]}')

        logger.info("初始化Chrome浏览器...")
        try:
            service = Service()
            driver = webdriver.Chrome(service=service, options=chrome_options)
            driver.set_window_size(1920, 1080)

        except Exception as e:
            logger.error(f"ChromeDriver初始化失败: {str(e)}")
            return pd.DataFrame()

        try:
            for keyword in keywords:
                logger.info(f"正在爬取关键词: {keyword}")

                for page in range(1, pages + 1):
                    try:
                        # 51job search URL: keyword + pageNum drive the results.
                        url = f"https://we.51job.com/pc/search?keyword={keyword}&searchType=2&sortType=0&metro=&pageNum={page}"
                        logger.info(f"访问页面: {url}")
                        driver.get(url)

                        # Give the page time to load before polling the DOM.
                        time.sleep(random.uniform(3, 5))

                        # Wait for the job-list container to appear.
                        try:
                            WebDriverWait(driver, 15).until(
                                EC.presence_of_element_located((By.CLASS_NAME, "j51job-list"))
                            )
                        except:
                            logger.warning("无法找到职位列表容器")
                            continue

                        # Scroll to trigger lazy-loading of more results.
                        self._scroll_page(driver)

                        # Collect all job cards on the page.
                        job_cards = driver.find_elements(By.CLASS_NAME, "items")

                        if not job_cards:
                            logger.warning(f"页面 {page} 未找到职位卡片")
                            continue

                        logger.info(f"找到 {len(job_cards)} 个职位卡片")

                        for card in job_cards:
                            try:
                                # Basics from the listing card; detail fields
                                # are filled in from the detail page below.
                                job_data = {
                                    'title': self._safe_get_text(card, ".jname.at"),
                                    'company': self._safe_get_text(card, ".cname.at"),
                                    'salary': self._safe_get_text(card, ".sal"),
                                    'location': self._safe_get_text(card, ".d.at"),
                                    'experience': "",  # filled from detail page
                                    'education': "",   # filled from detail page
                                    'requirements': "", # filled from detail page
                                    'industry': "",    # filled from detail page
                                    'keyword': keyword,
                                    'crawl_date': datetime.now().strftime('%Y-%m-%d')
                                }

                                # Follow the detail link in a new tab.
                                try:
                                    detail_link = card.find_element(By.CSS_SELECTOR, ".jname.at").get_attribute("href")
                                    if detail_link:
                                        # Open the detail page in a new tab.
                                        driver.execute_script(f"window.open('{detail_link}', '_blank');")
                                        time.sleep(random.uniform(2, 3))

                                        # Switch to the newly opened tab.
                                        driver.switch_to.window(driver.window_handles[-1])

                                        # Wait for the detail content to render.
                                        WebDriverWait(driver, 10).until(
                                            EC.presence_of_element_located((By.CLASS_NAME, "job_msg"))
                                        )

                                        # NOTE(review): experience and education read
                                        # the SAME selector (.msg.ltype) and will hold
                                        # identical text — likely needs per-field
                                        # parsing of that element; verify on the site.
                                        job_data['requirements'] = self._safe_get_text(driver, ".job_msg")
                                        job_data['experience'] = self._safe_get_text(driver, ".msg.ltype")
                                        job_data['education'] = self._safe_get_text(driver, ".msg.ltype")
                                        job_data['industry'] = self._safe_get_text(driver, ".com_tag")

                                        # Close the detail tab and return to the listing.
                                        driver.close()
                                        driver.switch_to.window(driver.window_handles[0])

                                except Exception as e:
                                    logger.error(f"获取职位详情失败: {str(e)}")

                                # Keep only rows with at least a title and company.
                                if job_data['title'] and job_data['company']:
                                    all_jobs.append(job_data)
                                    logger.debug(f"成功提取职位: {job_data['title']}")

                            except Exception as e:
                                logger.error(f"解析职位卡片错误: {str(e)}")
                                continue

                        # Random delay between pages to look less bot-like.
                        delay_time = random.uniform(5, 8)
                        logger.info(f"页面 {page} 爬取完成，延时 {delay_time:.1f} 秒")
                        time.sleep(delay_time)

                    except Exception as e:
                        logger.error(f"爬取页面出错: {str(e)}")
                        time.sleep(5)
                        continue

        finally:
            # Always release the browser, even on unexpected errors.
            driver.quit()

        # Bail out early if nothing was scraped.
        if not all_jobs:
            logger.warning("没有爬取到任何职位数据!")
            return pd.DataFrame()

        # Convert to DataFrame for saving and downstream use.
        jobs_df = pd.DataFrame(all_jobs)

        # Make sure the data directory exists.
        os.makedirs("data", exist_ok=True)

        # Persist to a timestamped CSV.
        timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
        filename = f"data/jobs_{timestamp}.csv"
        jobs_df.to_csv(filename, index=False, encoding='utf-8')

        # Bug fix: the log line previously never interpolated the filename.
        logger.info(f"共爬取 {len(jobs_df)} 条职位数据，已保存到 {filename}")
        return jobs_df

    def fetch_jobs_from_api(self, keywords, pages=3):
        """Fetch job data from an open jobs API.

        Uses the GitHub Jobs API as an example endpoint. NOTE: that API is
        no longer maintained, so requests will fail in practice — swap in
        another open API before relying on this path.

        Args:
            keywords (list): Search keywords.
            pages (int): Number of pages to request per keyword.

        Returns:
            pd.DataFrame: Fetched job rows; empty DataFrame if nothing was
                returned by the API.
        """
        all_jobs = []
        logger.info("开始从API获取职位数据...")

        for keyword in keywords:
            logger.info(f"搜索关键词: {keyword}")

            for page in range(1, pages + 1):
                try:
                    # Build the API request URL for this keyword/page.
                    url = f"https://jobs.github.com/positions.json?description={keyword}&page={page}"

                    headers = {
                        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                    }

                    # Issue the API request with a timeout.
                    response = requests.get(url, headers=headers, timeout=10)

                    if response.status_code == 200:
                        jobs_data = response.json()
                        logger.info(f"获取到 {len(jobs_data)} 条职位信息")

                        for job in jobs_data:
                            # Map the API payload onto our common schema;
                            # fields the API lacks get placeholder values.
                            job_data = {
                                'title': job.get('title', ''),
                                'company': job.get('company', ''),
                                'salary': '面议',  # API provides no salary
                                'location': job.get('location', ''),
                                'experience': '',  # not provided by the API
                                'education': '',   # not provided by the API
                                'requirements': job.get('description', ''),
                                'industry': job.get('type', ''),
                                'keyword': keyword,
                                'crawl_date': datetime.now().strftime('%Y-%m-%d')
                            }

                            all_jobs.append(job_data)

                    time.sleep(random.uniform(1, 3))  # pause between requests

                except Exception as e:
                    logger.error(f"API请求失败: {str(e)}")
                    time.sleep(3)

        # Bail out early if nothing came back.
        if not all_jobs:
            logger.warning("没有从API获取到任何职位数据!")
            return pd.DataFrame()

        # Convert to DataFrame for saving and downstream use.
        jobs_df = pd.DataFrame(all_jobs)

        # Persist to a timestamped CSV.
        os.makedirs("data", exist_ok=True)
        timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
        filename = f"data/jobs_{timestamp}.csv"
        jobs_df.to_csv(filename, index=False, encoding='utf-8')

        # Bug fix: the log line previously never interpolated the filename.
        logger.info(f"共获取 {len(jobs_df)} 条职位数据，已保存到 {filename}")
        return jobs_df

    def crawl_with_proxy(self, keywords, pages=3, proxy_url=None):
        """Crawl job data through a proxy (not implemented).

        The request/rotation logic depends on which proxy service is used
        and must be customized for it.

        Args:
            keywords (list): Search keywords.
            pages (int): Pages to crawl per keyword.
            proxy_url (str): Proxy service API endpoint or proxy server
                address.

        Returns:
            pd.DataFrame: Always empty until implemented. (The previous
                stub fell through and returned None, breaking callers that
                expect the documented DataFrame.)
        """
        # TODO: implement proxy-based crawling for the chosen proxy service.
        return pd.DataFrame()

    def scrape_liepin_jobs(self, keywords, pages=3):
        """Scrape job postings from Liepin with requests + BeautifulSoup.

        Plain HTTP scraping (no browser). For each keyword it fetches the
        search-result pages, tries several known card selectors, and parses
        title/company/salary/conditions from each card. Falls back to the
        offline dataset when nothing is scraped.

        Args:
            keywords (list): Search keywords.
            pages (int): Number of result pages to scrape per keyword.

        Returns:
            pd.DataFrame: Scraped job rows, or the offline dataset from
                self.load_job_dataset() when scraping yielded nothing.
        """
        all_jobs = []

        logger.info("开始从猎聘网爬取职位数据...")

        for keyword in keywords:
            logger.info(f"正在爬取关键词: {keyword}")

            for page in range(1, pages + 1):
                try:
                    # Build the search URL; quote() percent-encodes the
                    # keyword as UTF-8 for safe inclusion in the query.
                    encoded_keyword = quote(keyword)
                    url = f"https://www.liepin.com/zhaopin/?key={encoded_keyword}&curPage={page}"

                    # Browser-like headers with a rotating User-Agent.
                    headers = {
                        "User-Agent": self.get_random_user_agent(),
                        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
                        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
                        "Connection": "keep-alive",
                        "Referer": "https://www.liepin.com/"
                    }

                    logger.info(f"请求页面: {url}")

                    # Fetch the page; raise on HTTP errors.
                    response = requests.get(url, headers=headers, timeout=15)
                    response.raise_for_status()

                    # Parse the returned HTML.
                    soup = BeautifulSoup(response.text, 'html.parser')

                    # Primary selector for job cards.
                    job_cards = soup.select(".job-list-item")

                    if not job_cards:
                        logger.warning(f"在页面 {page} 未找到职位卡片")

                        # The site's markup changes; try fallback selectors.
                        job_cards = soup.select(".job-card-container")
                        if not job_cards:
                            job_cards = soup.select(".job-card")

                        if not job_cards:
                            logger.warning("尝试所有已知选择器后仍未找到职位列表")
                            continue

                    logger.info(f"找到 {len(job_cards)} 个职位卡片")

                    # Parse each job card.
                    for card in job_cards:
                        try:
                            # Selectors come in pairs to cover markup variants.
                            title_elem = card.select_one(".job-title") or card.select_one(".job-name")
                            title = title_elem.text.strip() if title_elem else ""

                            company_elem = card.select_one(".company-name") or card.select_one(".company-title")
                            company = company_elem.text.strip() if company_elem else ""

                            salary_elem = card.select_one(".salary") or card.select_one(".job-salary")
                            salary = salary_elem.text.strip() if salary_elem else "面议"

                            # Defaults when condition tags are missing.
                            location = ""
                            experience = ""
                            education = ""

                            # Condition tags are positional:
                            # 0 = location, 1 = experience, 2 = education.
                            condition_elems = card.select(".condition-item") or card.select(".job-labels span")
                            for i, elem in enumerate(condition_elems):
                                text = elem.text.strip()
                                if i == 0:
                                    location = text
                                elif i == 1:
                                    experience = text
                                elif i == 2:
                                    education = text

                            # Job description, if present on the card.
                            desc_elem = card.select_one(".job-description") or card.select_one(".job-detail")
                            requirements = desc_elem.text.strip() if desc_elem else ""

                            # Assemble the row in the common schema.
                            job_data = {
                                'title': title,
                                'company': company,
                                'salary': salary,
                                'location': location,
                                'experience': experience,
                                'education': education,
                                'requirements': requirements,
                                'industry': "IT/互联网",
                                'keyword': keyword,
                                'crawl_date': datetime.now().strftime('%Y-%m-%d')
                            }

                            # Keep only rows with at least a title and company.
                            if job_data['title'] and job_data['company']:
                                all_jobs.append(job_data)
                                logger.debug(f"成功解析职位: {job_data['title']} @ {job_data['company']}")

                        except Exception as e:
                            logger.error(f"解析职位卡片时出错: {str(e)}")

                    # Random delay between pages to look less bot-like.
                    delay = random.uniform(3, 6)
                    logger.info(f"页面 {page} 处理完成，延时 {delay:.1f} 秒")
                    time.sleep(delay)

                except Exception as e:
                    logger.error(f"爬取页面时出错: {str(e)}")
                    time.sleep(3)

        # Fall back to the offline dataset when scraping yielded nothing.
        if not all_jobs:
            logger.warning("没有爬取到任何职位数据，将使用离线数据")
            return self.load_job_dataset(keywords=keywords)

        # Convert to DataFrame for saving and downstream use.
        jobs_df = pd.DataFrame(all_jobs)

        # Persist to a timestamped CSV.
        os.makedirs("data", exist_ok=True)
        timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
        filename = f"data/jobs_{timestamp}.csv"
        jobs_df.to_csv(filename, index=False, encoding='utf-8')

        # Bug fix: the log line previously never interpolated the filename.
        logger.info(f"共爬取到 {len(jobs_df)} 条职位数据，已保存至 {filename}")
        return jobs_df