import requests
import pandas as pd
from bs4 import BeautifulSoup
import time
import re
from urllib.parse import quote
from concurrent.futures import ThreadPoolExecutor, as_completed
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

class AIJobAnalyzer:
    """Scrape job postings from zhaopin.com and estimate, per company, the
    share of AI-related positions.

    Pipeline:
      1. Read company names from an Excel file (second column).
      2. Search each company's postings on sou.zhaopin.com (concurrently).
      3. Flag AI-related jobs by keyword matching on title/description.
      4. Write the raw jobs and per-company statistics to CSV files.
    """

    # Generic AI-adjacent skills checked in addition to ``ai_keywords``.
    # Entries are lowercase because matching is done on lowercased text.
    # NOTE(review): short tokens like 'cv' and 'ai' are matched as plain
    # substrings and may over-match English words (e.g. 'maintain') —
    # confirm whether word-boundary matching is wanted before tightening.
    _AI_SKILLS = (
        'python', 'tensorflow', 'pytorch', 'keras', 'caffe',
        '机器学习', '深度学习', '神经网络', 'nlp', 'cv',
        '数据挖掘', '数据分析', '推荐系统', '算法'
    )

    def __init__(self):
        # Browser-like headers to reduce the chance of being blocked.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1'
        }
        self.base_url = 'https://sou.zhaopin.com'

        # One shared session with a pooled adapter; transient 5xx responses
        # are retried up to 3 times with exponential backoff.
        self.session = requests.Session()
        retry_strategy = Retry(
            total=3,
            backoff_factor=1,
            status_forcelist=[500, 502, 503, 504]
        )
        adapter = HTTPAdapter(max_retries=retry_strategy, pool_connections=10, pool_maxsize=100)
        self.session.mount("http://", adapter)
        self.session.mount("https://", adapter)

        # AI-related keywords grouped by topic; matching is case-insensitive
        # (see is_ai_job). The group names are informational only.
        self.ai_keywords = {
            # Core AI terms
            'core': ['人工智能', 'AI', '机器学习', '深度学习', '算法', '算法工程师'],
            # Data science
            'data': ['数据科学家', '数据分析', '数据挖掘', '大数据', '数据架构'],
            # Computer vision
            'vision': ['计算机视觉', '图像识别', '视觉算法', '图像处理', 'CV'],
            # Natural language processing
            'nlp': ['NLP', '自然语言处理', '文本挖掘', '语音识别', '对话系统'],
            # Deep-learning frameworks
            'framework': ['TensorFlow', 'PyTorch', 'Keras', 'caffe', 'mxnet'],
            # Large language models
            'llm': ['大模型', 'LLM', 'GPT', 'ChatGPT', 'AIGC', 'Prompt', 'LangChain'],
            # Other AI technologies
            'other': ['神经网络', '强化学习', '推荐系统', '知识图谱', '智能驾驶']
        }

        # Site boilerplate to strip from scraped text. Order matters: longer
        # phrases (e.g. '我聊聊吧') are removed before their substrings
        # (e.g. '聊聊').
        self.filter_texts = [
            '立即沟通',
            '下载智联APP',
            '我聊聊吧',
            '智联APP',
            'APP',
            '下载',
            '聊聊'
        ]

    def clean_text(self, text):
        """Collapse whitespace and strip site boilerplate from *text*.

        Falsy input (None, "") is returned unchanged.
        """
        if not text:
            return text

        # Collapse newlines/runs of whitespace into single spaces.
        text = re.sub(r'\s+', ' ', text).strip()

        # Remove each boilerplate phrase (in declaration order).
        for filter_text in self.filter_texts:
            text = text.replace(filter_text, '')

        # Removals may have left doubled spaces; collapse again.
        text = re.sub(r'\s+', ' ', text).strip()

        return text

    def read_companies_from_excel(self, filename):
        """Read company names from *filename* and return them as a list.

        Assumes the company name is in the second column of the sheet.
        Returns [] (after printing the error) if the file cannot be read.
        """
        try:
            df = pd.read_excel(filename)
            # Company names are expected in the second column.
            company_column = df.columns[1]
            companies = df[company_column].dropna().tolist()
            print(f"成功读取 {len(companies)} 家公司")
            return companies
        except Exception as e:
            print(f"读取Excel文件失败: {e}")
            return []

    def _first_text(self, item, selectors):
        """Return the cleaned text of the first selector in *selectors* that
        matches *item* and yields non-empty text, or "" if none does."""
        for selector in selectors:
            elem = item.select_one(selector)
            if elem:
                text = self.clean_text(elem.get_text().strip())
                if text:
                    return text
        return ""

    def get_company_jobs(self, company_name):
        """Search zhaopin.com for *company_name* and return a list of job
        dicts with keys 公司名称 / 职位名称 / 职位描述 / 薪资.

        Only postings with a recognizable salary string are kept; items
        without a title are skipped. Returns [] on any request error.
        """
        try:
            # Build the search URL. NOTE(review): jl=489 appears to be a
            # location code and kt=3 a search-type flag — confirm against
            # the site's query-parameter semantics.
            encoded_query = quote(company_name)
            url = f"{self.base_url}/?jl=489&kw={encoded_query}&kt=3"

            response = self.session.get(url, headers=self.headers, timeout=10)
            response.raise_for_status()

            soup = BeautifulSoup(response.text, 'html.parser')

            # The site's markup changes often; cast a wide net for job cards.
            job_items = soup.select('.joblist-item, .job-item, [class*="job"]')
            jobs = []

            for item in job_items:
                try:
                    # Job title: prefer the current site selector. If that
                    # element exists, its (possibly empty) text is used as-is;
                    # legacy/generic selectors are tried only when it is absent.
                    title_elem = item.select_one('.jobinfo__name')
                    if title_elem:
                        title = self.clean_text(title_elem.get_text().strip())
                    else:
                        title = self._first_text(item, [
                            '.job-title',
                            '.jobname',
                            '.job_name',
                            '[class*="title"]',
                            '.position',
                            '.position-name',
                            'h3',
                            'h4'
                        ])

                    if not title:
                        continue

                    # Job description (optional — may stay "").
                    desc = self._first_text(item, [
                        '.jobinfo__desc',
                        '.job-desc',
                        '.job-desc-text',
                        '.job-detail',
                        '[class*="desc"]'
                    ])

                    # Salary string.
                    salary = self._first_text(item, [
                        '.jobinfo__salary',
                        '.salary',
                        '.job-salary',
                        '[class*="salary"]'
                    ])

                    # Keep only entries with a salary: cards without one are
                    # typically ads or non-posting page fragments.
                    if salary:
                        jobs.append({
                            '公司名称': company_name,
                            '职位名称': title,
                            '职位描述': desc,
                            '薪资': salary
                        })

                except Exception as e:
                    print(f"处理职位信息时出错: {e}")
                    continue

            return jobs

        except Exception as e:
            print(f"获取 {company_name} 的职位列表时出错: {e}")
            return []

    def collect_all_jobs(self, companies):
        """Fetch job lists for every company in *companies* concurrently
        and return them flattened into a single list of job dicts."""
        all_jobs = []

        # Fan out over a small thread pool; get_company_jobs is I/O-bound.
        with ThreadPoolExecutor(max_workers=3) as executor:
            future_to_company = {
                executor.submit(self.get_company_jobs, company): company
                for company in companies
            }

            for future in as_completed(future_to_company):
                try:
                    jobs = future.result()
                    all_jobs.extend(jobs)
                    print(f"完成 {future_to_company[future]} 的职位收集，共 {len(jobs)} 个职位")
                except Exception as e:
                    print(f"收集 {future_to_company[future]} 的职位时出错: {e}")

                # Brief pause between result pickups. NOTE(review): this only
                # paces the collection loop — the worker threads still issue
                # requests concurrently, so it is not a true rate limit.
                time.sleep(0.5)

        return all_jobs

    def is_ai_job(self, job):
        """Return True if the job's title or description (case-insensitive)
        contains any configured AI keyword or generic AI skill."""
        title = str(job['职位名称']).lower()
        desc = str(job['职位描述']).lower()

        # Topic keyword groups (group names are irrelevant to the match).
        for keywords in self.ai_keywords.values():
            for keyword in keywords:
                kw = keyword.lower()
                if kw in title or kw in desc:
                    return True

        # Generic AI-adjacent skills (already lowercase).
        for skill in self._AI_SKILLS:
            if skill in title or skill in desc:
                return True

        return False

    def analyze_jobs(self, all_jobs):
        """Tag AI jobs in *all_jobs*, dump them to all_jobs.csv, and return
        a per-company stats DataFrame (总职位数 / AI相关职位数 / AI岗位占比(%))."""
        if not all_jobs:
            # Nothing scraped: return an empty stats frame with the expected
            # columns instead of crashing on groupby over a missing column.
            return pd.DataFrame(columns=['公司名称', '总职位数', 'AI相关职位数', 'AI岗位占比(%)'])

        df = pd.DataFrame(all_jobs)

        # Per-row AI flag (boolean; sums as a count below).
        df['是否为AI岗位'] = df.apply(self.is_ai_job, axis=1)

        # Persist the full tagged job list for inspection.
        self.save_jobs_to_csv(df, 'all_jobs.csv')

        # Per-company totals: count of postings and count of AI postings.
        company_stats = df.groupby('公司名称').agg({
            '职位名称': 'count',  # total postings
            '是否为AI岗位': 'sum'  # AI postings
        }).rename(columns={
            '职位名称': '总职位数',
            '是否为AI岗位': 'AI相关职位数'
        })

        # AI share as a percentage, rounded to two decimals.
        company_stats['AI岗位占比(%)'] = (
            company_stats['AI相关职位数'] / company_stats['总职位数'] * 100
        ).round(2)

        # Expose the company name as a regular column.
        return company_stats.reset_index()

    def save_jobs_to_csv(self, df, filename):
        """Write the job DataFrame to *filename* (UTF-8 with BOM for Excel).

        Returns True on success, False (after printing the error) on failure.
        """
        try:
            df.to_csv(filename, index=False, encoding='utf-8-sig')
            # Fixed: previously printed a literal "(unknown)" placeholder
            # instead of the actual file name.
            print(f"所有职位信息已保存到 {filename}")
            return True
        except Exception as e:
            print(f"保存职位信息失败: {e}")
            return False

    def save_results_to_csv(self, results, filename):
        """Write the analysis results to *filename* (UTF-8 with BOM).

        Returns True on success, False (after printing the error) on failure.
        """
        try:
            df = pd.DataFrame(results)
            df.to_csv(filename, index=False, encoding='utf-8-sig')
            # Fixed: previously printed a literal "(unknown)" placeholder
            # instead of the actual file name.
            print(f"分析结果已保存到 {filename}")
            return True
        except Exception as e:
            print(f"保存分析结果失败: {e}")
            return False

    def run_analysis(self, input_file='company.xlsx', output_file='results.csv'):
        """Run the full pipeline: read companies from *input_file*, scrape
        and analyze their postings, write stats to *output_file*.

        Returns True on completion, False if no companies could be read.
        """
        print("开始AI岗位占比分析...")

        companies = self.read_companies_from_excel(input_file)
        if not companies:
            print("没有读取到任何公司数据")
            return False

        print("\n开始收集所有公司的职位信息...")
        all_jobs = self.collect_all_jobs(companies)
        print(f"\n总共收集到 {len(all_jobs)} 个有效职位（包含薪资信息）")

        print("\n开始分析AI岗位占比...")
        results = self.analyze_jobs(all_jobs)

        # Rank companies by AI share, highest first.
        results = results.sort_values('AI岗位占比(%)', ascending=False)

        self.save_results_to_csv(results, output_file)

        # Summary statistics (vectorized instead of iterrows()).
        total_companies = len(results)
        companies_with_ai = int((results['AI岗位占比(%)'] > 0).sum())
        avg_ratio = results['AI岗位占比(%)'].mean()

        print(f"\n分析完成！")
        print(f"总计分析公司: {total_companies} 家")
        print(f"有AI岗位的公司: {companies_with_ai} 家")
        print(f"平均AI岗位占比: {avg_ratio:.2f}%")

        return True

if __name__ == "__main__":
    # Script entry point: run the full pipeline with default file names.
    AIJobAnalyzer().run_analysis()
