import requests
from bs4 import BeautifulSoup
import json
import re
import time
from urllib.parse import quote


def scrape_interview_questions(positions=None, timeout=15, delay=2):
    """Scrape interview questions for a list of job positions.

    For each position, the fallback sources are tried in order; the first
    source whose CSS selector matches at least one element wins and the
    remaining sources are skipped for that position.

    Args:
        positions: Job titles to query. Defaults to the original built-in
            Chinese list when ``None`` (backward compatible).
        timeout: Per-request timeout in seconds.
        delay: Polite pause (seconds) between requests to the same or the
            next source.

    Returns:
        dict mapping each position to a list of dicts with keys
        ``"question"``, ``"description"`` and ``"source"``. Positions for
        which every source failed map to an empty list.
    """
    # Several fallback sites, tried in order until one yields results.
    sources = [
        {
            "name": "牛客网",
            "url": "https://www.nowcoder.com/intelligent/{position}",
            "selector": ".question-item"
        },
        {
            "name": "LeetCode",
            "url": "https://leetcode.cn/problemset/all/?search={position}",
            "selector": ".question-title"
        },
        {
            "name": "看准网",
            "url": "https://www.kanzhun.com/mshrm/interview/?q={position}",
            "selector": ".interview-question-item"
        }
    ]

    if positions is None:
        positions = [
            "java工程师",
            "python工程师",
            "前端开发",
            "数据分析师",
            "产品经理"
        ]

    # Hoisted out of the loops: the headers are identical for every request.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8"
    }

    result = {pos: [] for pos in positions}

    for position in positions:
        print(f"正在爬取 {position} 的面试题...")

        for source in sources:
            try:
                url = source["url"].format(position=quote(position))
                print(f"尝试从 {source['name']} 获取数据...")
                response = requests.get(url, headers=headers, timeout=timeout)

                if response.status_code == 200:
                    soup = BeautifulSoup(response.text, 'html.parser')
                    question_blocks = soup.select(source["selector"])

                    if not question_blocks:
                        print(f"在 {source['name']} 未找到匹配元素，尝试下一个来源")
                        continue

                    for block in question_blocks:
                        item = _parse_question(block, source["name"])
                        if item is not None:
                            result[position].append(item)

                    print(f"从 {source['name']} 获取到 {len(question_blocks)} 道题目")
                    break  # This source succeeded; skip the remaining fallbacks.

                else:
                    print(f"{source['name']} 返回状态码: {response.status_code}")

            # Narrowed from a bare Exception: only network/HTTP failures are
            # expected here (timeouts, connection errors, invalid URLs).
            except requests.RequestException as e:
                print(f"从 {source['name']} 爬取失败: {str(e)}")

            time.sleep(delay)  # Spacing between requests.

    return result


def _parse_question(block, source_name):
    """Extract one question dict from a BeautifulSoup element.

    Returns ``None`` when the element cannot be parsed (best-effort:
    a single malformed item must not abort the whole scrape, so the
    broad catch here is deliberate).
    """
    try:
        q_title = block.get_text(strip=True)

        # Each site lays out the question detail differently.
        if source_name == "牛客网":
            desc = block.select_one(".question-desc")
            q_content = desc.get_text(strip=True) if desc else ""
        elif source_name == "LeetCode":
            content = block.find_next("div", class_="content")
            q_content = content.get_text(strip=True) if content else ""
        else:
            q_content = block.get_text(strip=True)

        return {
            "question": q_title[:100],  # Cap lengths to keep the JSON compact.
            "description": q_content[:300],
            "source": source_name
        }
    except Exception as e:
        print(f"处理单个问题时出错: {e}")
        return None


def save_to_json(data, filename="interview_questions.json"):
    """Serialize *data* to a UTF-8 JSON file and report the destination.

    Args:
        data: Any JSON-serializable object (here: the scrape result dict).
        filename: Destination path; defaults to "interview_questions.json".
    """
    with open(filename, 'w', encoding='utf-8') as f:
        # ensure_ascii=False keeps the Chinese text human-readable in the file.
        json.dump(data, f, ensure_ascii=False, indent=2)
    # Bug fix: the message previously printed the literal text "(unknown)"
    # instead of interpolating the actual filename.
    print(f"数据已保存至 {filename}")


if __name__ == "__main__":
    # Script entry point: scrape everything first, then persist in one go.
    scraped = scrape_interview_questions()
    save_to_json(scraped)