from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
import pandas as pd
import time
import random
import matplotlib.pyplot as plt
import seaborn as sns

# Profile-page URLs for the experts to scrape, one per expert id.
_EXPERT_IDS = [
    2372780, 1839240, 2835466, 2379124, 2937611,
    2323414, 2325931, 2199744, 2326725, 2290189,
    2037784, 2370927, 1689690, 2295059, 1935616,
    1961120, 1724513, 1921296, 2372605, 2374811,
]
expert_urls = [f"https://www.cmzj.net/expertItem?id={eid}" for eid in _EXPERT_IDS]

# Launch one headless Chrome instance, shared by every scraping call below.
options = Options()
for flag in ('--headless', '--disable-gpu'):
    options.add_argument(flag)
driver = webdriver.Chrome(options=options)

def _parse_count(text, unit):
    """Strip the *unit* suffix from *text* and return it as an int.

    Returns 0 when the remaining string is not purely digits (e.g. empty,
    or formatted like "1.5万") — mirrors the script's lenient defaults.
    """
    val = text.replace(unit, "").strip()
    return int(val) if val.isdigit() else 0


def get_expert_data(url):
    """Scrape one expert profile page and return its stats as a dict.

    Relies on the module-level headless ``driver``. Any scraping/parsing
    error is printed and swallowed so one bad page cannot abort the whole
    run — the returned dict then keeps whatever defaults (None / 0) were
    not overwritten before the failure.
    """
    data = {
        "url": url,
        "昵称": None,             # nickname
        "彩龄": 0,                # years playing the lottery
        "文章数量": 0,            # number of published articles
        "粉丝数量": 0,            # follower count
        "双色球一等奖得奖数": 0,  # first-prize win count
        "双色球二等奖得奖数": 0,  # second-prize win count
        "双色球三等奖得奖数": 0,  # third-prize win count
    }

    try:
        driver.get(url)
        time.sleep(1)  # crude wait for the client-side JS to render the page
        # html.parser instead of lxml: avoids the extra dependency
        soup = BeautifulSoup(driver.page_source, "html.parser")

        # Nickname
        name_tag = soup.find("p", class_="okami-name")
        if name_tag:
            data["昵称"] = name_tag.text.strip()

        # Years playing / article count
        okami_text = soup.find("div", class_="okami-text")
        if okami_text:
            for p in okami_text.find_all("p"):
                span = p.find("span")
                if not span:
                    # fix: a <p> without a <span> used to raise AttributeError
                    # and abort the rest of the parsing via the outer except
                    continue
                if "彩龄" in p.text:
                    data["彩龄"] = _parse_count(span.text, "年")
                elif "文章数量" in p.text:
                    data["文章数量"] = _parse_count(span.text, "篇")

        # Follower count — fix: scan ALL okami-number paragraphs instead of
        # only the first; the "关注数量" line may not be the first stat shown.
        for fans_tag in soup.find_all("p", class_="okami-number"):
            if "关注数量" in fans_tag.text:
                val = fans_tag.text.replace("关注数量：", "")
                data["粉丝数量"] = _parse_count(val, "人")
                break

        # 双色球 prize counts per tier
        for block in soup.find_all("div", class_="djzj"):
            head = block.find("span", class_="text-head-bg")
            if not (head and "双色球" in head.text):
                continue
            for item in block.find_all("div", class_="item"):
                span = item.find("span")
                if not span:
                    continue
                count = span.text.replace("次", "").strip()
                if not count.isdigit():
                    continue  # skip non-numeric entries
                count = int(count)
                text = item.text
                if "一等奖" in text:
                    data["双色球一等奖得奖数"] = count
                elif "二等奖" in text:
                    data["双色球二等奖得奖数"] = count
                elif "三等奖" in text:
                    data["双色球三等奖得奖数"] = count

    except Exception as e:
        # Best-effort scraping: report and return the defaults gathered so far.
        print(f"错误：{url} -> {e}")

    return data

# Scrape every expert. The try/finally guarantees the browser process is
# shut down even if the loop raises (e.g. KeyboardInterrupt) — previously
# an error here leaked the headless Chrome process.
all_data = []
try:
    for i, url in enumerate(expert_urls, 1):
        print(f"抓取专家 {i}/{len(expert_urls)}")
        all_data.append(get_expert_data(url))
        time.sleep(random.uniform(1.5, 3))  # polite randomized delay between requests
finally:
    driver.quit()

# Persist the scraped records. utf-8-sig writes a BOM so that Excel detects
# the encoding and renders the Chinese column headers correctly.
df = pd.DataFrame.from_records(all_data)
df.to_csv("专家数据_双色球.csv", index=False, encoding="utf-8-sig")
print("✅ 数据已保存为：专家数据_双色球.csv")

# Reload the scraped data from disk for the plotting stage.
df = pd.read_csv("专家数据_双色球.csv")

# Configure a CJK-capable font so the Chinese labels render (Microsoft YaHei
# is Windows-specific — substitute another CJK font on other platforms) and
# keep the minus sign displayable alongside it.
plt.rcParams.update({
    'font.family': 'Microsoft YaHei',
    'axes.unicode_minus': False,
})

# 1. Stacked bar chart: per-expert prize counts, ordered by first-prize wins.
ranked = df.sort_values(by='双色球一等奖得奖数', ascending=False)
names = ranked['昵称']

plt.figure(figsize=(12, 6))
running = None  # cumulative bottom of the stack
for column, label, color in (
    ('双色球一等奖得奖数', '一等奖', 'salmon'),
    ('双色球二等奖得奖数', '二等奖', 'skyblue'),
    ('双色球三等奖得奖数', '三等奖', 'lightgreen'),
):
    tier = ranked[column]
    plt.bar(names, tier, bottom=running, label=label, color=color)
    running = tier if running is None else running + tier

plt.title("专家双色球中奖次数（堆叠图）")
plt.xlabel("专家昵称")
plt.ylabel("中奖次数")
plt.xticks(rotation=45)
plt.legend()
plt.tight_layout()
plt.savefig("专家双色球中奖次数条形图.png", dpi=300, bbox_inches='tight')
plt.show()

# 2. Scatter plot: years playing vs article count, colored per expert.
plt.figure(figsize=(8, 6))
ax = sns.scatterplot(x='彩龄', y='文章数量', hue='昵称', s=100, data=df)
ax.set(
    title="彩龄 vs 发文量（按专家区分）",
    xlabel="彩龄（年）",
    ylabel="文章数量",
)
plt.tight_layout()
plt.savefig("彩龄-发文量散点图.png", dpi=300, bbox_inches='tight')
plt.show()

# 3. Heatmap: pairwise correlations between expert attributes and prize counts.
metric_cols = [
    '彩龄', '文章数量', '粉丝数量',
    '双色球一等奖得奖数', '双色球二等奖得奖数', '双色球三等奖得奖数',
]
plt.figure(figsize=(8, 6))
sns.heatmap(df[metric_cols].corr(), annot=True, fmt=".2f", cmap='coolwarm')
plt.title("专家属性与中奖表现相关性热力图")
plt.tight_layout()
plt.savefig("属性-中奖相关性热力图.png", dpi=300, bbox_inches='tight')
plt.show()

# 4. Bubble chart: years playing vs first-prize wins; bubble size = followers.
bubble_kwargs = dict(
    data=df,
    x='彩龄',
    y='双色球一等奖得奖数',
    size='粉丝数量',
    hue='昵称',
    sizes=(50, 500),
    alpha=0.7,
)
plt.figure(figsize=(10, 6))
ax = sns.scatterplot(**bubble_kwargs)
ax.set(
    title="彩龄 vs 一等奖次数（气泡大小代表粉丝数）",
    xlabel="彩龄（年）",
    ylabel="一等奖次数",
)
plt.tight_layout()
plt.savefig("彩龄-一等奖气泡图.png", dpi=300, bbox_inches='tight')
plt.show()