import time

import pandas as pd
from lxml import etree
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By

# Run Chrome headless (no visible window) so the scraper can run unattended.
chrome_options = Options()
chrome_options.add_argument("--headless")       # define headless
# NOTE: the `chrome_options=` keyword was deprecated and removed in Selenium 4;
# `options=` is accepted by Selenium 3.8+ and 4.x alike.
driver = webdriver.Chrome(options=chrome_options)

# Accumulators filled across all directory pages:
# user id, profile URL, follower count, bio/verification text, user name.
id_list, href_list, fans_list, info_list, user1_list = [], [], [], [], []


for k in range(1, 200):
    # One paginated directory page of verified users per iteration.
    url = 'https://d.weibo.com/1087030002_2975_2003_0?page='+str(k)
    driver.get(url)
    # Wait 10 seconds so the JS-rendered page finishes loading before parsing.
    time.sleep(10)
    html = etree.HTML(driver.page_source)

    # Parse the rendered page with XPath and pull out each field.
    hrefs = html.xpath('//*[@id="Pl_Core_F4RightUserList__4"]/div/div/div/div/div[1]/ul//li/dl/dd[1]/div[1]/a[1]/@href')    # profile page URL
    users = html.xpath('//*[@id="Pl_Core_F4RightUserList__4"]/div/div/div/div/div[1]/ul//li/dl/dd[1]/div[1]/a[1]/@title')   # user name
    fans = html.xpath('//*[@id="Pl_Core_F4RightUserList__4"]/div/div/div/div/div[1]/ul//li/dl/dd[1]/div[2]/span[2]/em/text()')  # follower count
    information = html.xpath('//*[@id="Pl_Core_F4RightUserList__4"]/div/div/div/div/div[1]/ul//li/dl/dd[1]/div[4]/span')    # bio / verification
    user_id = html.xpath('//*[@id="Pl_Core_F4RightUserList__4"]/div/div/div/div/div[1]/ul//li/dl/dd[1]/div[1]/a[1]/@usercard')  # user ID attribute

    # Append this page's results to the global accumulators.
    href_list.extend(hrefs)
    user1_list.extend(users)
    info_list.extend(info.text for info in information)
    for fan in fans:
        if '万' in fan:
            # "3.5万" means 3.5 * 10000 followers. The old
            # replace('万', '0000') trick mangled decimal counts
            # ("3.5万" -> "3.50000"); multiply out properly instead.
            fans_list.append(str(int(float(fan.replace('万', '')) * 10000)))
        else:
            fans_list.append(fan)
    for card in user_id:
        # usercard attributes look like "id=1234567890&...": strip the "id="
        # prefix and keep everything up to the next "&", instead of assuming
        # the id is exactly 10 digits as the old card[3:13] slice did.
        id_list.append(card[3:].split('&')[0])

    # Re-write the CSV after every page so progress survives a crash.
    show_table = {
        '用户名': user1_list,
        '简介/微博认证': info_list,
        '粉丝数': fans_list,
        '用户ID': id_list,
        '微博主页地址': href_list
    }
    show_table_df = pd.DataFrame(show_table)
    show_table_df.to_csv('user_write.csv', index=False, encoding='UTF-8')

    print("********第"+str(k)+"页用户爬取完成*********")

# Accumulators filled across every scraped user's feed:
# author name, publish time, body text, likes, reposts, comments.
user2_list, Time_list, Text_list = [], [], []
DianZan_list, ZhuanFa_list, PingLun_list = [], [], []


for ID in id_list:
    # Mobile Weibo profile/feed page for each collected user id.
    url = 'https://m.weibo.cn/u/'+str(ID)
    driver.get(url)
    # Wait 10 seconds so the JS-rendered feed finishes loading before parsing.
    time.sleep(10)
    html = etree.HTML(driver.page_source)

    # Selenium elements are used here because .text returns the fully rendered
    # text of nested nodes. find_elements_by_xpath was removed in Selenium 4;
    # find_elements(By.XPATH, ...) works on both Selenium 3 and 4.
    Text = driver.find_elements(By.XPATH, '//*[@id]/div/div/div/div/div/article/div[1]/div[1]')  # post body
    users = driver.find_elements(By.XPATH, "//*[@id]/div/div/div/div/div/header/div/div/a/h3")  # nickname
    # The remaining fields come from the lxml tree.
    Times = html.xpath('//*[@id]/div/div/div/div/div/header/div/div/h4/span[1]')  # publish time
    ZhuanFa = html.xpath('//*[@id]/div/div/div/div/div/footer/div[1]/h4')   # repost count
    PingLun = html.xpath('//*[@id]/div/div/div/div/div/footer/div[2]/h4')   # comment count
    DianZan = html.xpath('//*[@id]/div/div/div/div/div/footer/div[3]/h4')   # like count

    # Append this user's posts to the global accumulators.
    user2_list.extend(el.text for el in users)
    Time_list.extend(el.text for el in Times)
    Text_list.extend(el.text for el in Text)
    DianZan_list.extend(el.text for el in DianZan)
    ZhuanFa_list.extend(el.text for el in ZhuanFa)
    PingLun_list.extend(el.text for el in PingLun)

    # Re-write the CSV after every user so progress survives a crash.
    # NOTE(review): pd.DataFrame raises ValueError if the six XPath hits ever
    # return different counts on a page — confirm the selectors stay in sync.
    show_table = {
        '发布人': user2_list,
        '发布时间': Time_list,
        '正文内容': Text_list,
        '点赞数': DianZan_list,
        '评论数': PingLun_list,
        '转发数': ZhuanFa_list
    }
    show_table_df = pd.DataFrame(show_table)
    show_table_df.to_csv('user_write_context.csv', index=False, encoding='UTF-8')

    print("********用户"+str(ID)+"爬取完成*********")


