import os
import csv
import time
import random
import re
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup as bs
import json

# --- Global configuration ------------------------------------------------
# Path to the local chromedriver binary (hard-coded for this machine).
chromedriver_path = "/Users/lin/Downloads/chromedriver-mac-arm64/chromedriver"

# Chrome options: attach to an already-running Chrome instance started with
# --remote-debugging-port=9527 instead of launching a fresh browser, so any
# manual login done in that window is reused.
options = Options()
options.add_experimental_option("debuggerAddress", "127.0.0.1:9527")
web = webdriver.Chrome(executable_path=chromedriver_path, options=options)
# Hide the `navigator.webdriver` flag on every new document so simple
# bot-detection scripts do not see Selenium.
web.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
    "source": """
    Object.defineProperty(navigator, 'webdriver', {
      get: () => undefined
    })
  """
})
web.implicitly_wait(10)  # implicit wait (seconds) for element lookups

# Read the cookies from the attached (presumably already logged-in) session.
# NOTE(review): the cookies are taken from the live session, written to disk,
# read back, and re-added to the same session — the round trip only refreshes
# the on-disk copy; confirm this is the intended login flow.
cookies = web.get_cookies()
time.sleep(10)
# Persist the cookies so later runs can reuse the login.
file_path = '/Users/lin/python_project/data/weibocookies.txt'
with open(file_path, 'w') as f:
    f.write(json.dumps(cookies))

# Read the cookies back with json.load (file object, hence load not loads)
# and install them into the driver.
with open(file_path) as f:
    cookies_list = json.load(f)
    for cookie in cookies_list:
        web.add_cookie(cookie)

web.refresh()  # reload so the added cookies take effect
time.sleep(10)

# Keyword / pattern lookup helper.
def keywords_check(keywords, text):
    """Search *text* for the regex pattern *keywords*.

    Returns the ``re.Match`` object on a hit, or ``None`` when the pattern
    does not occur, so callers can use the result directly as a boolean.
    """
    return re.search(keywords, text)

def extract_topics_and_contents(text):
    """Split a weibo post into its hashtag topics and surrounding text.

    Weibo topics are written ``#topic#``.  Splitting on that pattern with a
    capturing group yields an alternating list ``[content, topic, content,
    topic, ..., content]``, so the even-indexed elements are the content
    segments.

    Args:
        text: Raw post text, possibly containing ``#...#`` topic markers.

    Returns:
        ``(topics, contents)`` — two lists of strings.  Empty or
        whitespace-only content segments (e.g. between adjacent topics)
        are dropped.
    """
    pattern = r'#(.*?)#'  # non-greedy: matches each #...# topic marker
    topics = re.findall(pattern, text)

    # re.split with one capturing group always returns an odd-length list of
    # 2*len(topics)+1 elements, alternating content/topic; the even indices
    # are the content pieces.  (The previous version had an unreachable
    # `len(parts) % 2 == 0` branch and let empty segments between adjacent
    # topics leak through — both fixed here.)
    parts = re.split(pattern, text)
    contents = [part for part in parts[0::2] if part.strip()]

    return topics, contents

# Age in completed years between two dates.
def age_calc(birth_date, end_date):
    """Return the age in full years at *end_date* for someone born on *birth_date*.

    Both arguments are 'YYYY-MM-DD' strings.  One year is subtracted from the
    raw year difference when the birthday has not yet occurred in the end
    year (month/day compared lexicographically as a tuple).
    """
    born = datetime.strptime(birth_date, '%Y-%m-%d')
    until = datetime.strptime(end_date, '%Y-%m-%d')
    # True when the end date falls before this year's birthday.
    birthday_pending = (until.month, until.day) < (born.month, born.day)
    return until.year - born.year - (1 if birthday_pending else 0)


# Append one scraped record to a CSV file.
def save_data(web_data, filename):
    """Append *web_data* as one row to the CSV file *filename*.

    The header row (and a UTF-8 BOM, so Excel detects the encoding) is
    written only when the file is new or empty.  Existing files are appended
    with plain UTF-8: re-opening an existing file with 'utf-8-sig' in append
    mode would insert a spurious BOM in the middle of the file on every run.

    Args:
        web_data: Mapping of field name -> value.  Missing fields are
            written as empty cells (DictWriter's default restval); keys
            outside *fieldnames* raise ``ValueError``.
        filename: Path of the CSV file to append to.
    """
    fieldnames = ['mid', 'user_name', 'topics', 'contents', 'date', 'gender',
                  'age', 'follow', 'followers', 'ip', 'user_type',
                  'official_type', 'repost_num', 'comment_num', 'like_num']
    is_new = not os.path.exists(filename) or os.path.getsize(filename) == 0
    encoding = 'utf-8-sig' if is_new else 'utf-8'
    with open(filename, mode='a', newline='', encoding=encoding) as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        if is_new:
            writer.writeheader()
        writer.writerow(web_data)  # one record — writerows([x]) was overkill

# Parse one weibo search-results page and save each post's data to CSV.
def parse_search_results(url: str, filename: str) -> None:
    """Scrape every post on a weibo search-results page into *filename*.

    For each post this reads the post metadata (mid, topics, text, repost /
    comment / like counts) from the results page, opens the author's profile
    in a new browser tab to collect user attributes (verification, type,
    gender, follower counts, birthday-derived age, IP region), appends one
    CSV row via save_data(), then closes the tab.

    NOTE(review): `web_object` is created once and mutated per post, so a
    field that is absent for one user (e.g. 'age') silently carries over the
    previous post's value — confirm whether that is acceptable.

    Args:
        url: Search-results page URL, loaded in the shared driver `web`.
        filename: CSV path passed through to save_data().
    """
    web.get(url)
    time.sleep(random.randint(5, 10))
    # Accumulator for one CSV row (reused across posts — see note above).
    web_object = {}
    html = web.page_source
    # Collect every "expand" button of truncated posts on this page...
    button_list = web.find_elements_by_css_selector('a[action-type="fl_unfold"]') # click all "expand" links
    # ...and click each one so the full text is rendered in the live DOM.
    # NOTE(review): `html` was captured BEFORE these clicks, so the expanded
    # text is not in the soup parsed below — confirm this is intended.
    for bt in button_list:
        try :
            bt.click()
        except Exception as e:
            print(e.args)

    soup = bs(html, 'html.parser')
    weibo_list = soup.findAll("div", {'action-type': "feed_list_item"})

    for weibo in weibo_list:
        # --- Post metadata from the results page ---
        mid = weibo.get("mid")
        print("mid",mid)
        web_object['mid'] = mid

        # Last p.txt node holds the (possibly expanded) post text.
        text = weibo.findAll("p", {'class': "txt"})[-1].get_text().strip()

        topics, contents = extract_topics_and_contents(text)

        print("话题：", topics)
        print("内容：", contents)
        web_object['topics'] = topics
        web_object['contents'] = contents


        # Author's display name.
        user_name = weibo.find("a", {'class': "name"}).get_text()
        print("昵称", user_name)
        web_object['user_name'] = user_name

        # Publication time (first link inside the "from" block).
        itime = weibo.find("div", {'class': "from"})
        date = itime.find("a").get_text().strip()
        print("发布时间：", date)
        web_object['date'] = date

        # Repost / comment / like counts; when a post has none, weibo shows
        # the bare label ("转发"/"评论"/"赞") instead of a number, so map
        # those labels to 0.
        cardact = weibo.find("div", {'class': "card-act"})
        repost_num = cardact.findAll("li")[0].get_text().strip()
        if repost_num =="转发":
            repost_num = 0
        print("转发人数：", repost_num)
        web_object['repost_num'] = repost_num

        comment_num = cardact.findAll("li")[1].get_text().strip()
        if comment_num == "评论":
            comment_num = 0
        print("评论人数：", comment_num)
        web_object['comment_num'] = comment_num

        like_num = cardact.findAll("li")[2].get_text().strip()
        if like_num == "赞":
            like_num =0
        print("点赞人数：", like_num)
        web_object['like_num'] = like_num

        # --- Author profile: open in a new tab via window.open() ---
        user_link = weibo.find("a", {'class': 'name'}).get("href")
        # Build a quoted JS string literal around the https: URL.
        user_url = "'" + "https:" + user_link + "'"
        js = "window.open(" + user_url + ");"
        web.execute_script(js)
        time.sleep(random.randint(2, 5))

        # Switch the driver to the newly opened tab.
        window_1 = web.current_window_handle
        windows = web.window_handles

        for current_window in windows:
            if current_window != window_1:
                web.switch_to.window(current_window)

        # Wait until the profile page body is present.
        wait = WebDriverWait(web, 10)
        wait.until(EC.presence_of_element_located((By.TAG_NAME, 'body')))

        html = web.page_source
        soup = bs(html, 'html.parser')
        print("切换到用户主页")

        # Official verification badge (avatar tooltip); '无' when absent.
        # NOTE(review): class names like ProfileHeader_avatar2_1gEyo are
        # build-generated and will break when weibo redeploys — confirm.
        try:
            typehtml = soup.find("div", {'class': "woo-avatar-main woo-avatar-hover ProfileHeader_avatar2_1gEyo"})
            official_type = typehtml.find("span").get("title")
            web_object['official_type'] = official_type
            print("官方认证", official_type)
        except AttributeError as e:
            web_object['official_type'] = '无'
            print("官方认证：", '无')

        # User-type description line; defaults to '普通用户' (regular user).
        type_element = soup.find('div',{'class':"ProfileHeader_descText_3AF6o"})
        if type_element:
            user_type = type_element.get_text(strip=True)
            web_object['user_type'] = user_type
            print("用户类型：",user_type)
        else:
            web_object['user_type'] = '普通用户'
            print("用户类型：",'普通用户')

        # Gender comes from the title attribute of the header icon.
        genderhtml = soup.find("div",{'class': "woo-box-flex woo-box-alignCenter ProfileHeader_h3_2nhjc"})
        gender = genderhtml.find("span").get("title").strip()
        print("用户性别：", gender)
        web_object['gender'] = gender

        # Follower / following counts.  strip('粉丝')/strip('关注') removes
        # the label characters from the ends, then [3:] drops what is
        # presumably a residual leading label — TODO confirm against the
        # live markup.
        fanshtml = soup.find("div",{'class': "woo-box-flex woo-box-alignCenter ProfileHeader_h4_gcwJi"})
        followers = fanshtml.findAll("a")[0].get_text().strip('粉丝')
        followers =followers[3:]
        follow = fanshtml.findAll("a")[1].get_text().strip('关注')
        follow = follow[3:]
        print("粉丝数量:",followers,"关注人数:",follow)
        web_object['follow'] = follow
        web_object['followers'] = followers



        # --- IP region and age ---
        # Click the "expand profile details" control (brittle absolute XPath).
        button = web.find_element_by_xpath('//*[@id="app"]/div[2]/div[2]/div[2]/main/div/div/div[2]/div[1]/div[1]/div[3]/div/div/div[2]')
        button.click()
        html = web.page_source
        soup = bs(html, 'html.parser')
        ipflag = 0  # set to 1 once the IP region has been found
        infohtml = soup.findAll("div",{'class': "woo-box-item-flex ProfileHeader_con3_Bg19p"})


        for info in infohtml:
            str1 = str(info.get_text()).strip()
            print(str1)
            try:
                if (keywords_check('加入微博', str1)):  # a date followed by "加入微博" is the join date, not a birthday — skip it
                    print("加入微博时间,不是生日")
                else:  # otherwise try to regex-match a date-like string as the birthday
                    result = re.findall("\d{4}[-|.|/]?\d{2}[-|.|/]?\d{2}", str1)
                    if(result):
                        print("生日", result[0])
                        age = age_calc(result[0], '2024-01-06')  # age as of the hard-coded reference date
                        print(age)
                        web_object['age'] = age
            except TypeError as e:
                print('')
            if (keywords_check('IP属地', str1)):  # profile line of the form "IP属地：<region>"
                ipflag = 1  # mark the IP as found
                ip = str1[5:]  # drop the 5-char "IP属地：" prefix, keep the region
                print("地址", ip)
                web_object['ip']=ip


        if ipflag == 0:  # no "IP属地" text line — fall back to the place icon
            try:
                iphtml = soup.find("div",{'class':"ProfileHeader_box3_2R7tq"})
                ip = iphtml.find("i",{'class':"woo-font woo-font--proPlace"}).parent.parent.get_text()
                print("ip:", ip)
                web_object['ip']=ip
            except AttributeError as e:
                    print("该用户没有ip信息")


        save_data(web_object, filename)
        web.close()
        web.switch_to.window(web.window_handles[0])

# Entry point for one crawl run.
def start_crawler(url, filename):
    """Fetch one weibo search-results page and append its rows to *filename*."""
    parse_search_results(url=url, filename=filename)

if __name__ == '__main__':
    # Search keyword and number of result pages to crawl.
    keyword = "小米"
    page_count = 1
    for page in range(1, page_count + 1):
        print(f'开始获取第{page}页的搜索结果')
        url = f'https://s.weibo.com/weibo?q={keyword}&Refer=index&page={page}'
        # Output CSV lives under ./data/, named after the keyword.
        filename = os.getcwd() + f'/data/{keyword}.csv'
        start_crawler(url, filename)
