import time
import datetime
import csv
import json
import requests
import hashlib
import os
import pandas as pd
import urllib.parse
from selenium import webdriver
from selenium.webdriver.edge.service import Service
from selenium.webdriver.common.by import By

COOKIE_PATH = "cookies.json"


def save_cookies(driver, path=COOKIE_PATH):
    """Persist the driver's current session cookies to a JSON file.

    Args:
        driver: Selenium WebDriver whose cookies are dumped.
        path: Destination JSON file (defaults to COOKIE_PATH).
    """
    session_cookies = driver.get_cookies()
    with open(path, 'w', encoding='utf-8') as fh:
        json.dump(session_cookies, fh)
    print("[√] Cookies 已保存")


def load_cookies(driver, path=COOKIE_PATH):
    """Load cookies from a JSON file into the driver's session.

    The 'expiry' field is stripped from each cookie before adding it
    (Selenium drivers can reject cookies with stale expiry values).

    Args:
        driver: Selenium WebDriver to receive the cookies.
        path: JSON file previously written by save_cookies.

    Returns:
        bool: True when the file was read and every cookie added,
        False on any failure (missing/corrupt file, add_cookie error).
    """
    try:
        with open(path, 'r', encoding='utf-8') as fh:
            stored_cookies = json.load(fh)
        for cookie in stored_cookies:
            cookie.pop('expiry', None)
            driver.add_cookie(cookie)
        print("[√] Cookies 已加载")
        return True
    except Exception as e:
        print("Cookies 加载失败：", e)
        return False


def get_user_id_by_name(username):
    """Resolve a Weibo nickname to its numeric UID via the m.weibo.cn search API.

    Queries the mobile-site container search for *username* and returns the
    UID of the first user card found.

    Args:
        username: Display name (nickname) to search for.

    Returns:
        int | None: The user's UID, or None when the request fails or no
        matching user appears in the result cards.
    """
    api_url = "https://m.weibo.cn/api/container/getIndex"
    query = {
        "containerid": f"100103type=3&q={username}"
    }
    request_headers = {
        "User-Agent": "Mozilla/5.0",
    }

    try:
        response = requests.get(api_url, headers=request_headers, params=query, timeout=10)
        payload = response.json()
        data_section = payload.get("data")
        if data_section and data_section.get("cards"):
            for result_card in data_section["cards"]:
                group = result_card.get("card_group")
                if not group:
                    continue
                for entry in group:
                    user = entry.get("user")
                    if user:
                        print(f"[√] 昵称：{user['screen_name']}, UID：{user['id']}")
                        return user["id"]
    except Exception as e:
        print("UID 获取失败：", e)
    print("未找到该用户")
    return None


def construct_keyword_url(uid, keyword):
    """Build the keyword-filtered timeline URL for a user's Weibo profile.

    The keyword is percent-encoded, and end_time is pinned to the current
    Unix timestamp so the filter covers everything posted up to "now".

    Args:
        uid: Numeric Weibo user id.
        keyword: Search keyword to filter the timeline by.

    Returns:
        str: Fully assembled profile URL with query parameters.
    """
    safe_keyword = urllib.parse.quote(keyword)
    end_time = int(time.time())
    return (
        f"https://weibo.com/u/{uid}"
        f"?key_word={safe_keyword}&is_ori=1&is_forward=1&end_time={end_time}"
    )


def hash_text(text):
    """Return the hex MD5 digest of *text* after stripping surrounding whitespace."""
    normalized = text.strip()
    digest = hashlib.md5(normalized.encode('utf-8'))
    return digest.hexdigest()


def scrape_user_weibo(nickname, uid, keyword):
    """Scrape posts matching *keyword* from one user's Weibo timeline.

    Launches a local Edge WebDriver, restores a saved login session from
    cookies (falling back to a manual login prompt), opens the user's
    keyword-filtered profile URL, then scrolls through the feed extracting
    publish time, post text, retweeted text and QR-code links from each
    feed card.  The driver is always quit before returning.

    Args:
        nickname: Display name, copied verbatim into every result row.
        uid: Numeric Weibo user id used to build the timeline URL.
        keyword: Keyword the timeline is filtered by.

    Returns:
        list: Rows of [nickname, uid, publish_time, weibo_text,
        retweeted_text, keyword].
    """
    # NOTE(review): hard-coded Windows driver path — must exist on the machine.
    path_to_chromedriver = r"C:\Program Files (x86)\Microsoft\Edge\Application\msedgedriver.exe"
    service = Service(executable_path=path_to_chromedriver)
    driver = webdriver.Edge(service=service)

    weibo_data = []

    try:
        driver.get("https://weibo.com/")
        time.sleep(5)
        logged_in = False
        if load_cookies(driver):
            driver.refresh()
            time.sleep(5)
            # Heuristic: the login button text ("登录") disappears once logged in.
            if "登录" not in driver.page_source:
                logged_in = True
        if not logged_in:
            # Manual fallback: operator logs in in the browser, then we persist cookies.
            print("请在浏览器中完成登录，完成后按回车继续...")
            input()
            save_cookies(driver)

        target_url = construct_keyword_url(uid, keyword)
        print(f"[→] 正在抓取关键词：{keyword} -> {target_url}")
        driver.get(target_url)
        time.sleep(5)

        # Incremental scrolling: step down the page and extend the target
        # height whenever lazy-loaded content grows the document body.
        scroll_increment = 200
        pause_between_scroll = 1
        total_scroll = 0
        last_page_height = driver.execute_script("return document.body.scrollHeight")
        seen_fingerprints = set()  # NOTE(review): never populated here — dedup happens later in merge_and_export

        while total_scroll < last_page_height:
            driver.execute_script("window.scrollTo(0, arguments[0]);", total_scroll)
            time.sleep(pause_between_scroll)
            total_scroll += scroll_increment
            new_page_height = driver.execute_script("return document.body.scrollHeight")
            if new_page_height > last_page_height:
                last_page_height = new_page_height

            # Re-query all visible feed cards on every scroll pass; duplicate
            # rows across passes are deduplicated downstream.
            cards = driver.find_elements(By.XPATH, "//article[contains(@class, 'Feed_wrap_')]")
            for card in cards:
                # Initialize post body and retweet body to empty
                weibo_text = ""
                retweeted_text = ""
                qrcode_link = ""  # holds an extracted QR-code link, if any
                publish_time = ""  # holds the post's publish time, if any

                try:
                    # Click the "expand" button, if present, to reveal the full text
                    expand_button = card.find_element(By.XPATH, ".//span[contains(text(), '展开')]")
                    driver.execute_script("arguments[0].click();", expand_button)
                    time.sleep(0.3)
                except:
                    pass  # no "expand" button — nothing to do

                try:
                    # Extract the post's publish time
                    time_elem = card.find_element(By.XPATH, ".//a[contains(@class, 'time_')]")
                    publish_time = time_elem.get_attribute("title") or time_elem.text
                except:
                    publish_time = ""  # no timestamp element — leave empty

                # Extract the main post text
                try:
                    text_elem = card.find_element(By.XPATH, ".//div[contains(@class, 'detail_wbtext_')]")
                    weibo_text = text_elem.text.strip()
                except:
                    pass  # no main text — other fields may still apply

                # Check for a QR-code link on the card
                try:
                    qrcode_elem = card.find_element(By.XPATH, ".//a[@qrcode]")
                    qrcode_link = qrcode_elem.get_attribute("qrcode")
                    if qrcode_link and not weibo_text.strip():  # body empty but a QR code exists
                        weibo_text = f"FF二维码链接: {qrcode_link}"  # use the QR link as the post text
                except:
                    pass  # no QR code — skip

                # Extract the text of the retweeted original post
                try:
                    retweet_elem = card.find_element(By.XPATH,
                                                     ".//div[contains(@class, 'detail_reText_')]/div[contains(@class, 'detail_wbtext_')]")
                    retweeted_text = retweet_elem.text.strip()
                except:
                    pass  # not a retweet — skip

                # If the retweeted original carried a QR code, keep just the link
                if retweeted_text.strip() and qrcode_link:
                    retweeted_text = f"转发二维码链接: {qrcode_link}"

                # Skip the card entirely when neither body nor retweet text was found
                if not weibo_text.strip() and not retweeted_text.strip():
                    continue

                # QR-code-only posts are kept even when they have no timestamp
                if weibo_text.startswith("FF二维码链接:") and not publish_time:
                    weibo_data.append([nickname, uid, publish_time, weibo_text, retweeted_text, keyword])
                # Otherwise only keep posts that do carry a timestamp
                elif publish_time:
                    weibo_data.append([nickname, uid, publish_time, weibo_text, retweeted_text, keyword])
        print(f"[✓] 关键词“{keyword}”抓取完成，共采集：{len(weibo_data)} 条微博")

    finally:
        driver.quit()

    return weibo_data


def merge_and_export(province, city, nickname, all_data):
    """Deduplicate scraped rows, merge their matched keywords, and write a CSV.

    Posts that carry a publish time are identified by (uid, publish_time):
    the same post matched by several keywords collapses into one row whose
    keyword column joins all matches with '；'.  Posts without a publish time
    (e.g. QR-code-only cards) have no reliable identity, so they are only
    collapsed when their full content is identical — the previous keying by
    uid alone silently dropped every untimed post after the first one.

    Args:
        province: Province name; municipalities are written to Data/<province>,
            everything else to Data/<province>/<city>.
        city: City name used in the non-municipality output path.
        nickname: Account nickname used in the output file name.
        all_data: Rows of [nickname, uid, publish_time, text, retweeted_text, keyword].
    """
    fingerprint_to_data = {}

    for row in all_data:
        # Local name avoids shadowing the `nickname` parameter.
        row_nickname, uid, publish_time, text, retweeted_text, keyword = row

        if publish_time:
            # Timed posts: dedupe by (uid, time) and merge keywords.
            fingerprint = (uid, publish_time)
        else:
            # Untimed posts: dedupe only on identical content so distinct
            # posts are preserved rather than collapsed per uid.
            fingerprint = (uid, text, retweeted_text)

        if fingerprint not in fingerprint_to_data:
            fingerprint_to_data[fingerprint] = {
                "nickname": row_nickname,
                "uid": uid,
                "publish_time": publish_time,
                "text": text,
                "retweeted_text": retweeted_text,
                "keywords": set()
            }

        fingerprint_to_data[fingerprint]["keywords"].add(keyword)

    final_data = []
    for item in fingerprint_to_data.values():
        final_data.append([
            item["nickname"],
            item["uid"],
            item["publish_time"],
            item["text"],
            item["retweeted_text"],
            "；".join(sorted(item["keywords"]))
        ])

    # Sort chronologically; rows without a time sort first via ''.
    final_data.sort(key=lambda x: x[2] if x[2] else '')

    # Municipalities have no separate city level in the output tree.
    if province in ('北京市', '上海市', '天津市', '重庆市'):
        directory = f"Data/{province}"
    else:
        directory = f"Data/{province}/{city}"
    os.makedirs(directory, exist_ok=True)

    # Write the deduplicated rows as a UTF-8-BOM CSV (Excel-friendly).
    out_path = f"{directory}/{nickname}_{datetime.date.today()}.csv"
    with open(out_path, "w", encoding="utf-8-sig", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["用户名", "UID", "发布时间", "微博正文", "转发原文", "匹配关键词"])
        writer.writerows(final_data)

    print(f"[📁] 最终结果已写入：{out_path}，共 {len(final_data)} 条去重微博")


def get_xls_files(directory):
    """Return the .xlsx workbook file names directly under *directory*.

    Excel lock/temp files ("~$..." created while a workbook is open) are
    skipped because pandas cannot read them and they would crash the run.

    Args:
        directory: Path of the folder to scan (non-recursive).

    Returns:
        list[str]: Matching file names (not full paths).
    """
    return [
        f for f in os.listdir(directory)
        if f.endswith('.xlsx') and not f.startswith('~$')
    ]


if __name__ == "__main__":
    # Driver script: for every account listed in the province/city Excel
    # sheets, scrape disaster-keyword posts and export a per-account CSV.
    directory = 'prov_city_lists_2'  # folder holding the input Excel files
    all_data = []

    # Collect every Excel workbook in the input folder
    xls_files = get_xls_files(directory)

    # Process each workbook in turn
    for xls_file in xls_files:
        print(f"Processing file: {xls_file}")
        file_path = os.path.join(directory, xls_file)

        # Load the workbook with pandas
        df = pd.read_excel(file_path)

        # Walk the rows: each one names a province, city and account to scrape
        for index, row in df.iterrows():
            city_code = row['CityCode']  # city code (read but unused below)
            city_name = row['citynm']  # city name
            province = row['prov_code']  # province
            nickname = row['user_name']  # account nickname
            # Disaster-related search keywords (collapse / subsidence / road- and
            # traffic-interruption terms)
            keywords = ['垮塌', '坍塌', '塌陷', '道路中断', '交通中断']
            # keywords = ['地陷', '塌陷']  # alternative keyword set

            # Resolve the account's numeric UID from its nickname
            uid = get_user_id_by_name(nickname)
            if not uid:
                print(f"无法找到用户名: {nickname}")
                continue

            # Scrape each keyword for this account
            user_data = []
            for kw in keywords:
                data = scrape_user_weibo(nickname, uid, kw)
                user_data.extend(data)

            # Deduplicate, merge keywords and export this account's CSV
            merge_and_export(province, city_name, nickname, user_data)
            all_data.extend(user_data)

            print("Waiting...")
            time.sleep(10)  # pause 10 s between accounts to avoid rate limiting

        print(f"Finish file: {xls_file}")