import os
import traceback
from urllib import parse
import requests
import json
import time
import random  # previously missing; used for request-throttling jitter in __request
from bs4 import BeautifulSoup
from settings import weibo_current_path, weibo_hot_path, weibo_pic_path, record_url_local, requests_timeout
from tools import download_pic, log, excel, getResModel
# Selenium imports for JS-rendered detail pages (publish time, likes, full text)
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

class Weibo:
    """Scraper for Weibo mobile search feeds (realtime, hot, picture).

    The JSON search API on m.weibo.cn is fetched through a retry-enabled
    ``requests`` session; detail pages that need JavaScript rendering
    (publish time, like count, full post text) are loaded with one shared
    Selenium Chrome instance.  Matched records are appended to the
    project-wide ``excel`` recorder and pictures are saved with
    ``download_pic``.

    Call :meth:`setKeyword` before any of :meth:`current`, :meth:`hot`
    or :meth:`picture`, and :meth:`close` when finished.
    """

    def __init__(self):
        # HTTP session for the JSON API endpoints.
        self.session = self._create_session()
        # One shared browser instance, reused for every detail page.
        self.browser = webdriver.Chrome()
        # makedirs(..., exist_ok=True) replaces the exists()/mkdir() pair:
        # it is race-free and also creates missing parent directories.
        for path in (weibo_current_path, weibo_hot_path, weibo_pic_path):
            os.makedirs(path, exist_ok=True)

    def close(self):
        """Release the Selenium browser and the HTTP session.

        The class previously leaked the Chrome process; call this when the
        scraper is no longer needed.  Safe to call more than once; shutdown
        errors are swallowed because nothing useful can be done with them.
        """
        try:
            self.browser.quit()
        except Exception:
            pass
        try:
            self.session.close()
        except Exception:
            pass

    def _create_session(self):
        """Build a ``requests.Session`` with browser-like headers and retries.

        Returns:
            A session that retries GETs up to 5 times (exponential backoff)
            on transient HTTP errors, mounted for both http and https.
        """
        session = requests.Session()
        # Mimic a desktop Chrome browser so the API does not reject us.
        session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Connection': 'keep-alive',
        })
        retries = requests.packages.urllib3.util.retry.Retry(
            total=5,
            backoff_factor=0.3,
            status_forcelist=[500, 502, 503, 504, 429],
            allowed_methods=frozenset(['GET'])
        )
        adapter = requests.adapters.HTTPAdapter(max_retries=retries)
        # Fix: the retry adapter previously covered https:// only; mount it
        # for plain http too so redirected/legacy links also get retries.
        session.mount('https://', adapter)
        session.mount('http://', adapter)
        # NOTE(review): ``keep_alive`` is not a documented Session attribute;
        # kept from the original code but it is most likely a no-op — confirm
        # before relying on it to close connections.
        session.keep_alive = False
        return session

    def setKeyword(self, keywords: list):
        """Join *keywords* with '+' and store the result as the search key."""
        self.key = "+".join(keywords)

    def __pic(self, mblog: dict, source: str, base_path: str):
        """Record and download every picture attached to one post.

        Args:
            mblog: raw post dict from the search API; a no-op unless it
                contains a 'pics' list.
            source: human-readable feed label written into each record.
            base_path: directory where downloaded pictures are stored.
        """
        if 'pics' not in mblog.keys():
            return
        tags = self.__getTages(mblog['text'])
        excel.appendTagsFromList(tags)
        # Detail pages are rendered with JS, so publish time and like count
        # must be scraped with Selenium rather than read from the JSON API.
        publish_time = "-"
        likes_count = 0
        try:
            detail_url = 'https://m.weibo.cn/detail/' + str(mblog['id'])
            self.browser.get(detail_url)
            WebDriverWait(self.browser, 10).until(
                EC.presence_of_element_located((By.CLASS_NAME, "time")))
            try:
                publish_time = self.browser.find_element(By.CLASS_NAME, "time").text.strip()
            except Exception:
                publish_time = "-"
            try:
                likes_count = self.__parseLikes()
            except Exception:
                likes_count = 0
        except Exception as e:
            log(f"Selenium 获取详情页失败: {e}")
        for pic_url in mblog['pics']:
            try:
                # Prefer the full-resolution variant when available.
                url = pic_url['large']['url'] if 'large' in pic_url.keys() else pic_url['url']
                temp = self.__divideWeibo(mblog=mblog, source=source, tags=tags, url=url,
                                          base_path=base_path,
                                          publish_time=publish_time, likes_count=likes_count)
                excel.appendRecord(temp)
                download_pic(url, temp[record_url_local])
            except Exception as e:
                log(f"下载图片失败: {e}")
                continue

    def __parseLikes(self):
        """Extract the like count from the currently loaded detail page.

        Looks for the first tab item containing the '赞' label.  Handles the
        '万' (x10000) suffix and — fix — thousands separators ("1,234"),
        which previously parsed as 0.  Returns 0 when nothing numeric is
        found.
        """
        for item in self.browser.find_elements(By.CLASS_NAME, "tab-item"):
            if "赞" in item.text:
                parts = item.text.strip().split("赞")
                if len(parts) > 1:
                    likes_text = parts[1].strip().replace(",", "")
                    if "万" in likes_text:
                        return int(float(likes_text.replace("万", "")) * 10000)
                    if likes_text.isdigit():
                        return int(likes_text)
                # Mirror the original behavior: stop at the first '赞' tab.
                return 0
        return 0

    def __getTages(self, text: str):
        """Return hashtag texts ('#' stripped) parsed from the HTML *text*."""
        soup = BeautifulSoup(text, 'html.parser')
        return [i.text.replace("#", "") for i in soup.find_all(class_='surl-text')]

    def __getText(self, text: str, detail_url: str = None):
        """Return the plain text of a post.

        Args:
            text: HTML snippet from the search API.
            detail_url: when given, the detail page is loaded with Selenium
                and its (usually longer) rendered text is preferred; the API
                snippet remains the fallback on any failure or empty result.
        """
        plain = BeautifulSoup(text, 'html.parser').text
        if detail_url is not None:
            try:
                self.browser.get(detail_url)
                WebDriverWait(self.browser, 10).until(
                    EC.presence_of_element_located((By.CLASS_NAME, "weibo-text")))
                detail_text = self.browser.find_element(By.CLASS_NAME, "weibo-text").text.strip()
                if detail_text:
                    return detail_text
            except Exception:
                pass
        return plain

    def __divideWeibo(self, mblog: dict, source: str, tags: list, url: str, base_path: str, publish_time: str = "-",
                      likes_count: int = 0):
        """Assemble one result record (via ``getResModel``) for a picture.

        The local file name is derived from the current excel row count so
        consecutive downloads get unique paths.  Returns whatever
        ``getResModel`` produces.
        """
        article_url = 'https://m.weibo.cn/detail/' + str(mblog['id'])
        basic_text = self.__getText(mblog['text'])              # API snippet only
        full_text = self.__getText(mblog['text'], article_url)  # rendered page
        return getResModel(
            type='图片', source=source, keyword=self.key,
            author=mblog['user']['screen_name'], title="-",
            introduction=basic_text,
            full_text=full_text,
            tags=tags, caption="-",
            url_article=article_url,
            url_pic=url,
            url_local=base_path + self.key + str(excel.nrows) + ".png",
            publish_time=publish_time,
            likes_count=likes_count
        )

    def __currentAnalysis(self, page: int):
        """Fetch and process one page of realtime search results."""
        url = f"https://m.weibo.cn/api/container/getIndex?containerid=100103type%3D61%26q%3D{self.key}%26t%3D&page_type=searchall&page={page}"
        data = self.__request(url)
        if not data or 'data' not in data or 'cards' not in data['data']:
            log(f"页面 {page} 数据格式异常: {data}")
            return
        for card in data['data']['cards']:
            try:
                if 'mblog' not in card:
                    continue
                # Skip posts already recorded by a previous run.
                if excel.checkHttpRepeat('https://m.weibo.cn/detail/' + str(card['mblog']['id'])):
                    continue
                print(card['mblog']['id'])
                self.__pic(mblog=card['mblog'], source="微博实时动态", base_path=weibo_current_path)
            except Exception as e:
                log(f"处理实时微博卡片失败: {e}")
                continue

    def __hotAnalysis(self, page: int):
        """Fetch and process one page of hot search results.

        Hot pages return posts either directly on a card or nested inside a
        'card_group'; both layouts are handled.
        """
        url = f"https://m.weibo.cn/api/container/getIndex?containerid=100103type%3D60%26q%3D{self.key}%26t%3D&page_type=searchall&page={page}"
        data = self.__request(url)
        if not data or 'data' not in data:
            log(f"热门页面 {page} 数据格式异常: {data}")
            return
        for card in data['data']['cards']:
            try:
                if 'mblog' in card.keys():
                    if excel.checkHttpRepeat('https://m.weibo.cn/detail/' + str(card['mblog']['id'])):
                        continue
                    print(card['mblog']['id'])
                    self.__pic(mblog=card['mblog'], source="微博热门动态", base_path=weibo_hot_path)
                elif 'card_group' in card.keys():
                    for card_group in card['card_group']:
                        if 'mblog' not in card_group.keys():
                            continue
                        if excel.checkHttpRepeat('https://m.weibo.cn/detail/' + str(card_group['mblog']['id'])):
                            continue
                        print(card_group['mblog']['id'])
                        self.__pic(mblog=card_group['mblog'], source="微博热门动态", base_path=weibo_hot_path)
            except Exception as e:
                log(f"处理热门微博卡片失败: {e}")
                continue

    def __picAnalysis(self, page: int):
        """Fetch and process one page of picture search results.

        Picture pages lay posts out in a two-column grid, so each card group
        carries a 'left_element' and a 'right_element'.
        """
        url = f"https://m.weibo.cn/api/container/getIndex?containerid=100103type%3D63%26q%3D{self.key}%26t%3D&page_type=searchall&page={page}"
        data = self.__request(url)
        if not data or 'data' not in data or 'cards' not in data['data']:
            log(f"图片页面 {page} 数据格式异常: {data}")
            return
        for cards in data['data']['cards']:
            try:
                if 'card_group' not in cards:
                    continue
                for card_group in cards['card_group']:
                    for side in ('left_element', 'right_element'):
                        if side not in card_group or 'mblog' not in card_group[side]:
                            continue
                        if excel.checkHttpRepeat('https://m.weibo.cn/detail/' + str(card_group[side]['mblog']['id'])):
                            continue
                        print(card_group[side]['mblog']['id'])
                        self.__pic(mblog=card_group[side]['mblog'], source="微博图片动态", base_path=weibo_pic_path)
            except Exception as e:
                log(f"处理图片微博卡片失败: {e}")
                continue

    def __request(self, url: str):
        """GET *url* and return the parsed JSON payload, or ``None`` on failure.

        A random 1-3 second delay precedes every request to throttle the
        crawl.  On SSL errors a single unverified retry is attempted;
        responses with ``ok != 1`` are treated as API-level failures.
        """
        response = None
        try:
            time.sleep(1 + 2 * random.random())
            response = self.session.get(url=url, timeout=requests_timeout, verify=True)
            response.raise_for_status()
            data = response.json()
            if data.get('ok') != 1:
                log(f"API请求失败: {data.get('msg', '未知错误')}")
                return None
            return data
        except requests.exceptions.SSLError as e:
            log(f"SSL错误: {e}. 尝试降低SSL验证级别")
            try:
                # Security note: verify=False disables certificate checking;
                # acceptable here only as a last-resort best-effort fallback.
                response = self.session.get(url=url, timeout=requests_timeout, verify=False)
                response.raise_for_status()
                return response.json()
            except Exception as e2:
                log(f"再次请求失败: {e2}")
                return None
        except json.JSONDecodeError as e:
            # Fix: this clause must precede RequestException — in requests
            # >= 2.27 ``response.json()`` raises a JSONDecodeError that also
            # subclasses RequestException, which made this branch (and its
            # response-snippet logging) unreachable before.
            log(f"JSON解析错误: {e}. 响应内容: {response.text[:200]}...")
            return None
        except requests.exceptions.RequestException as e:
            log(f"请求异常: {e}")
            return None

    def current(self, page: int):
        """Crawl realtime-feed pages 1..page, continuing past failures."""
        for i in range(1, page + 1):
            try:
                print(f"正在爬取实时微博第 {i} 页")
                self.__currentAnalysis(i)
            except Exception:
                log(f"爬取实时微博第 {i} 页时出错: {traceback.format_exc()}")
                continue

    def hot(self, page: int):
        """Crawl hot-feed pages 1..page, continuing past failures."""
        for i in range(1, page + 1):
            try:
                print(f"正在爬取热门微博第 {i} 页")
                self.__hotAnalysis(i)
            except Exception:
                log(f"爬取热门微博第 {i} 页时出错: {traceback.format_exc()}")
                continue

    def picture(self, page: int):
        """Crawl picture-feed pages 1..page, continuing past failures."""
        for i in range(1, page + 1):
            try:
                print(f"正在爬取图片微博第 {i} 页")
                self.__picAnalysis(i)
            except Exception:
                log(f"爬取图片微博第 {i} 页时出错: {traceback.format_exc()}")
                continue