# core/parser.py

import logging
import re
from datetime import datetime, timedelta

import requests
from bs4 import BeautifulSoup

from core.models import User, Weibo



class WeiboParser:
    """Static parsers for weibo.cn mobile HTML pages.

    Provides user-profile parsing, single-weibo parsing, and weibo-list
    parsing.  All methods are static; no state is kept on the class.
    """

    @staticmethod
    def parse_user_info(html: str) -> dict:
        """Parse a user's profile page into a flat dict.

        :param html: full HTML of the profile page
        :return: dict with user_id, nickname, gender, location, description,
                 verified_reason, weibo_count, following_count, follower_count;
                 text fields default to '未知', counters to 0 when missing.
        """
        soup = BeautifulSoup(html, 'html.parser')
        user_info_divs = soup.find_all('div', class_='c')

        user_data = {
            'user_id': '',
            'nickname': '未知',
            'gender': '未知',
            'location': '未知',
            'description': '未知',
            'verified_reason': '未知',
            'weibo_count': 0,
            'following_count': 0,
            'follower_count': 0
        }

        if len(user_info_divs) > 1:
            # The profile text lives in a later class="c" div; take the first
            # one containing the "昵称" (nickname) label.
            info_text = ''
            for div in user_info_divs[1:]:
                text = div.text.strip()
                if '昵称' in text:
                    info_text = text
                    break

            if info_text:
                # Append a real newline so the (?=\n|$) lookahead always has a
                # terminator.  (Bug fix: the original appended the two literal
                # characters backslash + 'n', which could leak into the match.)
                padded = info_text + '\n'
                nickname = re.findall('昵称:(.*?)(?=(?:认证|性别))', padded)
                gender = re.findall('性别:(.*?)地区', padded)
                location = re.findall('地区:(.*?)生日', padded)
                description = re.findall('简介:(.*?)(?=\n|$)', padded)

                user_data['nickname'] = nickname[0] if nickname else '未知'
                user_data['gender'] = gender[0] if gender else '未知'
                user_data['location'] = location[0] if location else '未知'
                user_data['description'] = description[0] if description else '未知'

        return user_data

    @staticmethod
    def _count_from_tag(tag) -> int:
        """Return the first integer found in a tag's text, or 0 when absent.

        Guards against tags whose text contains no digits (the original code
        indexed findall(...)[0] unconditionally and could raise IndexError).
        """
        if tag and tag.text:
            digits = re.findall(r'\d+', tag.text)
            if digits:
                return int(digits[0])
        return 0

    @staticmethod
    def parse_weibo(html_content: str, fetch_full_text=None) -> dict:
        """Parse one weibo entry's HTML fragment into a dict.

        :param html_content: HTML of a single weibo <div class="c" id="M_...">
        :param fetch_full_text: optional callable(url) -> str used to resolve
               truncated posts; defaults to a plain requests-based fetcher
        :return: dict with id, content, publish_time (formatted string),
                 device, repost_count, comment_count, like_count, images
        """
        soup = BeautifulSoup(html_content, 'html.parser')

        # The weibo id comes from the /comment/<id> link.
        weibo_id = None
        comment_link = soup.find('a', href=re.compile(r'/comment/[A-Za-z0-9]+'))
        if comment_link:
            match = re.search(r'/comment/([A-Za-z0-9]+)', comment_link['href'])
            weibo_id = match.group(1) if match else None

        # Post body text.
        ctt = soup.find('span', class_='ctt')
        content = ctt.get_text().strip() if ctt else ''

        # Truncated posts carry a "全文" (full text) link; resolve it.
        full_text_link = soup.find('a', string='全文')
        if full_text_link:
            full_text_url = f"https://weibo.cn{full_text_link['href']}"
            # Without a callback we fall back to a plain requests fetch
            # (callers driving a WebDriver session pass their own fetcher).
            if fetch_full_text is None:
                content = default_fetch_full_text(full_text_url)
            else:
                content = fetch_full_text(full_text_url)

        # Publish time and client device live in the span.ct footer.
        time_source_tag = soup.select_one('span.ct')
        info = time_source_tag.text.strip() if time_source_tag else ''
        publish_time = WeiboParser.handle_time(info) if info else datetime.now()

        device = ''
        if match := re.search(r'来自(.*?)$', info):
            device = match.group(1)

        # Interaction counters (0 when the anchor is missing or digit-less).
        like_count = WeiboParser._count_from_tag(
            soup.find('a', href=re.compile(r'/attitude/')))
        comment_count = WeiboParser._count_from_tag(soup.find('a', class_='cc'))
        repost_count = WeiboParser._count_from_tag(
            soup.find('a', href=re.compile(r'/repost/')))

        # Image link: the "组图"/"原图" anchor, when present.
        images = []
        img_pattern = re.compile('组图|原图')
        imgs = soup.find('a', string=img_pattern)
        if imgs:
            images.append(imgs['href'])

        return {
            'id': weibo_id,
            'content': content,
            'publish_time': publish_time.strftime('%Y-%m-%d %H:%M:%S'),
            'device': device,
            'repost_count': repost_count,
            'comment_count': comment_count,
            'like_count': like_count,
            'images': images
        }

    @staticmethod
    def handle_time(publish_time_str):
        """Convert a weibo.cn timestamp string into a datetime.

        Supported forms: 'N分钟前', 'N秒前', '今天 HH:MM',
        'YYYY-MM-DD HH:MM:SS...', and 'M月D日 HH:MM'.  Anything else
        falls back to the current time.

        :param publish_time_str: raw text from the span.ct footer
        :return: best-effort datetime of publication
        """
        now = datetime.now()
        if '分钟前' in publish_time_str:
            minutes_ago = int(re.findall(r'(\d+)分钟前', publish_time_str)[0])
            return now - timedelta(minutes=minutes_ago)
        elif '秒前' in publish_time_str:
            # Bug fix: capture the digit group; the original pattern r'秒前'
            # returned the literal suffix and int() raised ValueError.
            seconds_ago = int(re.findall(r'(\d+)秒前', publish_time_str)[0])
            return now - timedelta(seconds=seconds_ago)
        elif '今天' in publish_time_str:
            time_part = re.findall(r'今天 (\d+:\d+)', publish_time_str)[0]
            return datetime.strptime(f"{now.year}-{now.month}-{now.day} {time_part}", '%Y-%m-%d %H:%M')
        elif re.match(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}', publish_time_str):
            # Absolute timestamp; ignore any trailing device suffix.
            return datetime.strptime(publish_time_str[:19], '%Y-%m-%d %H:%M:%S')
        else:
            try:
                month, day, time_part = re.findall(r'(\d+)月(\d+)日 (\d+:\d+)', publish_time_str)[0]
                return datetime.strptime(f"{now.year}-{month}-{day} {time_part}", '%Y-%m-%d %H:%M')
            except Exception:
                # Unknown format: best-effort fallback to "now".
                return now

    @staticmethod
    def parse_weibo_list(html: str) -> list:
        """Parse every weibo entry on a list page.

        :param html: full HTML of the list page
        :return: list of dicts as produced by parse_weibo
        """
        soup = BeautifulSoup(html, 'html.parser')
        # Each weibo entry is a <div class="c" id="M_...">.
        weibo_divs = soup.find_all('div', class_='c', id=lambda x: x and x.startswith('M_'))
        result = []

        for div in weibo_divs:
            parsed = WeiboParser.parse_weibo(str(div))
            if parsed:
                result.append(parsed)
                # TODO: de-duplicate entries and attach the owning user_id.

        return result



def default_fetch_full_text(full_text_url):
    """Fetch the full text of a truncated weibo via a plain requests GET.

    :param full_text_url: absolute URL of the "全文" (full text) page
    :return: stripped text of the span.ctt node, or '' on any failure
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0 Safari/537.36'
    }
    try:
        response = requests.get(full_text_url, headers=headers, timeout=10)
        if response.status_code == 200:
            soup = BeautifulSoup(response.text, 'html.parser')
            ctt = soup.find('span', class_='ctt')
            return ctt.get_text().strip() if ctt else ''
    except Exception as e:
        # Best-effort fetch: log and fall through to the empty-string default.
        # (Bug fix: `logging` was used here without ever being imported, so
        # the handler itself raised NameError and masked the real error.)
        logging.warning(f"无法获取全文 {full_text_url}: {e}")
    return ''


if __name__ == '__main__':
    import os

    # Saved sample page used for manual, offline testing of the parsers.
    TEST_HTML_PATH = "../html/6819693315_page.html"


    def _run_demo():
        """Load the sample page and print the parsed user info and weibo list."""
        if not os.path.exists(TEST_HTML_PATH):
            print(f"文件不存在: {TEST_HTML_PATH}")
            return

        with open(TEST_HTML_PATH, 'r', encoding='utf-8') as f:
            page_html = f.read()

        # Exercise the user-profile parser.
        print("解析出的用户信息:")
        print(WeiboParser.parse_user_info(page_html))

        # Exercise the weibo-list parser.
        print("\n解析出的微博列表:")
        for entry in WeiboParser.parse_weibo_list(page_html):
            print(entry)


    _run_demo()
