import requests, re, time
from bs4 import BeautifulSoup
from datetime import datetime, timedelta

from common.consts import SPIDER_HEADER

def match_and_get_time(text):
    """
    Parse a Weibo timestamp string into "YYYY-MM-DD HH:MM:SS" format.

    Recognized input formats:
    1. "今天 00:51"           -- today's date with the given time
    2. "05月13日 21:47"       -- month/day with time; the current year is
                                 assumed (TODO confirm: posts from a previous
                                 December shown in January would be mis-dated)
    3. "2024-12-28 21:33:20"  -- full datetime, passed through normalized
    4. "19分钟前" / "1小时前"  -- relative "N minutes/hours ago"

    Args:
        text: raw timestamp text scraped from the page.

    Returns:
        The timestamp formatted as "YYYY-MM-DD HH:MM:SS".

    Raises:
        ValueError: if the text matches none of the known formats.
    """
    now = datetime.now()

    # Format 1: "今天 HH:MM" -> today's date + given time, seconds = 00
    match_today = re.search(r'今天\s+(\d{2}):(\d{2})', text)
    if match_today:
        hour, minute = map(int, match_today.groups())
        dt = datetime(now.year, now.month, now.day, hour, minute)
        return dt.strftime("%Y-%m-%d %H:%M:%S")

    # Format 2: "MM月DD日 HH:MM" -> current year assumed
    match_md = re.search(r'(\d{2})月(\d{2})日\s+(\d{2}):(\d{2})', text)
    if match_md:
        month, day, hour, minute = map(int, match_md.groups())
        dt = datetime(now.year, month, day, hour, minute)
        return dt.strftime("%Y-%m-%d %H:%M:%S")

    # Format 3: full "YYYY-MM-DD HH:MM:SS" -- parse and re-emit to validate
    match_full = re.search(r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})', text)
    if match_full:
        dt = datetime.strptime(match_full.group(1), "%Y-%m-%d %H:%M:%S")
        return dt.strftime("%Y-%m-%d %H:%M:%S")

    # Format 4a: "N分钟前" (N minutes ago) -> now minus N minutes
    match_min = re.search(r'(\d+)分钟前', text)
    if match_min:
        dt = now - timedelta(minutes=int(match_min.group(1)))
        return dt.strftime("%Y-%m-%d %H:%M:%S")

    # Format 4b: "N小时前" (N hours ago) -> now minus N hours
    match_hour = re.search(r'(\d+)小时前', text)
    if match_hour:
        dt = now - timedelta(hours=int(match_hour.group(1)))
        return dt.strftime("%Y-%m-%d %H:%M:%S")

    # Was print(...) + `assert False`: asserts are stripped under `python -O`
    # and give callers nothing to catch -- raise a real exception instead.
    raise ValueError(f"无法识别时间格式: {text!r}")


def user_blog_list(use_id, start_date):
    """
    Scrape the list of posts for one weibo.cn user, paging until the last page.

    Args:
        use_id: the numeric/alias user id used in the weibo.cn profile URL.
        start_date: earliest post date to fetch, formatted "YYYY-MM-DD"
            (e.g. "2024-01-01"); passed straight into the query string.

    Returns:
        A list of dicts, one per post, with keys:
        'id' (str), 'content' (str), 'attitudes' (int, likes),
        'comments' (int), 'reposts' (int), 'times' (str "YYYY-MM-DD HH:MM:SS").

    Notes:
        On a request error it sleeps 60s and retries the same page forever.
        Stops early when a page contains no post entries.
    """
    page = 1
    result = []
    total_page = None
    while total_page is None or page <= total_page:
        url = f'https://weibo.cn/{use_id}/profile?starttime={start_date}&endtime=now&advancedfilter=1&page={page}'
        print(url)
        try:
            # timeout added: requests has no default timeout, so a stalled
            # connection would otherwise hang this loop forever.
            response = requests.get(url, headers=SPIDER_HEADER, timeout=30)
        except Exception as e:
            print(f"请求失败，60s后重试一次,错误内容:{e}")
            time.sleep(60)
            continue
        soup = BeautifulSoup(response.text, 'html.parser')
        # Each post (and some non-post chrome) lives in a <div class="c">.
        div_list = soup.find_all('div', class_='c')

        # Tracks whether this page yielded any real post; an empty page
        # means we've run past the last page of results.
        page_has_posts = False
        for div_c in div_list:
            # Only real post divs contain a <span class="ct"> (the timestamp).
            ct_span = div_c.find('span', class_='ct')
            if ct_span is None:
                continue

            page_has_posts = True

            # 1. Post text body.
            span_content = div_c.find('span', class_='ctt')
            content = span_content.text

            # 2. Counters live in the trailing <a> tags, counted from the
            #    end: likes, reposts, comments, then the permalink.
            a_list = div_c.find_all('a')
            likes = re.search(r'赞\[(\d+)\]', a_list[-4].text)[1]
            reposts = re.search(r'转发\[(\d+)\]', a_list[-3].text)[1]
            comments = re.search(r'评论\[(\d+)\]', a_list[-2].text)[1]

            # 3. Post id is the path segment before '?' in the last link.
            href = a_list[-1].get('href')
            match = re.search(r'/([a-zA-Z0-9]+)\?', href)
            blog_id = match.group(1)

            # 4. Timestamp comes from the same ct span found above
            #    (was looked up a second time in the original).
            times = match_and_get_time(ct_span.text)

            # Determine the total page count once, from the pager div;
            # when the pager is absent the profile has a single page.
            if total_page is None:
                div_total = soup.find('div', class_='pa')
                if not div_total:
                    total_page = 1
                else:
                    total_page = int(re.search(r'/(\d+)页', div_total.text)[1])

            result.append({
                'id': blog_id,
                'content': content,
                'attitudes': int(likes),
                'comments': int(comments),
                'reposts': int(reposts),
                'times': times,
            })
        if not page_has_posts:
            break
        page += 1
    return result


