import requests, re, time, os

from utils.print_message import print_message
from utils.create_header import create_header

from bs4 import BeautifulSoup

from utils.date_format import match_and_get_time


def get_user_weibo_page(use_id, cookie, start_date):
    """Return the total number of result pages for a user's weibo search.

    Args:
        use_id: weibo user id interpolated into the profile URL.
        cookie: session cookie string passed to ``create_header``.
        start_date: search window start (``endtime`` is fixed to ``now``).

    Returns:
        int: total page count parsed from the pagination div; ``1`` when
        the pagination element is absent (results fit on one page) or
        when the page could not be fetched/parsed.
    """
    url = f'https://weibo.cn/{use_id}/profile?starttime={start_date}&endtime=now&advancedfilter=1&page=1'
    print_message(f"获取当前总页数，url为:{url}")
    headers = create_header(cookie)
    response = None
    # The original code printed "retry after 60s" but never retried, and a
    # failed request then crashed with AttributeError on `None.text`.
    # Actually perform one delayed retry, and fall back gracefully.
    for attempt in range(2):
        try:
            response = requests.get(url, headers=headers)
            break
        except Exception as e:
            print(f"请求失败，60s后重试一次,错误内容:{e}")
            if attempt == 0:
                time.sleep(60)
    if response is None:
        # Both attempts failed; assume a single page instead of crashing.
        return 1
    soup = BeautifulSoup(response.text, 'html.parser')
    div_total = soup.find('div', class_='pa')
    total_page = 1
    # 找不到这个东西的时候表示微博只有一页
    # (the pagination div is missing when there is only one page)
    if div_total:
        match = re.search(r'/(\d+)页', div_total.text)
        if match:  # guard: layout change would otherwise raise TypeError
            total_page = int(match[1])
    return total_page


def get_weibo_list_by_page(user_id, cookie, page, start_date):
    """Fetch one page of a user's weibo posts and parse them into dicts.

    Args:
        user_id: weibo user id interpolated into the profile URL.
        cookie: session cookie string passed to ``create_header``.
        page: 1-based page number to fetch.
        start_date: search window start (``endtime`` is fixed to ``now``).

    Returns:
        list[dict] | None: one dict per post with keys ``weibo_id``,
        ``user_id``, ``content``, ``attitudes``, ``comments``,
        ``reposts``, ``release_time``; ``None`` when the HTTP request
        fails. Entries that do not match the expected layout are skipped
        instead of raising.
    """
    url = f'https://weibo.cn/{user_id}/profile?starttime={start_date}&endtime=now&advancedfilter=1&page={page}'
    print_message(url)
    try:
        headers = create_header(cookie)
        response = requests.get(url, headers=headers)
    except Exception as e:
        print(f"请求失败，错误内容:{e}")
        return None

    result = []
    soup = BeautifulSoup(response.text, 'html.parser')
    # 找到class仅仅为c，没有其他的class的所有div
    # (each post lives in a <div class="c">; other divs share this class)
    for div_c in soup.find_all('div', class_='c'):
        # Real posts always carry a <span class="ct"> timestamp; divs
        # without it are navigation/filler blocks. Keep the node so we
        # don't have to look it up again for the release time below.
        span_ct = div_c.find('span', class_='ct')
        if span_ct is None:
            continue

        # 1.获取content的内容 (post body text)
        span_content = div_c.find('span', class_='ctt')
        if span_content is None:
            continue
        content = span_content.text

        # 2.获取点赞数，转发数，评论数
        # Counters sit in the trailing anchors in a fixed order:
        # 赞, 转发, 评论, then the permalink — need at least 4 anchors.
        a_list = div_c.find_all('a')
        if len(a_list) < 4:
            continue
        likes_m = re.search(r'赞\[(\d+)\]', a_list[-4].text)
        reposts_m = re.search(r'转发\[(\d+)\]', a_list[-3].text)
        comments_m = re.search(r'评论\[(\d+)\]', a_list[-2].text)

        # 匹配 /value?  value就是id (permalink href is /<blog_id>?...)
        href = a_list[-1].get('href') or ''
        id_m = re.search(r'/([a-zA-Z0-9]+)\?', href)

        if not (likes_m and reposts_m and comments_m and id_m):
            # Layout changed or entry is malformed; the original code
            # crashed here (TypeError on None match) — skip instead.
            continue

        result.append({
            'weibo_id': id_m.group(1),
            'user_id': user_id,
            'content': content,
            'attitudes': int(likes_m[1]),
            'comments': int(comments_m[1]),
            'reposts': int(reposts_m[1]),
            'release_time': match_and_get_time(span_ct.text),
        })
    return result
    
    