import requests, re, time, os
from bs4 import BeautifulSoup
from flask import current_app
from consts import SPIDER_HEADER, USER_AVATAR_PATH


from utils.date_format import match_and_get_time
from utils.print_message import print_message



def _extract_count(pattern, text):
    """Return the first captured integer in *text*, or 0 when *pattern* is absent."""
    match = re.search(pattern, text)
    return int(match.group(1)) if match else 0


def user_blog_list(use_id, start_date):
    """Scrape a user's Weibo posts published since *start_date*.

    Walks the paginated mobile profile (weibo.cn) until the page count is
    exhausted or a page yields no posts.

    Args:
        use_id: Weibo user id used in the profile URL.
        start_date: lower date bound, formatted 'YYYY-MM-DD' (e.g. 2024-01-01).

    Returns:
        list[dict]: one dict per post with keys
        'id', 'content', 'attitudes', 'comments', 'reposts', 'times'.
    """
    page = 1
    result = []
    total_page = None  # unknown until the first page's pagination widget is parsed
    while total_page is None or page <= total_page:
        url = f'https://weibo.cn/{use_id}/profile?starttime={start_date}&endtime=now&advancedfilter=1&page={page}'
        print_message(url)
        try:
            # timeout keeps a stalled connection from hanging the crawl forever
            response = requests.get(url, headers=SPIDER_HEADER, timeout=30)
        except Exception as e:
            # On any request failure, wait 60s and retry the same page.
            print(f"请求失败，60s后重试一次,错误内容:{e}")
            time.sleep(60)
            continue
        soup = BeautifulSoup(response.text, 'html.parser')
        # Every post lives in a <div class="c"> (other non-post divs share the class).
        div_list = soup.find_all('div', class_='c')

        # Stays True when the page yields no posts -> we've walked past the end.
        page_empty = True
        for div_c in div_list:
            # A <span class="ct"> (the timestamp span) marks a real post div.
            time_span = div_c.find('span', class_='ct')
            if time_span is None:
                continue

            page_empty = False

            # 1. Post text content.
            span_content = div_c.find('span', class_='ctt')
            content = span_content.text if span_content else ''

            # 2. Like / repost / comment counters sit in the trailing <a> tags.
            a_list = div_c.find_all('a')
            if len(a_list) < 4:
                # Malformed entry; skip rather than crash on negative indexing.
                continue
            likes = _extract_count(r'赞\[(\d+)\]', a_list[-4].text)
            reposts = _extract_count(r'转发\[(\d+)\]', a_list[-3].text)
            comments = _extract_count(r'评论\[(\d+)\]', a_list[-2].text)

            # The last link's href looks like '/<blog_id>?...'; the path segment is the id.
            href = a_list[-1].get('href') or ''
            id_match = re.search(r'/([a-zA-Z0-9]+)\?', href)
            if id_match is None:
                continue
            blog_id = id_match.group(1)

            times = match_and_get_time(time_span.text)

            # Record the total page count once, from the pagination div.
            if total_page is None:
                div_total = soup.find('div', class_='pa')
                if not div_total:
                    # No pagination widget means the profile has a single page.
                    total_page = 1
                else:
                    total_page = int(re.search(r'/(\d+)页', div_total.text)[1])

            result.append({
                'id': blog_id,
                'content': content,
                'attitudes': likes,
                'comments': comments,
                'reposts': reposts,
                'times': times,
            })
        if page_empty:
            break
        page += 1
    return result

def download_user_avatar(data):
    """Download a user's avatar image into the app's avatar directory.

    Mutates *data* in place, setting data['avatar_file'] to the stored file
    name. Users who kept Weibo's default avatar (URL contains 'default') get
    'default.jpg' without any download.

    Args:
        data: dict with at least 'profile_image_url' and 'id' keys.
    """
    avatar_url = data['profile_image_url']
    # Weibo's stock-avatar URLs contain 'default'; no point downloading those.
    if 'default' in avatar_url:
        data['avatar_file'] = "default.jpg"
        return
    # timeout keeps a dead CDN from blocking the caller indefinitely
    response = requests.get(avatar_url, timeout=30)
    avatar_dir = os.path.join(current_app.root_path, USER_AVATAR_PATH)
    str_id = str(data['id'])
    # NOTE(review): plain string concatenation — assumes USER_AVATAR_PATH ends
    # with a path separator; confirm against consts before changing.
    avatar_file_dir = f"{avatar_dir}{str_id}.jpg"
    # Write the image bytes to disk.
    with open(avatar_file_dir, 'wb') as f:
        f.write(response.content)
    data['avatar_file'] = f"{str_id}.jpg"