import time

import requests
import re
import json
import urllib.parse
from bs4 import BeautifulSoup

# HTTP headers reused by every request in this script.
# NOTE(review): the cookie below is a hard-coded, account-bound session
# credential — it expires and should not live in source control; load it
# from an environment variable or a local config file instead.
headers = {
    'authority': 's.weibo.com',
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'accept-encoding': 'gzip, deflate, br',
    'cookie': 'UOR=,,www.baidu.com; SCF=Ar-_4XvvZkAGuZsWVLyxaQYDM-e_QpbXRIcxnyU8c4dPAizEgXH0L05-B480471ctutIxFPg5HF24YdkhxePoD8.; SINAGLOBAL=7986399817477.452.1709480846774; SUB=_2A25LgJhwDeRhGeFG6FUZ9S3LyjiIHXVo_5W4rDV8PUNbmtANLUzbkW9NebEj8VHiYo_IGA8_PFL_JSmx1ST7EQsB; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9W5-pJAjgH5O6OuBF_K9WpxY5NHD95QN1heN1h-0S02XWs4DqcjMi--NiK.Xi-2Ri--ciKnRi-zNe0q01hz7SoqRe7tt; ALF=02_1722578208; _s_tentry=weibo.com; Apache=892661406526.2761.1719986292685; ULV=1719986292689:29:1:1:892661406526.2761.1719986292685:1715224205372'
}
# URL-encoded search keyword ('旅游' = travel).
q = urllib.parse.quote('旅游')
# Posts whose text contains any of these terms are treated as ads and dropped.
filter_list = ['征婚', '专升本']
# Matches one HTML/XML tag; compiled once at import time.  Renamed from the
# generic lowercase `regex` to follow the UPPER_SNAKE_CASE constant convention
# (the pattern is referenced only by remove_html below).
_TAG_RE = re.compile(r'<[^>]+>')

def remove_html(string):
    """Return *string* with every HTML/XML tag stripped out."""
    return _TAG_RE.sub('', string)

def remove_blank(string):
    """Return *string* with every space and newline character deleted."""
    return string.translate(str.maketrans('', '', ' \n'))

def main_worker():
    """Run the scraping pipeline.

    The crawl stage (get_weibo) is currently disabled; only the
    comment-fetching stage runs, reading records produced by a
    previous crawl from data/record.json.
    """
    # get_weibo()
    get_comments()

def get_weibo():
    """Scrape Weibo search results for the keyword `q` into data/record.json.

    For each configured day (currently sliced to the first day of 2023-05)
    up to 50 result pages are fetched.  Each feed card is parsed into a dict
    holding the post id, link, source, time, text, picture URLs,
    forward/comment/like counts, and the author's id and nickname.  Posts
    whose text contains a term from `filter_list` are treated as ads and
    dropped.  The collected records are saved via write().
    """
    record_list = []

    y = '2023'
    m = '05'
    # Day-of-month strings '01'..'30'; sliced to the first day for now.
    for d in [f'{day:02d}' for day in range(1, 31)][:1]:
        for page in range(1, 51):  # was `range(50)` plus a manual `page += 1`
            url = (f'https://s.weibo.com/weibo?q={q}&typeall=1&haspic=1&suball=1'
                   f'&timescope=custom:{y}-{m}-{d}-0:{y}-{m}-{d}-23&Refer=g&page={page}')
            try:
                # Fix: the original request had no timeout and could hang forever.
                resp = requests.get(url, headers=headers, timeout=30)
                soup = BeautifulSoup(resp.text, 'html.parser')

                content_list = soup.find_all(
                    'div', attrs={'action-type': 'feed_list_item', 'class': 'card-wrap'})
                for content in content_list:
                    try:
                        record = {}

                        # 微博id
                        record['blog_id'] = content['mid']

                        # 微博链接
                        record['blog_link'] = content.find_next(
                            'div', attrs={'class': 'from'}).find_next('a')['href']

                        # 微博来源
                        record['blog_from'] = remove_blank(content.find_next(
                            'div', attrs={'class': 'from'}).find_next(
                            'a', attrs={'rel': 'nofollow'}).text)

                        # 微博时间 — renamed from `time`, which shadowed the
                        # stdlib `time` module imported at the top of the file.
                        blog_time = remove_blank(content.find_next(
                            'div', attrs={'class': 'from'}).find_next('a').text)

                        # NOTE(review): with y == '2023' this test is always
                        # true, so the break branch is unreachable; kept for
                        # when a different year is configured.
                        if y in blog_time or y == '2023':
                            record['blog_time'] = blog_time
                        else:
                            print(blog_time)
                            break

                        # 微博正文
                        txt = remove_blank(remove_html(content.find_next(
                            'p', attrs={'node-type': 'feed_list_content'}).text))
                        record['blog_text'] = txt

                        # 微博图片 — `pic` URLs; the original reused the name
                        # `url`, shadowing the page URL above.
                        pic_list = content.find_next(
                            'div', attrs={'node-type': 'fl_pic_list'}).find_all(
                            'li', attrs={'action-type': 'fl_pics'})
                        record['blog_pics'] = [pic.find_next('img')['src'] for pic in pic_list]

                        # 转发、评论、点赞数 — the bare label text means zero.
                        record['forward_count'] = remove_blank(content.find_next(
                            'a', attrs={'action-type': 'feed_list_forward'}).text)
                        if record['forward_count'] == '转发':
                            record['forward_count'] = '0'
                        record['comment_count'] = remove_blank(content.find_next(
                            'a', attrs={'action-type': 'feed_list_comment'}).text)
                        if record['comment_count'] == '评论':
                            record['comment_count'] = '0'
                        record['like_count'] = remove_blank(content.find_next(
                            'a', attrs={'action-type': 'feed_list_like'}).text)
                        if record['like_count'] == '赞':
                            record['like_count'] = '0'

                        # 博主id — third path segment of the post link.
                        record['user_id'] = record['blog_link'].split('/')[3]

                        # 博主昵称
                        record['user_name'] = content.find_next(
                            'a', attrs={'class': 'name'})['nick-name']

                        # 删除广告
                        if any(word in txt for word in filter_list):
                            print('{}被过滤'.format(record['blog_link']))
                        else:
                            record_list.append(record)
                    except Exception:
                        # A malformed card is skipped; keep parsing the page.
                        # (Was a bare `except:`, which also swallowed
                        # SystemExit/KeyboardInterrupt.)
                        pass
            except Exception:
                # Network or parse failure for this page; try the next one.
                pass
            print(f'page={page},num={len(record_list)}')
        print(f'd={d}已完成，已有微博{len(record_list)}条')

    # Fix: the original wrapped this call in `with open('data/record.json',
    # 'w')` even though write() opens the same file itself — the outer handle
    # was never written to and only redundantly truncated the file first.
    write(record_list)

def get_comments():
    """Fetch up to 10 top-level comments for each record in data/record.json.

    Reads the records produced by get_weibo(), queries Weibo's
    buildComments endpoint per post, stores the tag-stripped comment texts
    under the 'comments' key (an empty list on any failure), and writes the
    augmented records back via write().
    """
    with open('data/record.json', 'r', encoding='utf-8') as f:
        record_list = json.load(f)  # was json.loads(f.read())

    n = 0
    for blog in record_list:
        try:
            url = ('https://weibo.com/ajax/statuses/buildComments?'
                   'is_reload=1&'
                   'id={}&'
                   'is_show_bulletin=2&'
                   'is_mix=0&'
                   'count=10&'
                   'type=feed&'
                   'uid={}&'
                   'fetch_level=0&'
                   'locale=zh-CN').format(blog['blog_id'], blog['user_id'])
            # Fix: the original request had no timeout and could hang forever.
            res = requests.get(url, headers=headers, timeout=30).json()
            comment_list = [remove_html(data['text']) for data in res['data']]
        except Exception as e:
            # Best-effort: a failed post simply gets no comments.
            print(e)
            comment_list = []
        finally:
            blog['comments'] = comment_list
            n += 1
            if n % 10 == 0:
                print(n)  # progress marker every 10 posts
            time.sleep(0.5)  # throttle requests to avoid rate limiting
    write(record_list)

def write(record_list, path='data/record.json'):
    """Serialize *record_list* to *path* as pretty-printed UTF-8 JSON.

    Args:
        record_list: JSON-serializable list of record dicts.
        path: destination file; defaults to the project's record store,
            so existing callers are unaffected by the new parameter.
    """
    with open(path, 'w', encoding='UTF-8') as f:
        # json.dump streams straight to the file instead of building the
        # whole document string in memory first.
        json.dump(record_list, f, indent=2, ensure_ascii=False)

if __name__ == '__main__':
    # Run the pipeline only when executed as a script, not on import.
    main_worker()