import csv
import re
from urllib.parse import unquote

import os
import requests
from bs4 import BeautifulSoup

from datetime_tools import format_date, sleep_random
from main_all_rename import get_video

if __name__ == '__main__':
    # Candidate user ids (kept for reference):
    #   1093052462  - my own weibo account
    #   6346966579  - personal studio account
    #   3669102477  - Ju Jingyi's weibo account
    # user_id = '6346966579'
    user_id = '3669102477'
    # Directory that receives the scraped CSV, downloaded images/videos and logs.
    base_dir = "D:/鞠婧祎/微博内容测试/"
    # exist_ok=True replaces the racy exists()-then-makedirs() check (TOCTOU).
    os.makedirs(base_dir, exist_ok=True)
    print('开始请求')
    # Headers for the plain-HTML profile page request (m.weibo.cn mobile site).
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Host': 'm.weibo.cn',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',
        # 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'
    }
    # Headers for the JSON container API requests (XHR-style, with login cookie).
    # NOTE(review): the hard-coded Cookie is account-specific and will expire;
    # consider loading it from a config file or environment variable instead
    # of keeping credentials in source control.
    headers2 = {
        'Accept': 'application/json, text/plain, */*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Cache-Control': 'max-age=0',
        'Connection': 'keep-alive',
        'Host': 'm.weibo.cn',
        'mweibo-pwa': '1',
        'origin': 'https://m.weibo.cn',
        'x-requested-with': 'XMLHttpRequest',
        'Referer': 'https://m.weibo.cn/u/3669102477',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',
        'Cookie': '_T_WM=0066bbafbec70e4b74b6ff88f78957c0; hibext_instdsigdipv2=1; from=1110006030; WEIBOCN_FROM=1110006030; MLOGIN=1; M_WEIBOCN_PARAMS=lfid%3D102803%26luicode%3D20000174%26fid%3D1005053669102477%26uicode%3D10000011; SUB=_2A252mnLODeRhGeBI7lQS8SzOwj2IHXVSZR6GrDV6PUJbkdANLWHFkW1NRpVacR40_3yHTfIjsjYTfqN71FsZ1QM3; SUHB=079j8qxb09Qc_T; SCF=AsfcBVjiz0kcskVtMDqUdIezE3sxu6PbZHnm6NGIZSsxapLvtpOzc8_9QYkVYCmqAfw8AYtorZOYV_p0p5RQAmg.; SSOLoginState=1537082014'
        # 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'
    }

    session = requests.Session()

    # Step 1: request the user's mobile profile page. The server sets the
    # M_WEIBOCN_PARAMS cookie, whose URL-encoded '&'-joined key=value pairs
    # include a container id for the profile.
    r = session.get('https://m.weibo.cn/u/' + user_id, headers=headers)
    print(r.cookies.get_dict())
    # Takes the value of the FIRST key=value pair of the cookie.
    # NOTE(review): assumes that first pair holds the profile container id —
    # confirm against a live response; raises KeyError if the cookie is absent
    # (e.g. when the request is blocked or the cookie format changes).
    user_inf_id = unquote(r.cookies.get_dict()['M_WEIBOCN_PARAMS']).split('&')[0].split('=')[1]
    print('个人详情ID获取完毕：' + user_inf_id)

    # Step 2: query the container index API with that id; tabs[1] is
    # presumably the user's posts tab (TODO confirm ordering is stable) and
    # its containerid is what the paging loop below needs.
    params = {'type': 'uid', 'value': user_id, 'containerid': user_inf_id}
    r = session.get('https://m.weibo.cn/api/container/getIndex', params=params, headers=headers2)
    print(r.cookies.get_dict())
    jsonObj = r.json()
    weibo_id = jsonObj['data']['tabsInfo']['tabs'][1]['containerid']
    print('微博ID获取完毕：' + weibo_id)

    # Paging parameters for the posts container.
    params = {'type': 'uid', 'value': user_id, 'containerid': weibo_id}
    cards = []   # collected post cards, consumed by the processing loop below
    begin = 0    # slice offset applied only to the starting page's cards
    count = 1    # stop once this many cards have been collected (small test run)
    fcount = 0   # number of cards collected so far
    page = 1     # starting page; never changes after this point
    next_page = page
    if page > 1:
        # The API's first page needs no 'page' param; later start pages do.
        params.update({'page': page})

    while True:
        sleep_random('开始获取内容')
        r = session.get('https://m.weibo.cn/api/container/getIndex', params=params, headers=headers2)
        jsonObj = r.json()
        # ok == 0 signals there is no (more) content for this container.
        if jsonObj['ok'] == 0:
            print('没有内容，退出')
            break
        # Only the starting page (page == next_page on the first iteration)
        # gets the `begin` slice; every later page is taken whole.
        if page == next_page:
            temp_cards = jsonObj['data']['cards'][begin:]
        else:
            temp_cards = jsonObj['data']['cards']
        for t in temp_cards:
            # card_type 11 is a recommended-follows card, not a post — skip it.
            if t['card_type'] == 11:
                print('推荐关注内容，跳过')
            else:
                cards.append(t)
                fcount += 1
                if fcount >= count:
                    break
        if fcount >= count:
            break
        print('第{}页内容获取完毕'.format(page))
        next_page = next_page + 1
        print('-------------------------------------------------------------------------------------------------------')
        # Rebuild params with the next page number and continue paging.
        params = {'type': 'uid', 'value': user_id, 'containerid': weibo_id, 'page': next_page}
        print('开始获取第{}页内容'.format(next_page))

    # Append every collected post to a CSV file (named .txt but CSV-formatted),
    # renaming/downloading its images and downloading its videos along the way.
    with open(base_dir + "content.txt", 'a', newline='', encoding='utf-8') as csvFile:
        csv_writer = csv.writer(csvFile)
        # Emit the header row only when the file is brand new (size 0).
        if not os.path.getsize(base_dir + "content.txt"):
            csv_writer.writerow(['id', 'mid', '创建日期', '转发', '评论', '赞', '微博内容', '备注'])

        for card in cards:
            mblog = card['mblog']
            post_id = mblog['id']  # renamed from `id` to avoid shadowing the builtin
            mid = mblog['mid']
            created_at = format_date(mblog['created_at'])
            reposts_count = mblog['reposts_count']      # repost count
            comments_count = mblog['comments_count']    # comment count
            attitudes_count = mblog['attitudes_count']  # like count
            # Strip HTML tags from the post body.
            text = BeautifulSoup(mblog['text'], features="html.parser").text
            print('帖子id：{} '.format(post_id), 'mid：{} '.format(mid), created_at, reposts_count,
                  comments_count, attitudes_count, text)
            content = [post_id, mid, created_at, reposts_count, comments_count, attitudes_count, text]
            # Sanitize ONCE for use in file names: drop characters illegal on
            # Windows plus newlines. (Previously recomputed per image and again
            # for videos — idempotent, so hoisting preserves behavior.)
            safe_text = re.sub(r'[\\/:*?"<>|]', '', text).replace('\n', '')

            # --- image handling ---
            if 'pics' in mblog:
                for i, pic in enumerate(mblog['pics']):
                    pic_url = pic['large']['url']
                    image_name = pic_url.split('/')[-1]
                    # Name a previous downloader may have used for this image.
                    src_image = base_dir + created_at + ' ' + image_name
                    # Target name: "<date> <sanitized text> <index>.<ext>"
                    dst_image = base_dir + created_at + ' ' + safe_text + ' ' + str(i) + '.' + \
                                image_name.split('.')[-1]
                    if os.path.exists(src_image):
                        os.rename(src_image, dst_image)
                    else:
                        # No raw file on disk: log it, then download unless the
                        # renamed target already exists.
                        with open(base_dir + "log.txt", 'a', newline='', encoding='utf-8') as logFile:
                            log_writer = csv.writer(logFile)
                            log_writer.writerow([src_image, '不存在'])
                            if os.path.exists(dst_image):
                                log_writer.writerow([dst_image, '存在跳过'])
                            else:
                                image = requests.get(pic_url)
                                with open(dst_image, 'wb') as image_file:
                                    image_file.write(image.content)
                                sleep_random('获取微博图片完毕')

            # --- video handling ---
            if 'page_info' in mblog:
                page_info = mblog['page_info']
                if page_info['type'] == 'webpage':
                    sleep_random('可能有视频文件，正在检查')
                    # Best-effort download; narrowed from a bare `except:` so
                    # SystemExit/KeyboardInterrupt still propagate.
                    try:
                        if 'media_info' in page_info:
                            video_url = page_info['media_info']['stream_url']
                        else:
                            # Scrape the linked page for a <source src=...> tag.
                            r = session.get(page_info['page_url'])
                            bsObj = BeautifulSoup(r.text, features="html.parser")
                            video_url = bsObj.find('source').attrs['src']
                            if video_url.find('.') == -1:
                                video_url += '.mp4'

                        video_file_name = safe_text + '.' + video_url.split('?')[0].split('/')[-1].split('.')[-1]
                        video = requests.get(video_url)
                        with open(base_dir + created_at + ' ' + video_file_name, 'wb') as video_file:
                            video_file.write(video.content)
                    except Exception:
                        print('获取视频文件异常')
                elif page_info['type'] == 'video':
                    sleep_random('可能有720p视频文件，正在检查')
                    page_url = page_info['page_url']
                    page_url_end = page_url.split('?')[0].split('/')[-1]
                    video_url = ''
                    if page_url_end == 'getIndex':
                        # Indirect link: resolve the real content URL first.
                        r = session.get(page_url)
                        jsonObj = r.json()
                        next_url = jsonObj['data']['cards'][0]['content_url']
                        video_url = get_video(next_url)
                    elif page_url_end == 'index':
                        video_url = get_video(page_url)
                    if not video_url:
                        # NOTE(review): raises KeyError when media_info is
                        # absent — same as the original; confirm that is the
                        # intended failure mode.
                        video_url = page_info['media_info']['stream_url']
                    if video_url:
                        video_file_name = safe_text + '.' + video_url.split('?')[0].split('/')[-1].split('.')[-1]
                        # NOTE(review): the name always contains '.' by
                        # construction, so this guard never fires; kept for
                        # behavior parity.
                        if '.' not in video_file_name:
                            video_file_name += '.mp4'
                        video = requests.get(video_url)
                        with open(base_dir + created_at + ' ' + video_file_name, 'wb') as video_file:
                            video_file.write(video.content)
                    else:
                        content.append('获取失败可能是外链，URL:{}'.format(page_url))
                else:
                    # Unknown page_info type — probably an external link.
                    print(page_info['type'] + '被忽略了，可能是外链')
                    content.append(page_info['type'] + '被忽略了，可能是外链-URL:{}'.format(
                        page_info['page_url']))

            # Record the source post id for retweets.
            if 'retweeted_status' in mblog:
                retweeted_id = mblog['retweeted_status']['id']
                content.append('转发的微博:{}'.format(retweeted_id))
                print('转发的微博，ID:{}'.format(retweeted_id))
            csv_writer.writerow(content)
