# -*- coding: utf-8 -*-
import requests
from urllib.parse import unquote
from datetime_tools import format_date, sleep_random
from bs4 import BeautifulSoup
import csv
import os
from selenium import webdriver
# from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options


def get_video(next_url):
    """Open a Weibo video page in Chrome and scrape the direct stream URL.

    Parameters
    ----------
    next_url : str
        URL of the Weibo video page to load.

    Returns
    -------
    str or None
        The 720p stream URL if present, else the 480p URL, else ``None``
        (also ``None`` on any timeout/scraping failure).

    NOTE(review): relies on the module-level ``chrome_options`` assigned
    further down in this file; the function is only invoked after that
    assignment has run.
    """
    driver = webdriver.Chrome(
        executable_path=r'C:\Users\Administrator\AppData\Local\Google\Chrome\Application\chromedriver.exe',
        options=chrome_options)
    # driver = webdriver.Firefox(executable_path=r'C:\Program Files\Mozilla Firefox\geckodriver.exe',
    #                            options=firefox_options)
    try:
        # driver.get() is inside the try so a navigation failure still
        # reaches the finally and the browser process is not leaked.
        driver.get(next_url)
        # Wait up to 30s for the player node that carries the URL-encoded
        # 'video-sources' attribute.
        ele = WebDriverWait(driver, 30).until(
            EC.presence_of_element_located((By.CSS_SELECTOR, '[node-type="common_video_player"]')))
        # Reuse the element already located (the original re-found it via
        # the deprecated find_element_by_css_selector).
        video_source = ele.get_attribute('video-sources')
        # 'video-sources' is a URL-encoded query string of the form
        # "...&720=<url>&480=<url>&...". Split on '&' BEFORE unquoting so
        # separators inside the encoded value are not confused with real
        # delimiters. (Bug fix: the original 720 branch skipped the '&'
        # split and could return a URL with trailing parameters attached.)
        for quality in ('720', '480'):
            candidate = unquote(video_source.split(quality + '=')[-1].split('&')[0])
            print(candidate)
            if 'http' in candidate:
                return candidate
        return None
    except Exception:
        # Narrowed from a bare except: still best-effort (callers treat
        # None as "no video"), but no longer swallows KeyboardInterrupt.
        return None
    finally:
        # quit() tears down the whole browser process; close() would only
        # close the window and leak the chromedriver process.
        driver.quit()


# Candidate account IDs (assign the one to scrape to user_id below):
# My own Weibo account
# 1093052462
# Personal studio account
# 6346966579
# Ju Jingyi's Weibo account
# 3669102477
user_id = '3669102477'
# Output directory for the CSV log and downloaded media.
# NOTE(review): hard-coded Windows path with non-ASCII characters.
base_dir = "D:/鞠婧祎/微博测试/"
if not os.path.exists(base_dir):
    os.makedirs(base_dir)

print('开始请求')
# Chrome options used by get_video(); no flags are currently set
# (the headless Firefox equivalents below are commented out).
chrome_options = Options()
# firefox_options = Options()
# firefox_options.add_argument('--headless')
# firefox_options.add_argument('--disable-gpu')

# Headers for the anonymous mobile-site request used only to obtain the
# M_WEIBOCN_PARAMS cookie (profile container id discovery).
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
    'Host': 'm.weibo.cn',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0'
    # 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'
}
# Headers for the authenticated API calls.
# NOTE(review): the Cookie is a captured login session and will expire;
# it must be refreshed manually before running.
login_headers = {
    'Accept': 'application/json, text/plain, */*',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive',
    'Host': 'm.weibo.cn',
    'mweibo-pwa': '1',
    'origin': 'https://m.weibo.cn',
    'x-requested-with': 'XMLHttpRequest',
    'Referer': 'https://m.weibo.cn/u/3669102477',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0',
    'Cookie': '_T_WM=0066bbafbec70e4b74b6ff88f78957c0; hibext_instdsigdipv2=1; from=1110006030; WEIBOCN_FROM=1110006030; MLOGIN=1; M_WEIBOCN_PARAMS=lfid%3D102803%26luicode%3D20000174%26fid%3D1005053669102477%26uicode%3D10000011; SUB=_2A252mnLODeRhGeBI7lQS8SzOwj2IHXVSZR6GrDV6PUJbkdANLWHFkW1NRpVacR40_3yHTfIjsjYTfqN71FsZ1QM3; SUHB=079j8qxb09Qc_T; SCF=AsfcBVjiz0kcskVtMDqUdIezE3sxu6PbZHnm6NGIZSsxapLvtpOzc8_9QYkVYCmqAfw8AYtorZOYV_p0p5RQAmg.; SSOLoginState=1537082014'
    # 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.62 Safari/537.36'
}
session = requests.Session()
# First hit to the profile page sets the M_WEIBOCN_PARAMS cookie; its
# first key=value pair ("fid=<containerid>") carries the profile
# container id used by the getIndex API below.
r = session.get('https://m.weibo.cn/u/' + user_id, headers=headers)
user_inf_id = unquote(r.cookies.get_dict()['M_WEIBOCN_PARAMS']).split('&')[0].split('=')[1]
print('个人详情ID获取完毕：' + user_inf_id)

# The profile index response lists tabs; tabs[1] is the posts ("weibo")
# tab — its containerid scopes the post-listing API requests.
params = {'type': 'uid', 'value': user_id, 'containerid': user_inf_id}
r = session.get('https://m.weibo.cn/api/container/getIndex', params=params)
jsonObj = r.json()
weibo_id = jsonObj['data']['tabsInfo']['tabs'][1]['containerid']
print('微博ID获取完毕：' + weibo_id)

params = {'type': 'uid', 'value': user_id, 'containerid': weibo_id}

# Resume point: start scraping from this page (set to 1 to start over).
page = 43
if page > 1:
    params.update({'page': page})

# Append to the running log; the file has a .txt extension but the
# content is CSV. The header row is written only when the file is
# new/empty, so repeated runs keep appending below it.
with open(base_dir + "content.txt", 'a', newline='', encoding='utf-8') as csvFile:
    csv_writer = csv.writer(csvFile)
    if not os.path.getsize(base_dir + "content.txt"):
        csv_writer.writerow(['id', 'mid', '创建日期', '转发', '评论', '赞', '微博内容', '备注1', '备注2'])

    # Page through the post-listing API until it reports no more cards.
    while True:
        print('开始获取内容', end='')
        # Random delay between requests to avoid tripping rate limits.
        sleep_random()
        r = session.get('https://m.weibo.cn/api/container/getIndex', params=params, headers=login_headers)
        jsonObj = r.json()
        # ok == 0 signals the end of the timeline.
        if jsonObj['ok'] == 0:
            print('没有内容，退出')
            break
        for card in jsonObj['data']['cards']:
            # card_type 11 is a "recommended follows" insert, not a post.
            if card['card_type'] == 11:
                print('推荐关注内容，跳过')
                continue
            # NOTE(review): `id` shadows the builtin; scoped to this loop.
            id = card['mblog']['id']
            mid = card['mblog']['mid']
            created_at = format_date(card['mblog']['created_at'])
            # repost count
            reposts_count = card['mblog']['reposts_count']
            # comment count
            comments_count = card['mblog']['comments_count']
            # like count
            attitudes_count = card['mblog']['attitudes_count']
            # Strip HTML markup from the post body, keeping plain text.
            text = BeautifulSoup(card['mblog']['text'], features="html.parser").text
            print('帖子id：{} '.format(id), 'mid：{} '.format(mid), created_at, reposts_count, comments_count, attitudes_count, text)
            content = [id, mid, created_at, reposts_count, comments_count, attitudes_count, text]
            # Image handling: download every attached picture at 'large'
            # size, prefixing the filename with the post date.
            if 'pics' in card['mblog']:
                for pic in card['mblog']['pics']:
                    sleep_random('获取微博图片')
                    pic_url = pic['large']['url']
                    image_name = pic_url.split('/')[-1]
                    image = requests.get(pic_url)
                    with open(base_dir + created_at + ' ' + image_name, 'wb') as image_file:
                        image_file.write(image.content)
            # Video handling — currently DISABLED. The block below used
            # the Selenium helper get_video() to resolve 720p/480p stream
            # URLs from page_info cards; kept for reference.
            # if 'page_info' in card['mblog']:
            #     if card['mblog']['page_info']['type'] == 'webpage':
            #         print('可能有视频文件，正在检查', end='')
            #         sleep_random()
            #         try:
            #             if 'media_info' in card['mblog']['page_info']:
            #                 video_url = card['mblog']['page_info']['media_info']['stream_url']
            #             else:
            #                 r = session.get(card['mblog']['page_info']['page_url'])
            #                 bsObj = BeautifulSoup(r.text, features="html.parser")
            #                 video_url = bsObj.find('source').attrs['src']
            #                 if video_url.find('.') == -1:
            #                     video_url += '.mp4'
            #
            #             video_file_name = video_url.split('?')[0].split('/')[-1]
            #             video = requests.get(video_url)
            #             with open(base_dir + created_at + ' ' + video_file_name, 'wb') as video_file:
            #                 video_file.write(video.content)
            #         except:
            #             print('获取视频文件异常')
            #     elif card['mblog']['page_info']['type'] == 'video':
            #         print('可能有720p视频文件，正在检查', end='')
            #         sleep_random()
            #         page_url = card['mblog']['page_info']['page_url']
            #         page_url_end = page_url.split('?')[0].split('/')[-1]
            #         video_url = ''
            #         if page_url_end == 'getIndex':
            #             r = session.get(page_url)
            #             jsonObj = r.json()
            #             next_url = jsonObj['data']['cards'][0]['content_url']
            #             video_url = get_video(next_url)
            #         elif page_url_end == 'index':
            #             next_url = page_url
            #             video_url = get_video(next_url)
            #         if not video_url:
            #             video_url = card['mblog']['page_info']['media_info']['stream_url']
            #         if video_url:
            #             video_file_name = video_url.split('?')[0].split('/')[-1]
            #             if '.' not in video_file_name:
            #                 video_file_name += '.mp4'
            #             video = requests.get(video_url)
            #             with open(base_dir + created_at + ' ' + video_file_name, 'wb') as video_file:
            #                 video_file.write(video.content)
            #         else:
            #             content.append('获取失败可能是外链，URL:{}'.format(page_url))
            #     else:
            #         print(card['mblog']['page_info']['type'] + '被忽略了，可能是外链')
            #         content.append(card['mblog']['page_info']['type'] + '被忽略了，可能是外链-URL:{}'.format(card['mblog']['page_info']['page_url']))
            # Reposts: record the original post's id as a remark column.
            if 'retweeted_status' in card['mblog']:
                retweeted_id = card['mblog']['retweeted_status']['id']
                content.append('转发的微博:{}'.format(retweeted_id))
                print('转发的微博，ID:{}'.format(retweeted_id))
            csv_writer.writerow(content)
        # page = jsonObj['data']['cardlistInfo']['page']
        print('第{}页内容获取完毕'.format(page))
        page = page + 1
        print('-------------------------------------------------------------------------------------------------------')
        # Rebuild params with the incremented page number for the next loop.
        params = {'type': 'uid', 'value': user_id, 'containerid': weibo_id, 'page': page}
        print('开始获取第{}页内容'.format(page))
