import requests
import os
from urllib.parse import unquote
from datetime_tools import format_date, sleep_random, get_now
from bs4 import BeautifulSoup
import csv
import re
import shutil


class KikuSpider:
    """Incremental crawler for a fixed set of Weibo (m.weibo.cn) accounts.

    For each account in ``self.config`` the spider pages through the mobile
    Weibo container API, stops as soon as it reaches the newest post already
    recorded in that account's ``content.txt``, downloads pictures, live
    photos and videos attached to the new posts, and finally splices the new
    CSV rows in directly below the header row of ``content.txt`` (backing up
    the previous file first).
    """

    def __init__(self):
        # Accounts to crawl: numeric Weibo uid, local output directory, and a
        # human-readable name (the name is informational only).
        self.config = [
            {'user_id': '3669102477', 'base_dir': r"D:/鞠婧祎/微博内容/", 'name': '鞠婧祎的微博'},
            {'user_id': '3753348253', 'base_dir': r"D:/鞠婧祎/微博内容小号Hahhhnxsm/", 'name': '鞠婧祎的微博小号'},
            {'user_id': '6346966579', 'base_dir': r"D:/鞠婧祎/个人工作室/", 'name': '鞠婧祎的个人工作室'}
        ]
        # Browser-like headers for the first, anonymous profile-page request.
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Host': 'm.weibo.cn',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0',
        }
        # Headers carrying a logged-in session cookie, used for the API calls.
        # NOTE(review): the hard-coded Cookie / XSRF-TOKEN values expire and
        # must be refreshed manually when requests start failing.
        self.login_headers = {
            'Accept': 'application/json, text/plain, */*',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'Host': 'm.weibo.cn',
            'mweibo-pwa': '1',
            'origin': 'https://m.weibo.cn',
            'Pragma': 'no-cache',
            'x-requested-with': 'XMLHttpRequest',
            'TE': 'Trailers',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0',
            'Cookie': '_T_WM=18944806254; XSRF-TOKEN=892d84; WEIBOCN_FROM=1110006030; MLOGIN=1; M_WEIBOCN_PARAMS=luicode%3D20000174%26fid%3D1005053669102477%26uicode%3D10000011; SUB=_2A25yotdMDeRhGeBI7lQS8SzOwj2IHXVubPkErDV6PUJbktANLXnQkW1NRpVacYTJ6Te1P374M_1KBw-QQRnYDADd; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WWEzR_Ukb14DGjFU3TamRn05NHD95QcSo-ce02Eeo.pWs4Dqcjpi--ciK.Ni-27i--ciK.fiKyhi--ciK.4iK.0i--ciK.4iK.0i--fi-ihi-iWeo57entt; SSOLoginState=1604757277'
        }

    def begin_spider(self):
        """Crawl every configured account in order."""
        for user in self.config:
            self.spider_one(user)

    def spider_one(self, user: dict):
        """Crawl one account.

        ``user`` must contain ``user_id`` (numeric uid as a string) and
        ``base_dir`` (output directory; created if missing).
        """
        user_id = user['user_id']
        base_dir = user['base_dir']
        if not os.path.exists(base_dir):
            os.makedirs(base_dir)
        print('开始请求')
        session = requests.Session()

        # The anonymous profile page sets the M_WEIBOCN_PARAMS cookie, whose
        # first field carries the container id of the user's profile tab.
        r = session.get('https://m.weibo.cn/u/' + user_id, headers=self.headers)
        print(r.cookies.get_dict())
        user_inf_id = unquote(r.cookies.get_dict()['M_WEIBOCN_PARAMS']).split('&')[0].split('=')[1]
        print('个人详情ID获取完毕：' + user_inf_id)

        # The profile container lists the tabs; tabs[1] is the weibo feed.
        params = {'type': 'uid', 'value': user_id, 'containerid': user_inf_id}
        r = session.get('https://m.weibo.cn/api/container/getIndex', params=params, headers=self.login_headers)
        print(r.cookies.get_dict())
        jsonObj = r.json()
        weibo_id = jsonObj['data']['tabsInfo']['tabs'][1]['containerid']
        print('微博ID获取完毕：' + weibo_id)

        content_path = os.path.join(base_dir, 'content.txt')
        end_id = None      # id of the newest post already saved to disk
        new_content = []   # freshly crawled rows, newest first
        if not os.path.exists(content_path):
            # First run: seed the file with only the header row.  The header
            # lives in old_content (not new_content) so that the splice in the
            # finally-block keeps it as the first line of the file.
            old_content = [['id', 'mid', '创建日期', '转发', '评论', '赞', '微博内容', '备注']]
        else:
            # Single read: row 0 is the header, row 1 the newest saved post.
            with open(content_path, 'r', encoding='utf-8') as csvFile:
                old_content = list(csv.reader(csvFile))
            if len(old_content) > 1:
                end_id = old_content[1][0]

        params = {'type': 'uid', 'value': user_id, 'containerid': weibo_id}
        next_page = 1
        card_list = []
        while True:
            sleep_random('开始获取微博信息')
            r = session.get('https://m.weibo.cn/api/container/getIndex', params=params, headers=self.login_headers)
            jsonObj = r.json()
            if jsonObj['ok'] == 0:
                print('没有内容，退出')
                break
            is_end = False
            for card in jsonObj['data']['cards']:
                if card['card_type'] == 11:
                    print('推荐关注内容，跳过')
                else:
                    card_id = card['mblog']['id']
                    if card_id == end_id:
                        print('检测到最后抓取的ID，退出')
                        is_end = True
                        break
                    # Prepend so card_list ends up oldest-first.
                    card_list.insert(0, card)
            if is_end:
                break
            print('第{}页内容获取完毕'.format(next_page))
            next_page += 1
            print('-' * 50)
            params = {'type': 'uid', 'value': user_id, 'containerid': weibo_id, 'page': next_page}
            print('开始获取第{}页内容'.format(next_page))
        try:
            for card in card_list:
                sleep_random('开始获取实际内容')
                card_id = card['mblog']['id']
                mid = card['mblog']['mid']
                created_at = format_date(card['mblog']['created_at'])
                # Reposts
                reposts_count = card['mblog']['reposts_count']
                # Comments
                comments_count = card['mblog']['comments_count']
                # Likes
                attitudes_count = card['mblog']['attitudes_count']
                text = BeautifulSoup(card['mblog']['text'], features="html.parser").text
                print('帖子id：{} '.format(card_id), 'mid：{} '.format(mid), created_at, reposts_count, comments_count, attitudes_count, text)
                content = [card_id, mid, created_at, reposts_count, comments_count, attitudes_count, text.replace('\n', ' ')]
                # Strip characters that are illegal in Windows file names.
                text = re.sub(r'[\\/:*?"<>|]', '', text).replace('\n', '')
                # Live-photo handling: pic_video appears to be comma-separated
                # "picIndex:fileId" pairs — TODO confirm against the API.
                live_dict = {}
                if 'pic_video' in card['mblog'] and card['mblog']['pic_video']:
                    live_list = card['mblog']['pic_video'].split(',')
                    for item_string in live_list:
                        item = item_string.split(':')
                        live_dict[item[0]] = item[1]
                print('live dict：', live_dict)
                # Picture handling
                if 'pics' in card['mblog']:
                    for i, pic in enumerate(card['mblog']['pics']):
                        pic_url = pic['large']['url']
                        image_name = pic_url.split('/')[-1]
                        src_image = base_dir + created_at + ' ' + image_name
                        if os.path.exists(src_image):
                            # A previous run saved it under the raw name —
                            # rename it to the "<date> <text> <index>" scheme.
                            dst_image = base_dir + created_at + ' ' + text + ' ' + str(i) + '.' + image_name.split('.')[-1]
                            os.rename(src_image, dst_image)
                        else:
                            dst_image_file = base_dir + created_at + ' ' + text + ' ' + str(i) + '.' + image_name.split('.')[-1]
                            if os.path.exists(dst_image_file):
                                print(dst_image_file, '存在跳过')
                            else:
                                image = requests.get(pic_url)
                                with open(dst_image_file, 'wb') as image_file:
                                    image_file.write(image.content)
                                sleep_random('获取微博图片完毕')
                        if live_dict and (str(i) in live_dict):
                            live_file_name = base_dir + created_at + ' ' + text + ' ' + str(i) + '.MOV'
                            if os.path.exists(live_file_name):
                                print(live_file_name, '存在跳过')
                            else:
                                live = requests.get(f'https://video.weibo.com/media/play?livephoto=//us.sinaimg.cn/{live_dict[str(i)]}.mov&KID=unistore,videomovSrc')
                                with open(live_file_name, 'wb') as live_file:
                                    live_file.write(live.content)

                # Video handling
                if 'page_info' in card['mblog']:
                    if card['mblog']['page_info']['type'] == 'webpage':
                        sleep_random('可能有视频文件，正在检查')
                        page_url = card['mblog']['page_info']['page_url']
                        try:
                            # Follow the redirect, then swap 'index' for
                            # 'object' to reach the JSON story descriptor.
                            r = session.get(page_url)
                            location_url = r.url
                            story_url = location_url.replace('index', 'object')
                            r = session.get(story_url)
                            jsonObj = r.json()
                            if 'ok' in jsonObj and jsonObj['ok'] == 1:
                                video_url = jsonObj['data']['object']['stream']['url']
                                video_file_name = text + '.' + video_url.split('?')[0].split('/')[-1].split('.')[-1]
                                video = requests.get(video_url)
                                with open(base_dir + created_at + ' ' + video_file_name, 'wb') as video_file:
                                    video_file.write(video.content)
                        except Exception as e:
                            print('获取视频文件异常，可能是外链，URL:{}'.format(page_url), e)
                            content.append('获取失败可能是外链，URL:{}'.format(page_url))
                    elif card['mblog']['page_info']['type'] == 'video':
                        sleep_random('可能有720p视频文件，正在检查')
                        print('调试信息mp4_720p_mp4----', card['mblog']['page_info']['media_info']['mp4_720p_mp4'])
                        # Pick the best available stream, highest quality first.
                        video_url = None
                        if card['mblog']['page_info']['media_info']['mp4_720p_mp4'] != '':
                            video_url = card['mblog']['page_info']['media_info']['mp4_720p_mp4']
                        elif card['mblog']['page_info']['media_info']['mp4_hd_url'] != '':
                            video_url = card['mblog']['page_info']['media_info']['mp4_hd_url']
                        elif card['mblog']['page_info']['media_info']['stream_url_hd'] != '':
                            video_url = card['mblog']['page_info']['media_info']['stream_url_hd']
                        elif card['mblog']['page_info']['media_info']['stream_url'] != '':
                            video_url = card['mblog']['page_info']['media_info']['stream_url']
                        try:
                            if video_url:
                                video_file_name = text + '.' + video_url.split('?')[0].split('/')[-1].split('.')[-1]
                                if '.' not in video_file_name:
                                    video_file_name += '.mp4'
                                video = requests.get(video_url)
                                with open(base_dir + created_at + ' ' + video_file_name, 'wb') as video_file:
                                    video_file.write(video.content)
                            else:
                                print('视频文件URL异常')
                        except Exception as e:
                            print('获取视频文件异常', e)
                            content.append('获取失败可能是外链，URL:{}'.format(video_url))
                    else:
                        print(card['mblog']['page_info']['type'] + '被忽略了，可能是外链')
                        content.append(card['mblog']['page_info']['type'] + '被忽略了，可能是外链-URL:{}'.format(card['mblog']['page_info']['page_url']))
                if 'retweeted_status' in card['mblog']:
                    retweeted_id = card['mblog']['retweeted_status']['id']
                    content.append('转发的微博:{}'.format(retweeted_id))
                    print('转发的微博，ID:{}'.format(retweeted_id))
                # Prepend so new_content ends up newest-first.
                new_content.insert(0, content)
        except Exception as e:
            print('有异常发生:', e)
        finally:
            # Always persist whatever was crawled, even after an exception.
            if new_content:
                # Back up the current CSV before rewriting — but only if it
                # exists (on the first run there is nothing to back up yet).
                if os.path.exists(content_path):
                    shutil.copyfile(content_path,
                                    os.path.join(base_dir, 'content 备份 ' + get_now() + '.txt'))
                # Splice the new rows in directly below the header row.
                old_content[1:1] = new_content
                with open(content_path, 'w', newline='', encoding='utf-8') as csvFile:
                    csv_writer = csv.writer(csvFile)
                    csv_writer.writerows(old_content)


if __name__ == '__main__':
    # Script entry point: crawl all configured accounts.
    KikuSpider().begin_spider()