# coding=utf-8

import json
import os
import time
import bhttp
import console
import sys
from lxml import etree
from lxml.html.clean import Cleaner

WORK_PATH = os.getcwd()
# Number of pages to crawl; each page holds 20 entries
GET_COUNT = 1000
GTE_SLEEP = 1


def get_filename(index, suffix):
    """Build a timestamped file name such as ``2024_1_2_3_4_7.mp4``.

    :param index: running counter appended after the timestamp
    :param suffix: file extension without the leading dot
    :return: ``<year>_<month>_<day>_<hour>_<minute>_<index>.<suffix>``
    """
    now = time.localtime()
    # First five struct_time fields: year, month, day, hour, minute.
    # str() keeps them unpadded, matching the original formatting.
    stamp = '_'.join(str(now[i]) for i in range(5))
    return f'{stamp}_{index}.{suffix}'


def trans_format(time_string, from_format, to_format='%Y.%m.%d %H:%M:%S'):
    """
    @note Convert a timestamp string from one format to another.
    :param time_string: timestamp text to convert
    :param from_format: ``strptime`` pattern describing *time_string*
    :param to_format: ``strftime`` pattern for the result
    :return: the reformatted timestamp string
    """
    parsed = time.strptime(time_string, from_format)
    return time.strftime(to_format, parsed)


class Reptile:
    """Downloads videos (and their text descriptions) from one Weibo user's
    "waterfall" video feed and stores them under ``./video``.

    Authentication relies on a raw cookie string read from ``cookie.txt``
    in the working directory.
    """

    def __init__(self):
        print('Reptile init')
        self.save_path = './video'
        # Create the output directory (no-op when it already exists).
        os.makedirs(self.save_path, exist_ok=True)
        # Make sure the cookie file exists. The original leaked both file
        # handles here by never closing them.
        if not os.path.exists('cookie.txt'):
            open('cookie.txt', 'w').close()
        with open('cookie.txt', 'r') as f:
            self.cookie = f.read()
        self.header = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.1 Safari/605.1.15",
            "Cookie": self.cookie
        }
        # Running counter used to build unique file names.
        self.index = 0
        # (video suffix, description suffix)
        self.suffix = ('.mp4', '.txt')

    def __download_video(self, video_link, file_name) -> None:
        """Fetch the video at *video_link* and write it to *file_name*
        inside ``self.save_path``."""
        file_name: str = os.path.join(self.save_path, file_name)
        # Work on a copy so per-request tweaks never leak into self.header.
        head = dict(self.header)
        response = bhttp.get(url=video_link, headers=head, timeout=(30, 10))
        # The original message carried a stray leading "f " (a missed
        # f-string prefix); there is no placeholder, so a plain string is right.
        assert response.status_code == 200, '下载视频接口访问失败'
        with open(file_name, 'wb') as f:
            f.write(response.content)

    def __download_content(self, content_link, page_code, file_name) -> None:
        """POST to the play-info component API and save the video's text
        description to *file_name* inside ``self.save_path``."""
        file_name: str = os.path.join(self.save_path, file_name)
        print(file_name)
        # Copy the shared header: the original mutated self.header here, so
        # the Referer/Content-Type of one request leaked into every later one.
        head = dict(self.header)
        post_data = 'data={"Component_Play_Playinfo":{"oid":"' + page_code + '"}}'
        head["Referer"] = f"https://weibo.com/tv/show/{page_code}?from=old_pc_videoshow"
        head["Content-Type"] = "application/x-www-form-urlencoded"
        response = bhttp.post(url=content_link, headers=head, data=post_data, timeout=(30, 10))
        assert response.status_code == 200, '获取视频内容接口请求失败'
        component_html = response.json()['data']['Component_Play_Playinfo']['text']
        # Strip anchor/paragraph/div tags so only the plain text remains.
        cl = Cleaner()
        cl.remove_tags = ['a', 'p', 'div']
        component_html = cl.clean_html(component_html)
        component_content = etree.HTML(component_html).xpath('//div/text() | //span/text()')
        assert len(component_content) > 0, 'component_content 长度不大于0'
        component_content = component_content[0]
        print(component_content)
        # The description is Chinese text; pin the encoding explicitly
        # instead of relying on the platform default.
        with open(file_name, 'w', encoding='utf-8') as f:
            f.write(component_content)

    def __get_content(self, rjson) -> "tuple[str, str]":
        """Derive the play-info component API link (and the page code it
        embeds) from one video's JSON entry.

        :param rjson: one entry of the waterfall list
        :return: (component API URL, page code extracted from h5_url)
        """
        assert 'h5_url' in rjson['page_info']['media_info']
        page_code = rjson['page_info']['media_info']['h5_url'].split('fid=')[1]
        return f'https://weibo.com/tv/api/component?page=/tv/show/{page_code}', page_code

    def __get_video_list_json(self, page) -> "tuple[list, int]":
        """
        Fetch one page of the user's video waterfall.

        Note: the original annotated this ``-> dict`` but it returns a 2-tuple.

        :param page: cursor of the video page to request
        :return: (list of video JSON entries, cursor of the next page)
        """
        api: str = 'https://weibo.com/ajax/profile/getWaterFallContent?uid=7099422177&cursor={}'.format(page)
        head = dict(self.header)
        head["Accept"] = "application/json, text/plain, */*"
        response = bhttp.get(url=api, headers=head, timeout=(30, 10))
        name = sys._getframe().f_code.co_name
        assert response.status_code == 200, f'{name} response failed!'
        # Parse the body once instead of re-parsing it for every check.
        rjson = response.json()
        assert rjson['ok'] == 1, f'{name} response.json()["ok"] != 1'
        assert 'data' in rjson, f'{name} no data parameter'
        # Original message said 吓一跳 ("startled") — a typo for 下一条 ("next").
        assert 'next_cursor' in rjson['data'], '没有找到下一条视频！'
        return rjson['data']['list'], rjson['data']['next_cursor']

    def __run(self, page) -> int:
        """Download every video (and its description) on one feed page.

        :param page: cursor of the page to process
        :return: cursor of the next page
        """
        video_list_json, next_cursor = self.__get_video_list_json(page)
        print(f'下一条视频 {next_cursor}')
        for vjson in video_list_json:
            self.index = self.index + 1
            # The original message read f_code_co_name (AttributeError
            # instead of AssertionError whenever the check actually failed).
            assert 'created_at' in vjson, f'{sys._getframe().f_code.co_name} no created_at parameter'
            # Build a unique file name from the post timestamp + counter.
            created_at: str = trans_format(vjson['created_at'], '%a %b %d %H:%M:%S +0800 %Y', '%Y_%m_%d_%H_%M_%S')
            file_name = f'{created_at}__{self.index}'
            self.__download_video(vjson['page_info']['media_info']['mp4_sd_url'], f'{file_name}{self.suffix[0]}')
            content_link, page_code = self.__get_content(vjson)
            print(content_link)
            self.__download_content(content_link, page_code, f'{file_name}{self.suffix[1]}')
        return next_cursor

    def start_reptile(self, count) -> int:
        """Scrape *count* pages of the video feed.

        :param count: number of pages to fetch (each page has 20 entries)
        :return: number of pages actually fetched (the original returned the
                 last loop index, i.e. ``count - 1``)
        """
        pages: int = 0
        cursor: int = 0
        for _ in range(count):
            cursor = self.__run(cursor)
            pages += 1
            # Be polite between page fetches; GTE_SLEEP was defined at module
            # level but never used by the original code.
            time.sleep(GTE_SLEEP)
        return pages


if __name__ == '__main__':
    # Entry point: crawl GET_COUNT pages of the configured user's feed.
    reptile = Reptile()
    reptile.start_reptile(GET_COUNT)
