#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018.09.18 18:41 
# @Author : By Joker
# @File : download_video
# @Software: PyCharm
import io
import json
import os
import re
import sys
from contextlib import closing

import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from urllib3.exceptions import InsecureRequestWarning

# Re-wrap stdout as UTF-8 so Chinese text prints without mojibake on Windows consoles.
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
# Silence the InsecureRequestWarning caused by the verify=False download requests below.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)


class InDouYin(object):
    """Download every video posted by one Douyin (TikTok China) user.

    A mobile-emulated Chrome browser is driven against Douyin's
    ``/aweme/v1/aweme/post/`` JSON API (the endpoint validates the mobile
    user-agent plus a short-lived ``_signature``/``dytk`` pair), paging
    through the user's feed via ``max_cursor``; each video file is then
    streamed to disk with ``requests``.
    """

    # Post-list endpoint template; pages differ only in their max_cursor value.
    API_URL = ('https://www.douyin.com/aweme/v1/aweme/post/'
               '?user_id=%s&count=%s&max_cursor=%s&aid=1128&_signature=%s&dytk=%s')
    # Videos are saved under this directory (created on demand).
    SAVE_DIR = 'D:/videos/douyin/'

    def __init__(self, user_id, count, _signature, dytk):
        """
        :param user_id: numeric Douyin user id, as a string.
        :param count: page size requested from the API (Douyin caps it at 21).
        :param _signature: short-lived request signature copied from the share
            page (valid ~20 seconds — presumably; confirm against the site).
        :param dytk: API token copied from the share page.
        """
        self.user_id = user_id
        self.count = count
        # Headers for the raw video download. 'path'/'referer' are stale
        # sample values from a capture; the CDN mainly checks the user-agent.
        self.headers = {
            'authority': 'www.douyin.com',
            'method': 'GET',
            'path': '/aweme/v1/aweme/post/?user_id=57720812347&count=21&max_cursor=0&aid=1128&_signature=.1iWdRAZpP9Vd9b1WU1t1P9Ylm&dytk=4830f6e279a5f53872aab9e9dc112d33',
            'cookie': '_ga=GA1.2.304647671.1538101535; _gid=GA1.2.449706046.1538101535',
            'referer': 'https://www.douyin.com/share/user/57720812347?share_type=link',
            'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1'
        }
        # Emulate an iPhone so Douyin serves the mobile API responses.
        mobile_emulation = {'deviceName': 'iPhone X'}
        options = webdriver.ChromeOptions()
        options.add_experimental_option('mobileEmulation', mobile_emulation)
        # chromedriver path is hard-coded; `chrome_options`/`executable_path`
        # match the selenium 3.x API this script was written against.
        self.browser = webdriver.Chrome(
            executable_path=r'C:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe',
            chrome_options=options)
        self._signature = _signature
        self.dytk = dytk
        # URL of the first page (max_cursor=0).
        self.url = self._build_url('0')

    def _build_url(self, max_cursor):
        """Return the post-list API URL for paging cursor *max_cursor*."""
        return self.API_URL % (self.user_id, self.count, max_cursor,
                               self._signature, self.dytk)

    def _fetch_json(self, url):
        """Load *url* in the browser and parse the JSON shown in its <pre> tag."""
        self.browser.get(url)
        html_soup = BeautifulSoup(self.browser.page_source, 'lxml')
        return json.loads(str(html_soup.pre.string))

    def handle_url(self):
        """Collect the API URL of every page of the user's feed.

        Follows ``max_cursor`` until the API reports ``has_more == 0``.
        Returns an empty list when the first page has a non-zero
        ``status_code`` (expired signature, bad user id, ...).

        Bug fix: the original ``while/else`` appended the final page's URL a
        second time (the ``else`` of a break-less loop always runs), so every
        video on that page was downloaded twice.
        """
        web_data = self._fetch_json(self.url)
        if web_data['status_code'] != 0:
            return []
        url_list = [self.url]
        # At most `count` (21) videos per page; has_more == 1 means more pages.
        while web_data['has_more'] == 1:
            url = self._build_url(web_data['max_cursor'])
            url_list.append(url)
            web_data = self._fetch_json(url)
        return url_list

    def get_download_url(self, url_list):
        """Fetch every page in *url_list* and extract video info.

        :return: ``(download_url, title_list)`` — parallel lists of the first
            play address and the share description of each video.
        """
        download_url = []
        title_list = []
        for url in url_list:
            web_data = self._fetch_json(url)
            if web_data['status_code'] != 0:
                continue  # skip pages the API rejected instead of crashing
            for aweme in web_data['aweme_list']:
                download_url.append(aweme['video']['play_addr']['url_list'][0])
                title_list.append(aweme['share_info']['share_desc'])
        return download_url, title_list

    def download_video(self, url, title):
        """Stream the video at *url* to ``SAVE_DIR/<sanitized title>.mp4``.

        Skips the download when the target file already exists; prints the
        HTTP status code on a non-200 response.
        """
        # Strip characters Windows forbids in file names.
        title = re.sub(r"[\/\\\:\*\?\"\<\>\|]", "", title)
        # Bug fix: the save directory was never created, so open() failed
        # on a fresh machine.
        os.makedirs(self.SAVE_DIR, exist_ok=True)
        path = self.SAVE_DIR + title + '.mp4'
        with closing(requests.get(url, headers=self.headers, stream=True, verify=False)) as response:
            if response.status_code != 200:
                print(response.status_code)
                return
            if os.path.exists(path):
                print('文件: %s 已经存在' % path)
                return
            chunk_size = 1024
            # Bug fix: read content-length only after the status check and
            # tolerate a missing header instead of raising KeyError.
            content_size = int(response.headers.get('content-length', 0))
            print('======================== %s is downloading > >>>>>>>>>> ' % title)
            sys.stdout.write(
                '======================== [File Size is ]: %0.2f MB\n' % (content_size / chunk_size / 1024))
            size = 0
            with open(path, 'wb') as f:
                for data in response.iter_content(chunk_size=chunk_size):
                    f.write(data)
                    size += len(data)
                    if content_size:  # avoid ZeroDivisionError when length unknown
                        sys.stdout.write('[Progress]: %0.2f%%' % float(size / content_size * 100) + '\r')
                        sys.stdout.flush()

    def run(self):
        """Entry point: resolve all pages, collect video URLs, download each."""
        try:
            url_list = self.handle_url()
            download_url, title_list = self.get_download_url(url_list)
            for url, title in zip(download_url, title_list):
                self.download_video(url, title)
        finally:
            # Bug fix: the chromedriver/Chrome processes were never shut down.
            self.browser.quit()


if __name__ == '__main__':
    # Create the scraper and run it.
    # _signature is generated in real time: refresh the share page to grab the
    # latest value. It is only valid for ~20 seconds, so run promptly after.
    douyin_spider = InDouYin('57720812347', 21, 'hUUIARAW3vYvakiBHTRvfYVFCB', '4830f6e279a5f53872aab9e9dc112d33')
    douyin_spider.run()
