# -*- coding:utf-8 -*-

"""
广场舞爬虫下载，需要配合 you-get 命令
lulee007<lulee007@live.com>
"""
import json
import os
import random
import time
from urllib.parse import urlparse, parse_qs

import requests
from bs4 import BeautifulSoup
from selenium import webdriver

from config import logger

UA = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"
browser = None


def fetch_video(title, url):
    """Collect every video page link for the show *title* from Youku.

    Loads the show page *url* in the module-level Selenium ``browser``
    (which must already be started), reads the pagination segment ids from
    the ``.p-tab-pills`` tabs, fetches each segment through the JSONP
    endpoint with ``requests``, and appends the extracted video URLs to
    ``output/list-<title>.txt``.

    :param title: show name, used for progress output and the output filename
    :param url: Youku show page URL to scrape
    """
    global browser
    # {0} = stage (segment) id; {1} = jQuery-style JSONP timestamp, reused
    # for the cache-busting "_" query parameter.
    video_list_template_url = "http://list.youku.com/show/point?id=310246&stage={0}&callback=jQuery111206797559198172303_{1}&_={1}"
    print("开始抓取{0}广场舞".format(title))
    headers = {
        'User-Agent': UA
    }
    browser.get(url)
    # Each <li> under .p-tab-pills is one pagination segment of the list.
    video_list = browser.find_elements_by_css_selector(".p-tab-pills li")
    data_url_list = []
    for item in video_list:
        data_id = item.get_attribute("data-id")
        # Fake a jQuery callback timestamp so each JSONP request looks fresh.
        now = str(int(time.time() * 1000000) + random.randint(0, 1000))
        # BUG FIX: the template has only two placeholders ({0} and {1});
        # the original passed a redundant third argument.
        data_url_list.append(video_list_template_url.format(data_id, now))
        print(data_id)
    logger.debug("获取视频列表段成功")
    video_urls = []
    for _url in data_url_list:
        url_c = urlparse(_url)
        queries = parse_qs(url_c.query)
        data = requests.get(url=_url, headers=headers).text
        # Strip the JSONP wrapper "jQuery..._<ts>( ... );" to get raw JSON.
        start = "_{0}(".format(queries['_'][0])
        json_start = data.find(start)
        json_end = data.rfind(");")
        result = json.loads(data[json_start + len(start):json_end])
        html = BeautifulSoup(result['html'], 'lxml')
        a_list = html.select(".p-thumb > a")
        for a in a_list:
            # hrefs are protocol-relative ("//v.youku.com/...").
            video_urls.append('http:' + a['href'] + '\r\n')
        print("获取了视频列表段，再次等待两秒，{0}".format(_url))
        time.sleep(2)
    # ROBUSTNESS FIX: make sure the output directory exists before writing.
    os.makedirs('output', exist_ok=True)
    with open('output/list-{0}.txt'.format(title), 'a', encoding='utf8') as w:
        w.writelines(video_urls)

    print("结束")


def get_chrome_html(url):
    """Open *url* in a fresh Chrome instance and return the rendered HTML.

    Side effect: rebinds the module-level ``browser`` so subsequent calls
    (e.g. ``fetch_video``) reuse the same driver.

    :param url: page URL to load
    :return: the page source after Chrome has rendered it
    """
    global browser
    logger.info("开始爬取：{0}".format(url))
    browser = webdriver.Chrome('drivers/chromedriver-mac')
    # BUG FIX: WebDriver.get() returns None; the original returned that
    # None.  The rendered markup lives in browser.page_source.
    browser.get(url)
    return browser.page_source


def fetch_cookies(url):
    """Perform one GET on *url* with the shared UA and return the cookie jar.

    :param url: URL to request
    :return: the ``requests.Session`` cookie jar populated by the response
    """
    session = requests.Session()
    session.headers.update({"User-Agent": UA})
    response = session.get(url)
    print("已经获取到cookies:{0}".format(response.url))
    return session.cookies


if __name__ == '__main__':
    print("开始爬取")
    try:
        browser = webdriver.Chrome('drivers/chromedriver-mac')
        urls = [
            # ("王广成", "http://list.youku.com/show/id_z31038211df6911e68fae.html"),  # already covered by the 糖豆 list
            ("糖豆", "http://list.youku.com/show/id_zbbba3dba73d311e6b9bb.html"),
        ]
        for _title, _url in urls:
            fetch_video(title=_title, url=_url)
    except Exception as e:
        # logger.exception records the full traceback, not just the message.
        logger.exception(e)
    finally:
        # BUG FIX: the original leaked the Chrome process on both success
        # and failure paths; always shut the driver down.
        if browser is not None:
            browser.quit()