#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2018/11/5 18:14
# @Author  : supuchun
# @Site    : 
# @File    : baidu_senu.py

# coding=utf-8

import os
import re
import time,json
import requests
from selenium import webdriver
from bs4 import BeautifulSoup

from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains

'''
目标:
根据头条号主页地址或UID
https://www.toutiao.com/c/user/3072139898/#mid=52012047610
得到文章列表

分析:
文章列表存在于动态链接中，格式如下，且需要cookie

https://www.toutiao.com/c/user/article/?page_type=1&user_id=3072139898&max_behot_time=0&count=20&as=A1754B50CB1A8EA&cp=5B0B4A88AE9A2E1&_signature=pjGAUBAe.Unz7SkR5ZQGg6YxgE

https://www.toutiao.com/c/user/article/?page_type=1&user_id=3072139898&max_behot_time=1524393165&count=20&as=A1E55BA0ABCA8FE&cp=5B0B3AC8EF3E2E1&_signature=pjGAUBAe.Unz7SkR5ZQrc6YxgE

https://www.toutiao.com/c/user/article/?page_type=1&user_id=3072139898&max_behot_time=1522397802&count=20&as=A1B5AB40ABDA938&cp=5B0BFAC95358FE1&_signature=pjGAUBAe.Unz7SkR5ZT-ZaYxgE

request 中，max_behot_time参数控制翻页，count控制每页数量，_signature是动态参数，且每页都会变

response中，next.max_behot_time控制下一页起始参数，has_more控制有无下一页，data返回列表集合
'''

def get_driver_by_proxy():
    """Create a Chrome WebDriver emulating a Nexus 5 mobile device.

    NOTE(review): despite the name, no proxy is configured here -- the
    name is kept unchanged for caller compatibility; confirm whether a
    proxy was intended.

    Returns:
        A ``selenium.webdriver.Chrome`` instance with a 15-second
        page-load timeout.
    """
    options = webdriver.ChromeOptions()

    # Emulate a mobile browser so the site serves its mobile layout.
    mobile_emulation = {"deviceName": "Nexus 5"}
    options.add_experimental_option("mobileEmulation", mobile_emulation)

    # Optional tweaks kept for reference:
    #   disable image loading:
    #     prefs = {"profile.managed_default_content_settings.images": 2}
    #     options.add_experimental_option("prefs", prefs)
    #   run without a visible window:
    #     options.add_argument('--headless')

    # `chrome_options=` is deprecated since Selenium 3.8 (removed in 4.x);
    # `options=` is the supported keyword and accepts the same object.
    driver = webdriver.Chrome(options=options)

    # Fail fast: abort page loads that take longer than 15 seconds.
    driver.set_page_load_timeout(15)

    return driver

def main():
    """Scroll the Baidu mobile home page until no new article links appear.

    Repeatedly sends DOWN-arrow key presses to trigger the page's
    infinite-scroll lazy loading, then counts elements whose CSS class is
    "link title" in the rendered HTML.  When one full round of scrolling
    adds no new links, the feed is assumed to be exhausted.

    NOTE(review): the module docstring describes scraping Toutiao article
    lists, but this function loads http://m.baidu.com/ -- confirm which
    target is actually intended.
    """
    url = "http://m.baidu.com/"
    print(url)
    driver = get_driver_by_proxy()
    try:
        driver.get(url)
        driver.refresh()
        driver.implicitly_wait(2)

        # Number of DOWN key presses per round; each press nudges the page
        # down and triggers lazy loading of more items.
        down_times = 500
        last_num = 0
        while True:
            # The original `if i > 500: time.sleep(0.1)` throttle was dead
            # code (i never exceeds 499 in range(500)) and has been removed.
            for i in range(down_times):
                ActionChains(driver).key_down(Keys.DOWN).perform()
                print("已完成" + str(i) + "次")

            soup = BeautifulSoup(driver.page_source, 'html.parser')
            print(soup)
            link_num = len(soup.find_all(class_='link title'))

            # If a whole round of scrolling produced no new links, we have
            # most likely reached the bottom of the feed.
            if link_num == last_num:
                break

            # Remember the current count for the next round's comparison.
            last_num = link_num

        # Leave the browser open for a while so the result can be inspected
        # manually before shutdown.
        time.sleep(300)
    finally:
        # Always release the Chrome/chromedriver processes (the original
        # never called quit(), leaking the browser on every run).
        driver.quit()

# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()


