#!/usr/bin/python3
from urllib import request, parse
from util import getAgentHeaders
from random import choice
import lxml.etree as le
import os

# Browser session cookie pasted verbatim (acw_* anti-crawl tokens, Hm_* analytics,
# plus an "announcement" blob showing isLogin=false, i.e. a logged-out session).
# NOTE(review): these tokens are time-stamped and almost certainly expire —
# confirm they need refreshing before each run; consider loading from config.
COOKIE = 'acw_tc=2760821b15771592124881783e3b1abbfaedfd95434405887d012b14a9f760; acw_sc__v2=5e018abb977187114d31dd211e8e3ff618ffaa02; uuid_tt_dd=10_19003208140-1577159356006-728039; dc_session_id=10_1577159356006.180932; dc_tos=q2zzy4; Hm_lvt_6bcd52f51e9b3dce32bec4a3997715ac=1577159357; Hm_lpvt_6bcd52f51e9b3dce32bec4a3997715ac=1577159357; Hm_ct_6bcd52f51e9b3dce32bec4a3997715ac=6525*1*10_19003208140-1577159356006-728039; __yadk_uid=SJhfmL8NWblaGKrJkCMo7IOik3s2B8hI; firstDie=1; announcement=%257B%2522isLogin%2522%253Afalse%252C%2522announcementUrl%2522%253A%2522https%253A%252F%252Fblog.csdn.net%252Fblogdevteam%252Farticle%252Fdetails%252F103603408%2522%252C%2522announcementCount%2522%253A0%252C%2522announcementExpire%2522%253A3600000%257D'


def getProxy():
    """Fetch a fresh proxy endpoint from the data5u rotating-proxy API.

    Returns:
        str: proxy address as returned by the API (stripped), e.g. "1.2.3.4:8080".

    Raises:
        urllib.error.URLError: if the API is unreachable or times out.
    """
    # NOTE(review): the "order" API key is hard-coded — move to config/env.
    # timeout added so a dead API endpoint fails fast instead of hanging forever.
    with request.urlopen(
            "http://api.ip.data5u.com/dynamic/get.html?order=e93de4cbd91979ec97fe6688d2c67f98",
            timeout=10
    ) as f:
        return f.read().decode('utf-8').strip()


def get(url, header=None):
    """GET *url* through a freshly fetched HTTP proxy.

    Args:
        url: absolute URL to request.
        header: optional dict of HTTP request headers (default: no headers).

    Returns:
        str: decoded response body.
    """
    # Bug fix: mutable default argument ({}) replaced with a None sentinel —
    # a shared dict default would persist mutations across calls.
    headers = {} if header is None else header
    # NOTE(review): only the 'http' scheme is proxied; the https URLs used by
    # the caller below bypass the proxy entirely — confirm whether intended.
    opener = request.build_opener(request.ProxyHandler({'http': getProxy()}))
    with opener.open(request.Request(url, headers=headers)) as f:
        return f.read().decode()


if __name__ == "__main__":
    keyword = input("type a keyword you want to search: ").strip()

    # Collect article links from the first 4 pages of CSDN search results.
    links = []
    for page in range(1, 5):
        url = 'https://so.csdn.net/so/search/s.do?' + parse.urlencode(
            {'p': page, 'q': keyword})
        listpage = get(url)
        links += le.HTML(listpage).xpath(
            '//span[contains(@class, "fr")]/../span[@class="link"]/a/@href')
    links = [l.strip().replace('\n', '') for l in links]
    print(links)

    # Bug fix: os.mkdir raised FileExistsError on a re-run with the same
    # keyword; makedirs with exist_ok=True is idempotent.
    os.makedirs(keyword, exist_ok=True)
    for link in links:
        res = get(link, getAgentHeaders(COOKIE))
        titles = le.HTML(res).xpath('//h1[@class="title-article"]/text()')
        if not titles:
            # Page layout didn't match (e.g. blocked / non-article page);
            # previously this raised IndexError and killed the whole run.
            continue
        # Strip path separators so the title is a valid single file name.
        title = titles[0].replace('/', '_').replace('\\', '_')
        # Explicit utf-8: res is decoded text and a non-UTF-8 locale default
        # encoding would raise UnicodeEncodeError on write.
        with open(os.path.join(keyword, title + '.html'), 'w',
                  encoding='utf-8') as f:
            f.write(res)

# XPath cheat-sheet for the selectors used above:
#   search-result link text: //span[contains(@class, "fr")]/../span[@class="link"]/a/text()
#   article title:           //h1[@class="title-article"]/text()