import requests,re
from urllib.parse import urlencode
from urllib.request import urlretrieve
import json
import os,time
from lxml import etree

# Desktop Chrome User-Agent so the site serves the normal (non-bot) pages.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'
}
# Current gallery title; written by get_data_list() and read by save_data_local()
# to name the per-gallery download folder.
title = ''
def parse_url(url, data_type):
    """Fetch *url* and return the response body in the requested form.

    Args:
        url: the URL to request.
        data_type: 'json' (parsed JSON), 'text' (decoded str) or
            'content' (raw bytes).

    Returns:
        The response in the requested representation, or None when
        *data_type* is not one of the three supported values.
    """
    r = requests.get(url, headers=headers)
    # NOTE: the original unconditionally printed r.json() here, which raises
    # a decode error whenever the body is not JSON (every 'text' fetch).
    if data_type == 'json':
        return r.json()
    elif data_type == 'text':
        return r.text
    elif data_type == 'content':
        return r.content
    else:
        print('请传入要获取的类型')
        
def get_url_list():
    """Build the search URL, fetch its JSON payload and return the data list."""
    query = urlencode({
        'offset': 0,
        'format': 'json',
        'keyword': '街拍',
        'autoload': 'true',
        'count': 20,
        'cur_tab': 1,
        'from': 'search_tab',
    })
    search_url = 'https://so.toutiao.com/search/?' + query
    print(search_url)
    payload = parse_url(search_url, 'json')
    return payload['rawData']['data']
def get_data_list(html_str):
    """Yield every sub-image URL embedded in the page's gallery JSON.

    Side effect: sets the module-level ``title`` from the page's <title>,
    which save_data_local() later uses as the download folder name.

    Args:
        html_str: the full HTML text of one gallery page.

    Yields:
        str: each image URL found under the gallery's 'sub_images' key.
    """
    html = etree.HTML(html_str)
    global title
    title = html.xpath('//title/text()')[0].strip()
    # The gallery is embedded as an escaped JSON string inside a JS call:
    # use a raw string and escape the literal dot in "JSON.parse".
    pattern = re.compile(r'gallery: JSON\.parse\("(.*?)"\),', re.S)
    result = pattern.search(html_str)
    if result:
        json_str = result.group(1).strip().replace('\\', '')
        # json.loads() lost its `encoding` parameter in Python 3.9; the input
        # is already a decoded str, so no encoding argument is needed.
        json_list = json.loads(json_str)
        for img_url in json_list['sub_images']:
            yield img_url['url']

def parse_inner_url(url_list):
    """Generator yielding the HTML text of each URL in *url_list*."""
    for page_url in url_list:
        yield parse_url(page_url, 'text')
    

def save_data_local(url, name):
    """Download *url* into images/<title>/<name>.jpg under the working directory.

    Relies on the module-level ``title`` set by get_data_list() to pick the
    per-gallery folder.

    Args:
        url: direct URL of the image to download.
        name: file name (without extension) for the saved image.
    """
    global title
    # makedirs(..., exist_ok=True) creates images/ and images/<title>/ in one
    # call, replacing the two separate (and race-prone) isdir/mkdir checks.
    folder = os.path.join(os.getcwd(), 'images', title)
    os.makedirs(folder, exist_ok=True)
    print('正在下载图片%s.jpg请稍后...' % name)
    path = os.path.join(folder, '%s.jpg' % name)
    time.sleep(2)  # throttle so we don't hammer the image server
    urlretrieve(url, path)
    print('图片：%s.jpg 保存成功!!!' % name)
    

def save_data_mongo():
    """Persist scraped data to MongoDB — placeholder, not implemented yet."""
    pass

def main():
    """Orchestrate the scrape: list entry URLs, fetch pages, extract and save images."""
    # 1. entry URLs for the search results page
    entries = get_url_list()

    # 2. lazily fetch each entry's HTML
    pages = parse_inner_url(entries)

    # 3-4. pull every image URL out of each page and store it locally
    for page_html in pages:
        for image_url in get_data_list(page_html):
            file_name = image_url.split('/')[-1]
            save_data_local(image_url, file_name)
        print('下载成功一个图集！！！ ')
    # 5. (todo) persist metadata to MongoDB
    #5. 保存数据到mongodb

# Run the scraper only when executed as a script, not on import.
if __name__ == '__main__':
    main()