import requests
import json
from urllib.parse import urlencode
from requests.exceptions import RequestException
import re
from bs4 import BeautifulSoup
import pymongo
from hashlib import md5
import os
from multiprocessing import Pool


# MongoDB connection settings: host, database name, and collection name.
MONGO_URL="localhost"
MONGO_DB="toutiao"
MONGO_TABLE="toutiao"

GROUP_START = 1 # first page group; the request offset is group * 20
GROUP_END = 20 # last page group (exclusive upper bound for range())
KEYWORD="街拍"

def get_page_index(offset, keyword):
    """Fetch one search-result index page from the Toutiao gallery API.

    Args:
        offset: pagination offset for the search API.
        keyword: search keyword.

    Returns:
        The response body text on HTTP 200, otherwise None.
    """
    params = urlencode({
        "autoload": "true",
        "count": 20,
        "cur_tab": 3,
        "format": "json",
        "from": "gallery",
        "keyword": keyword,
        "offset": offset,
    })
    url = "https://www.toutiao.com/search_content/?" + params
    try:
        resp = requests.get(url)
    except RequestException:
        print("请求index页面失败")
        return None
    return resp.text if resp.status_code == 200 else None


def parse_page_index(html):
    """Yield every article URL from the index-page JSON.

    The Toutiao gallery search returns a JSON document whose "data" key
    holds one entry per image set.

    Args:
        html: raw JSON text of the index page (may be malformed).

    Yields:
        The "article_url" value of each entry (None when the key is absent).
    """
    try:
        data = json.loads(html)
    except (json.JSONDecodeError, TypeError):
        # Malformed or non-string payload: yield nothing instead of crashing
        # the whole worker process with an unhandled exception.
        return
    if data and "data" in data:
        for item in data["data"]:
            yield item.get("article_url")


def get_page_detail(url):
    """Download the HTML of a single gallery page.

    Args:
        url: the gallery (article) URL to fetch.

    Returns:
        The page HTML on HTTP 200, otherwise None.
    """
    try:
        resp = requests.get(url)
    except RequestException:
        print("请求详情页面失败")
        return None
    if resp.status_code != 200:
        return None
    return resp.text


def parse_page_detail(html, url):
    """Parse a single gallery page and download its images.

    The image list is embedded in the HTML as a JSON blob inside a
    ``parse("...")`` call, so it is extracted with a regex rather than
    through the DOM.

    Side effect: every image URL found is downloaded via download_imag().

    Args:
        html: the gallery page HTML.
        url: the page URL (stored alongside the parsed data).

    Returns:
        A dict with keys "title", "images" and "url" on success, or None
        when the embedded JSON cannot be located or lacks "url_list".
    """
    # BeautifulSoup is only needed to pull out the page title.
    soup = BeautifulSoup(html, "lxml")
    title = soup.select("title")[0].get_text()
    print(title)
    # Raw string: '\(' inside a non-raw literal is an invalid escape
    # sequence (SyntaxWarning since Python 3.6, error in future versions).
    images_pattern = re.compile(r'parse\("(.*?)"\),', re.S)
    result = re.search(images_pattern, html)

    if result:
        data = json.loads(result.group(1))
        if data and "url_list" in data:
            images = [item.get("url") for item in data["url_list"]]
            for image in images:
                # Download each image to the local working directory.
                download_imag(image)
            # This record is what the caller stores in MongoDB.
            return {
                "title": title,
                "images": images,
                "url": url,
            }


def save_to_mongo(result):
    """Insert one scraped record into MongoDB.

    Args:
        result: dict produced by parse_page_detail(); falsy values are
            skipped because parse_page_detail returns None on failure.

    Returns:
        True when the insert was acknowledged, False otherwise.
    """
    if not result:
        # Guard: inserting None would raise inside pymongo.
        return False
    # connect=False defers the connection so the client is fork-safe
    # when used from multiprocessing workers.
    client = pymongo.MongoClient(MONGO_URL, connect=False)
    try:
        # insert_one replaces Collection.insert, which was deprecated
        # and removed in PyMongo 4.x.
        outcome = client[MONGO_DB][MONGO_TABLE].insert_one(result)
        if outcome.acknowledged:
            print("insert into mongo success", result)
            return True
        return False
    finally:
        # Release the connection pool instead of leaking a client per call.
        client.close()


def download_imag(url):
    """Fetch one image URL and persist its binary content locally.

    Args:
        url: direct URL of the image to download.
    """
    print("正在下载 :", url)
    try:
        resp = requests.get(url)
    except RequestException:
        print("请求图片失败", url)
        return
    if resp.status_code == 200:
        # .content is the raw bytes of the body (.text would decode to str).
        save_image(resp.content)


def save_image(content):
    """Write image bytes to <cwd>/<md5(content)>.jpg.

    The MD5 digest of the content is used as the filename, so identical
    images are deduplicated and written only once.

    Args:
        content: raw image bytes.
    """
    digest = md5(content).hexdigest()
    file_path = '{0}/{1}.{2}'.format(os.getcwd(), digest, "jpg")
    if os.path.exists(file_path):
        # Same bytes hash to the same name; nothing to do.
        return
    with open(file_path, "wb") as f:
        f.write(content)


def main(offset):
    """Scrape one index page: fetch, parse, download images, store records.

    Args:
        offset: pagination offset passed to the search API.
    """
    index_html = get_page_index(offset, KEYWORD)
    # Guard against a failed index request.
    if index_html is None:
        print("get_page_index 函数没有取到数据")
        return
    for url in parse_page_index(index_html):
        # Separate variable: the original reused `html` for both the index
        # page and each detail page, which obscured the data flow.
        detail_html = get_page_detail(url)
        if not detail_html:
            continue
        result = parse_page_detail(detail_html, url)
        # parse_page_detail returns None when the embedded JSON is missing;
        # inserting None into MongoDB would raise, so skip those pages.
        if result:
            save_to_mongo(result)
            print(result)


if __name__ == "__main__":
    # Fan the page offsets out across worker processes to speed up scraping.
    groups = [x * 20 for x in range(GROUP_START, GROUP_END)]
    pool = Pool()
    try:
        pool.map(main, groups)
    finally:
        # The original never released the pool, leaking worker processes;
        # close() stops accepting work and join() reaps the children.
        pool.close()
        pool.join()











