import os
import re
import json
import requests
import pymongo
from hashlib import md5
from bs4 import BeautifulSoup
from urllib.parse import urlencode
from requests.exceptions import RequestException
from multiprocessing import Pool
from json.decoder import JSONDecodeError
from config import *

# connect=False defers the actual connection until first use; per the pymongo
# docs this is the recommended way to share a client across fork-based
# multiprocessing workers, so each Pool worker connects on its own instead of
# inheriting the parent's sockets.
client = pymongo.MongoClient(MONGO_URL, connect = False)
db = client[MONGO_DB]

# Browser-like User-Agent for all requests (header names are case-insensitive,
# so the lowercase "user-Agent" key is sent correctly by requests).
headers = {"user-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"}

def get_page_index(offset, keyword):
    """
    Fetch one page of search-index results from the Toutiao search API.

    :param offset: pagination offset (multiples of 20)
    :param keyword: search keyword
    :return: raw JSON response text on HTTP 200, otherwise None
    """
    params = {
        'offset': offset,
        'format': 'json',
        'keyword': keyword,
        'autoload': 'true',
        'count': '20',
        'cur_tab': 1,
        'from': 'search_tab',
    }
    url = 'https://www.toutiao.com/search_content/?' + urlencode(params)
    try:
        # Send the browser-like User-Agent (the original omitted it here while
        # every other request in this file uses it); the timeout keeps a
        # stalled connection from hanging a Pool worker forever.
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        print('请求索引页出错')
        return None


def parse_page_index(html):
    """
    Yield the detail-page URLs ('article_url') found in an index response.

    :param html: JSON text returned by get_page_index(), possibly None
    :yields: article_url strings; entries without one are skipped
    """
    if not html:
        # get_page_index() returns None on failure; json.loads(None) would
        # raise TypeError (not JSONDecodeError), so bail out early.
        return
    try:
        data = json.loads(html)
    except JSONDecodeError:
        return
    if not isinstance(data, dict):
        return
    for item in data.get('data') or []:
        url = item.get('article_url')
        if url:
            yield url

def get_page_detail(url):
    """
    Download a detail page and return its HTML text, or None on failure.

    :param url: detail-page URL taken from the search index
    :return: response body on HTTP 200, otherwise None
    """
    try:
        resp = requests.get(url, headers=headers)
    except RequestException:
        print('请求详情页出错', url)
        return None
    return resp.text if resp.status_code == 200 else None

def parse_page_detail(html, url):
    """
    Extract the title and gallery image URLs from a detail page, downloading
    each image as a side effect.

    :param html: detail-page HTML
    :param url: the page's own URL, echoed into the result
    :return: dict with 'title', 'url', 'images' when a gallery is found,
             otherwise None
    """
    soup = BeautifulSoup(html, 'lxml')
    # soup.title is None on pages without a <title>; the original
    # soup.select('title')[0] raised IndexError in that case.
    title = soup.title.get_text() if soup.title is not None else ''
    print(title)
    images_pattern = re.compile(r'gallery: JSON.parse\((.*?)\),', re.S)
    result = images_pattern.search(html)
    if not result:
        return None
    try:
        # The page embeds gallery: JSON.parse("…escaped json…"): the first
        # loads() unquotes the string literal, the second parses the JSON
        # document inside it.
        data = json.loads(json.loads(result.group(1)))
    except (JSONDecodeError, TypeError):
        return None
    if not isinstance(data, dict) or 'sub_images' not in data:
        return None
    images = [item.get('url') for item in data.get('sub_images')]
    for image in images:
        download_image(image)
    return {
        'title': title,
        'url': url,
        'images': images,
    }
           
def save_to_mongo(result):
    """
    Insert one result document into the MongoDB collection.

    :param result: dict produced by parse_page_detail()
    :return: True when the write was acknowledged, False otherwise
    """
    # Collection.insert() was deprecated in pymongo 3 and removed in
    # pymongo 4; insert_one() is the supported replacement.
    if db[MONGO_TABLE].insert_one(result).acknowledged:
        print('存储到MongoDB成功', result)
        return True
    return False

def download_image(url):
    """
    Fetch a single image and hand its bytes to save_image().

    :param url: image URL from a gallery's sub_images list
    :return: always None; saving happens as a side effect
    """
    print('正在下载', url)
    try:
        resp = requests.get(url, headers=headers)
    except RequestException:
        print('请求图片出错', url)
        return None
    if resp.status_code == 200:
        save_image(resp.content)
    return None

def save_image(content):
    """
    Write image bytes to <cwd>/images/<md5>.jpg.

    The md5 digest of the content doubles as a de-duplicating file name:
    identical bytes map to the same path and are written only once.

    :param content: raw image bytes
    """
    image_dir = os.path.join(os.getcwd(), 'images')
    # The original assumed ./images already existed and crashed with
    # FileNotFoundError otherwise.
    os.makedirs(image_dir, exist_ok=True)
    file_path = '{0}/{1}.{2}'.format(image_dir, md5(content).hexdigest(), 'jpg')
    if not os.path.exists(file_path):
        with open(file_path, 'wb') as f:
            f.write(content)

def main(offset):
    """
    Crawl one index page: fetch the search results for this offset, then
    fetch, parse, and store every detail page they link to.

    :param offset: pagination offset passed to get_page_index()
    """
    index_html = get_page_index(offset, KEYWORD)
    for detail_url in parse_page_index(index_html):
        detail_html = get_page_detail(detail_url)
        if not detail_html:
            continue
        record = parse_page_detail(detail_html, detail_url)
        if record:
            save_to_mongo(record)

if __name__ == '__main__':
    # One offset per 20-result page, covering GROUP_START..GROUP_END.
    groups = [page * 20 for page in range(GROUP_START, GROUP_END + 1)]
    pool = Pool()
    try:
        pool.map(main, groups)
    finally:
        # The original never closed the pool; close()+join() lets the worker
        # processes exit cleanly before the parent terminates.
        pool.close()
        pool.join()