import os
import time
import requests
from bs4 import BeautifulSoup
import pymongo
from functools import wraps
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor

# Target page; crawled 10 times so the timing differences between the
# sequential / threaded / multiprocess runs below are measurable.
url = "http://47.103.13.124:8001/base"
urls = [url] * 10

# Session cookie sent with every request — presumably required by the demo
# site to serve the movie listing; verify against the server if it changes.
cookies = {
    'session': '.eJyrViotTi1SsqpWyiyOT0zJzcxTsjLQUcrJTwexSopKU3WUcvOTMnNSlayUDM3gQEkHrDE-M0XJyhjCzkvMBSmKKTVNMjMDkiamFkq1tQDfeR3n.YKXKWQ.QcA_zWfTFZFGlGik_5milrY3gRA'
}

# MongoDB connection (lazy — pymongo connects on first operation).
# Scraped movie records are written into db "crawler", collection "demo1".
client = pymongo.MongoClient(host='localhost', port=27017)
db = client.crawler
collection = db.demo1


"""
A larger crawler with a somewhat more complex structure.
ThreadPoolExecutor or ProcessPoolExecutor may raise errors here.
Can a task running in a thread pool spawn new threads or new processes?
Can a task running in a process pool spawn new threads or new processes?
"""

def crawler(url):
    """Fetch one listing page, parse its movie cards, and store them in MongoDB.

    Args:
        url: the page URL to scrape.

    Raises:
        requests.HTTPError: if the server answers with an error status.
        requests.Timeout: if the request exceeds the 10-second timeout.
    """
    # timeout prevents a dead server from hanging a worker forever;
    # raise_for_status fails loudly instead of parsing an error page.
    r = requests.get(url, cookies=cookies, timeout=10)
    r.raise_for_status()
    soup = BeautifulSoup(r.text, 'lxml')

    movie_list = soup.find('div', class_='movie-list').find_all('a')
    datas = []
    for movie in movie_list:
        score_tag = movie.find('small')  # first <small> is the score
        datas.append({
            'img_url': movie.find('img').attrs.get('src'),
            'title': movie.find('h5').get_text(),
            'desc': movie.find('p').get_text(),
            'score': score_tag.get_text(),
            # the second <small> sibling holds the secondary description
            'desc2': score_tag.find_next_sibling('small').get_text(),
        })
    # insert_many raises InvalidOperation on an empty list — guard it so a
    # page with no movie cards doesn't crash the run.
    if datas:
        collection.insert_many(datas)


def run_time(func):
    """Decorator: print the calling PID, qualified name, and wall-clock duration.

    The wrapped function's return value is passed through unchanged.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()  # start timestamp
        result = func(*args, **kwargs)
        end = time.perf_counter()  # end timestamp
        # BUG FIX: the original f-string had no separator between the PID and
        # the qualified function name, printing e.g. "PID: 1234__main__.f".
        print(f'PID: {os.getpid()} {func.__module__}.{func.__name__}: {end - start}')
        return result
    return wrapper


@run_time
def default_run():
    """Baseline: crawl every URL sequentially in the main thread."""
    for page_url in urls:
        crawler(page_url)


@run_time
def thread_run():
    """Crawl the URLs concurrently on a 20-worker thread pool.

    Despite the GIL allowing only one thread to run Python bytecode at a
    time, blocking network I/O hands the wait to the OS, so the pool can
    switch to other work immediately — ideal for I/O-bound crawling.
    """
    with ThreadPoolExecutor(20) as pool:
        # Executor.map submits every task eagerly; leaving the result
        # iterator unconsumed matches the original fire-and-forget submits.
        pool.map(crawler, urls)


@run_time
def process_run():
    """Crawl the URLs across a 5-worker process pool.

    Rule of thumb for the pool size: CPU core count + 1. Workers cannot
    always be created via fork; each process owns a separate address space,
    so switching between them carries a noticeable cost. Multiprocessing
    speeds this crawler up only modestly — processes shine on CPU-bound
    work, where heavy computation is spread across multiple cores and runs
    truly in parallel.
    """
    with ProcessPoolExecutor(5) as pool:
        # Futures are intentionally discarded, mirroring the original
        # fire-and-forget behavior.
        _ = [pool.submit(crawler, target) for target in urls]


def main():
    """Run the three crawling strategies back to back to compare timings."""
    for runner in (default_run, thread_run, process_run):
        runner()


# Entry-point guard — also keeps ProcessPoolExecutor workers that re-import
# this module from re-running main().
if __name__ == "__main__":
    main()
