# 代码案例来自于b站up蚂蚁学Python
# cpu密集 CPU-Bound
# I/O占用时间少 CPU占用时间多
# e.g. 压缩解压缩、加密解密、正则表达式搜索

# I/O密集 I/O-Bound
# I/O占用时间多 CPU占用时间少
# e.g. 文件处理程序、网络爬虫程序、读写数据库程序

# 多进程Process - multiprocessing模块
#   适用CPU密集型计算
# 多线程Thread - threading模块 （GIL受限制）
#   适用I/O密集型计算 同时运行任务数目要求不多
# 多协程Coroutine - asyncio模块 （requests不支持 需要用aiohttp）
#   适用于I/O密集型计算 需要超多任务运行 但有现成库支持的场景


# 创建多线程的方法：
# 1 准备一个函数 
# def my_func(a, b):
    # do_craw(a, b)
# 2 创建一个线程
# import threading
# t = threading.Thread(target=my_func, args=(100, 200))
# 3 启动线程
# t.start()
# 4 等待结束（可选择）
# t.join()

# case01 多线程对比单线程爬虫
# The 50 list pages of the cnblogs front page, used by all crawl demos below.
urls = ["https://www.cnblogs.com/#p{}".format(page_no) for page_no in range(1, 51)]
import requests
def craw(url):
    """Download *url* and print the URL together with its response size."""
    response = requests.get(url)
    print(url, len(response.text))



def single_thread():
    """Crawl every URL sequentially on the calling thread (baseline timing)."""
    print('single start')
    for target_url in urls:
        craw(target_url)
    print('single end')

import threading    
def multi_thread():
    """Crawl every URL concurrently, spawning one thread per URL.

    Starts all threads first, then joins them all so the function only
    returns once every page has been fetched.
    """
    print('multi start')
    workers = [
        threading.Thread(target=craw, args=(url,))
        for url in urls
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print('multi end')

# import time
# s = time.time()
# single_thread()
# e = time.time()
# print('single cost', e - s)
# s = time.time()
# multi_thread()
# e = time.time()
# print('multi cost', e - s)

# case02
# 多线程数据通信的 queue.Queue
# 1 导入类库
# import queue
# 2 创建Queue
# q = queue.Queue()
# 3 添加元素
# q.put(item)
# 4 获取元素
# item = q.get()
# 5 查询状态
# 元素数量 q.qsize()
# 是否为空 q.empty()
# 是否已满 q.full()

# 爬html
def craw2(url):
    """Fetch *url* and return the raw HTML body as text."""
    response = requests.get(url)
    return response.text

# 解析html
from bs4 import BeautifulSoup
# post-item-title
def parse(html):
    """Extract (href, title) pairs from every 'post-item-title' anchor in *html*."""
    document = BeautifulSoup(html, 'html.parser')
    anchors = document.find_all('a', class_='post-item-title')
    results = []
    for anchor in anchors:
        results.append((anchor['href'], anchor.get_text()))
    return results

def do_craw2(url_queue, html_queue):
    """Producer worker: pull URLs off *url_queue*, crawl them, and push the
    resulting HTML onto *html_queue*.

    Runs forever; meant to be the target of a worker thread. Sleeps a random
    1-2 seconds per item to make the queue dynamics visible in the output.
    """
    while True:
        next_url = url_queue.get()
        page = craw2(next_url)
        html_queue.put(page)
        print(threading.current_thread().name, f'craw {next_url}',
              'url_queue size: ', url_queue.qsize())
        time.sleep(random.randint(1, 2))
import time
import random
def do_parse(html_queue, fout):
    """Consumer worker: pull HTML off *html_queue*, parse it, and append each
    (href, title) result as one line to the open file object *fout*.

    Runs forever; meant to be the target of a worker thread.
    """
    while True:
        page = html_queue.get()
        parsed = parse(page)
        for item in parsed:
            fout.write(str(item) + '\n')
        print(threading.current_thread().name, f'result.size', len(parsed),
            'html_queue size: ', html_queue.qsize())
        time.sleep(random.randint(1, 2))

# import queue
# url_queue = queue.Queue()
# html_queue = queue.Queue()
# for url in urls:
#     url_queue.put(url)

# for idx in range(3):
#     t = threading.Thread(target=do_craw2, args=(url_queue, html_queue), name=f'craw{idx}')
#     t.start()

# fout = open('craw_data.txt', 'w')
# for idx in range(2):
#     t = threading.Thread(target=do_parse, args=(html_queue, fout), name=f'parse{idx}')
#     t.start()

# 线程安全以及Lock解决方案
# 多线程能够安全处理共享变量
# e.g.
# def draw(account, amount):
#   if account.balance >= amount:
#       account.balance -= amount
# Lock 两种方法：
#1
# import threading 
# lk = threading.Lock()
# lk.acquire()
# try:
#     pass
# finally:
#     lk.release()
#2
# import threading
# lk = threading.Lock()
# with lk:
#     pass
import time
lock = threading.Lock()
class Account:
    """A bank account whose balance is shared (and mutated) across threads."""

    def __init__(self, balance) -> None:
        # Current balance; read and decremented by draw() under the lock.
        self.balance = balance

def draw(account, amount):
    """Withdraw *amount* from *account* if funds suffice, under the global lock.

    The lock makes the balance check and the subtraction atomic with respect
    to other threads calling this function.
    """
    with lock:
        thread_name = threading.current_thread().name
        if account.balance < amount:
            print(thread_name, '取钱失败')
            return
        print(thread_name, '取钱成功')
        account.balance -= amount
        print(thread_name, '余额', account.balance)

# account = Account(1000)
# ta = threading.Thread(name='ta', target=draw, args=(account, 800))
# tb= threading.Thread(name='tb', target=draw, args=(account, 800))
# ta.start()
# tb.start()

# case 04
# 线程池 ThreadPoolExecutor
# 线程生命周期：
# 1新建->[start]->2就绪->[获取cpu资源]->3运行->[sleep/io]->4阻塞->[sleep/io结束]->2就绪
#                                            ->[失去cpu资源]->2就绪...
#                                            ->[run方法执行完]->5终止
# 新建和终止线程分别需要分配和回收资源 频繁的新建线程会造成大量开销
# 线程池提供重用线程的方法，可以减去新建和终止的开销
# 使用一个任务队列来维护任务，线程池中的线程会依次从任务队列中取出任务执行
# 适合突发性大量并发需求；或需要大量线程、任务处理时间短的场景
# 避免系统因为创建线程过多，负荷过大影响性能
# e.g.
# from concurrent.futures import ThreadPoolExecutor, as_completed
# 1
# with ThreadPoolExecutor() as pool:
#     results = pool.map(craw, urls)
#     for result in results:
#         print(result)
# 2
# with ThreadPoolExecutor() as pool:
#     futures = [pool.submit(craw, url) for url in urls]

#     for future in futures:
#         print(future.result())
    
#     for future in as_completed(futures):  # 顺序不固定
#         print(future.result())
# from concurrent.futures import ThreadPoolExecutor, as_completed
# # craw
# with ThreadPoolExecutor() as pool:
#     htmls = pool.map(craw2, urls)
#     htmls = list(zip(urls, htmls))
#     for url, html in htmls:
#         print(url, len(html))
# print('craw over')
# # parse
# with ThreadPoolExecutor() as pool:
#     futures = {}
#     for url, html in htmls:
#         future = pool.submit(parse, html)
#         futures[future] = url
    
#     for future in as_completed(futures):
#         url = futures[future]
#         print(url)
#         print(future.result())
    
#     # for future, url in futures.items():
#     #     print(url , future.result())