import time

import requests
from bs4 import BeautifulSoup

# Non-paginated version
# # 1. Paste the cookie copied from the browser here
# cookies = {
#     'glidedsky_session': 'eyJpdiI6Im5xdThIbXFZK1JSdm5EaUN3RFIyUWc9PSIsInZhbHVlIjoiblRCdVVsakV2SndESmVPd3lLcytRS0hubTdYWTIxcXQwNlJ6c0dTNTZ4N3U4dlRUdkRhRkFFUlhEU2l3dXB5QyIsIm1hYyI6Ijc5OTlhNzQzYjQ0YTcyN2U1MGRjMzE4MjRlNWFmZWI0ZjM2ZDJjOWJlMTlkN2RiMDExYmI4NjFiZjgyYWRkNGMifQ%3D%3D',   # the key field
#     # if the site uses a plain `session` cookie, use session=xxx instead
# }
#
# url = 'http://www.glidedsky.com/level/web/crawler-basic-1'
# headers = {
#     'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
# }
#
# html = requests.get(url, cookies=cookies, headers=headers)
# print(html.status_code)
# soup = BeautifulSoup(html.text, 'lxml')
#
# # 2. Extract the numbers
# numbers = [int(node.text) for node in soup.select('.col-md-1')]
# total = sum(numbers)
#
# print('共拿到', len(numbers), '个数字')
# print('总和 =', total)


# Paginated version
# cookies = {
# 'glidedsky_session':'eyJpdiI6IjUzY1hzV2xDVG5FRWxTQmpKc0kyUEE9PSIsInZhbHVlIjoiQ0gyY1Rxb2RHdlNZaGJpN1hlWlYrdGVnTXZ1Z2p6QWcwSDFRMFF5U1ltRHdTXC9PZWxnSjZTS1FNTWc0QXhGYjYiLCJtYWMiOiIwMGU2Mzk1YjQ2NGJiYTBiZDA2ODY2NjgyZmY3YzU4ZDc5OTMyOWU4N2E1MmY4NjJiYzAzMDAyYmUwZjIxOWQ3In0%3D'
#
# }
# l1 =[]
# for i in range(1,1001):
#     # time.sleep(2)
#     print(f'第{i}页')
#     url = f'http://www.glidedsky.com/level/web/crawler-basic-2?page={i}'
#     headers = {
#         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
#     }
#
#     html = requests.get(url, cookies=cookies, headers=headers)
#     # print(html.status_code)
#     soup = BeautifulSoup(html.text, 'lxml')
#     for node in soup.select('.col-md-1'):
#         l1.append(int(node.text))
#
# numbers = sum(l1)
# print('总数为',numbers)


# import requests, bs4, time
# from concurrent.futures import ThreadPoolExecutor, as_completed
#
# cookies = {'glidedsky_session': 'eyJpdiI6IjUzY1hzV2xDVG5FRWxTQmpKc0kyUEE9PSIsInZhbHVlIjoiQ0gyY1Rxb2RHdlNZaGJpN1hlWlYrdGVnTXZ1Z2p6QWcwSDFRMFF5U1ltRHdTXC9PZWxnSjZTS1FNTWc0QXhGYjYiLCJtYWMiOiIwMGU2Mzk1YjQ2NGJiYTBiZDA2ODY2NjgyZmY3YzU4ZDc5OTMyOWU4N2E1MmY4NjJiYzAzMDAyYmUwZjIxOWQ3In0%3D'}
# headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'}
#
# session = requests.Session()          # 1. Reuse the TCP connection
# session.headers.update(headers)
# session.cookies.update(cookies)
#
# def fetch(page):
#     """拉一页并返回该页 12 个数字"""
#     url = f'http://www.glidedsky.com/level/web/crawler-basic-2?page={page}'
#     for _ in range(3):                # 2. Simple retry, up to 3 attempts
#         try:
#             resp = session.get(url, timeout=8)
#             if resp.status_code == 200:
#                 soup = bs4.BeautifulSoup(resp.text, 'lxml')
#                 return [int(n.text) for n in soup.select('.col-md-1')]
#         except Exception as e:
#             print(f'[page {page}] retry {_+1}/3', e)
#             time.sleep(0.5)
#     return []                         # return an empty list if all retries fail
#
# all_nums = []
# start = time.time()
#
# # 3. Fetch 1000 pages concurrently with 50 threads
# with ThreadPoolExecutor(max_workers=50) as exe:
#     futures = {exe.submit(fetch, p): p for p in range(1, 1001)}
#     for fu in as_completed(futures):
#         nums = fu.result()
#         all_nums.extend(nums)
#         print(f'\r已收 {len(all_nums):>5} 个数字', end='')
#
# print('\n总和 =', sum(all_nums))
# print('耗时 %.2f s' % (time.time() - start))


# Async version (asyncio + aiohttp)
# import asyncio, aiohttp, time, bs4
#
# cookies = {
#     'glidedsky_session': 'eyJpdiI6IjUzY1hzV2xDVG5FRWxTQmpKc0kyUEE9PSIsInZhbHVlIjoiQ0gyY1Rxb2RHdlNZaGJpN1hlWlYrdGVnTXZ1Z2p6QWcwSDFRMFF5U1ltRHdTXC9PZWxnSjZTS1FNTWc0QXhGYjYiLCJtYWMiOiIwMGU2Mzk1YjQ2NGJiYTBiZDA2ODY2NjgyZmY3YzU4ZDc5OTMyOWU4N2E1MmY4NjJiYzAzMDAyYmUwZjIxOWQ3In0%3D'
# }
# headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'}
#
# async def fetch(page, session):
#     """拉一页并返回 12 个数字"""
#     url = f'http://www.glidedsky.com/level/web/crawler-basic-2?page={page}'
#     for attempt in range(1, 4):
#         try:
#             async with session.get(url, timeout=aiohttp.ClientTimeout(total=8)) as resp:
#                 if resp.status == 200:
#                     text = await resp.text()
#                     soup = bs4.BeautifulSoup(text, 'lxml')
#                     return [int(n.text) for n in soup.select('.col-md-1')]
#         except Exception as e:
#             # Wait 0.5 s before retrying after a failure
#             await asyncio.sleep(0.5)
#     return []   # return an empty list after all 3 attempts fail
#
# async def main():
#     all_nums = []
#     start = time.time()
#
#     # TCP connection pool capped at 50 (at most 50 requests in flight)
#     conn = aiohttp.TCPConnector(limit=50, ttl_dns_cache=300)
#     timeout = aiohttp.ClientTimeout(total=8)
#     async with aiohttp.ClientSession(
#             connector=conn,
#             timeout=timeout,
#             headers=headers,
#             cookies=cookies) as session:
#
#         # Create 1000 coroutine tasks
#         tasks = [asyncio.create_task(fetch(page, session)) for page in range(1, 1001)]
#
#         # Live progress: handle each task as soon as it completes
#         for coro in asyncio.as_completed(tasks):
#             nums = await coro
#             all_nums.extend(nums)
#             print(f'\r已收 {len(all_nums):>5} 个数字', end='')
#
#     print('\n总和 =', sum(all_nums))
#     print('耗时 %.2f s' % (time.time() - start))
#
# if __name__ == '__main__':
#     # Windows only: avoids a RuntimeError with the default Proactor loop.
#     # NOTE(review): WindowsSelectorEventLoopPolicy does not exist on other
#     # platforms — guard this call with a sys.platform check before uncommenting.
#     asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
#     asyncio.run(main())