import asyncio
import time
import aiohttp



# async  def task(name):
#     print('task %s is running' % name)
#     await asyncio.sleep(2)
#     print('task %s done' % name)
#
# async  def main():
#     tasks = [task(i) for i in range(10)]
#     await asyncio.wait(*tasks)
#
#
# asyncio.run(task('任务1'))



# # 并发执行多个任务
# import asyncio
#
# async def task(name, delay):
#     print(f"Task {name} started")
#     await asyncio.sleep(delay)
#     print(f"Task {name} completed after {delay}s")
#
# async def main():
#     # 创建多个任务
#     tasks = [
#         task("A", 2),
#         task("B", 1),
#         task("C", 3)
#     ]
#     # 并发执行所有任务
#     await asyncio.gather(*tasks)
#
# asyncio.run(main())
#
# # 异步HTTP请求练习
# import asyncio
# import aiohttp
# import time
#
# # URL列表
# urls = [f'https://httpbin.org/get?age={i}' for i in range(5)]
#
# # 单个异步请求函数
# async def fetch(session, url):
#     async with session.get(url) as response:
#         result = await response.json()
#         print(f"Received data for age={result['args']['age']}")
#         return result
#
# # 批量异步请求
# async def fetch_all(urls):
#     async with aiohttp.ClientSession() as session:
#         tasks = [fetch(session, url) for url in urls]
#         results = await asyncio.gather(*tasks)
#         return results
#
# # 测量执行时间
# async def main():
#     start_time = time.time()
#     results = await fetch_all(urls)
#     end_time = time.time()
#     print(f"Fetched {len(results)} URLs in {end_time - start_time:.2f} seconds")
#
# asyncio.run(main())

async def fetch(session, url):
    """Fetch *url* with the given aiohttp session and return the parsed JSON body.

    Prints the ``age`` query parameter echoed back by httpbin as a progress marker.

    :param session: an open ``aiohttp.ClientSession``
    :param url: URL to GET (expected to return JSON with an ``args`` mapping)
    :return: the decoded JSON payload (a dict)
    """
    async with session.get(url) as response:
        result = await response.json()
        print(f"Received data for age={result['args']['age']}")
        # Bug fix: the original called `await response.json()` a second time here,
        # decoding the (cached) body again for no reason. Return the parsed result.
        return result

async def main():
    """Concurrently request five httpbin URLs and print each decoded response."""
    urls = [f'https://httpbin.org/get?age={i}' for i in range(5)]
    async with aiohttp.ClientSession() as session:
        tasks = [fetch(session, url) for url in urls]
        payloads = await asyncio.gather(*tasks)
        for payload in payloads:
            print(payload)
    # Brief pause so the transport has time to close cleanly before the loop exits.
    await asyncio.sleep(1)



# Run the aiohttp demo above; blocks until all five requests have completed.
asyncio.run(main())

# Practical exercise: an asynchronous web crawler
import asyncio
import aiohttp
from urllib.parse import urljoin, urlparse


class AsyncCrawler:
    """Fetch many URLs concurrently while capping the number of in-flight requests."""

    def __init__(self, concurrency=5):
        """Create a crawler allowing at most *concurrency* simultaneous requests."""
        self.concurrency = concurrency
        # The semaphore gates how many fetch_page coroutines may run at once.
        self.semaphore = asyncio.Semaphore(concurrency)

    async def fetch_page(self, session, url):
        """Download one page and return a summary dict.

        On success the dict holds ``url``, ``status`` and ``content_length``;
        on any failure it holds ``url`` and the stringified ``error`` instead.
        """
        async with self.semaphore:  # enforce the concurrency cap
            try:
                async with session.get(url) as response:
                    body = await response.text()
                    print(f"Fetched {url} - {len(body)} bytes")
                    return {
                        'url': url,
                        'status': response.status,
                        'content_length': len(body),
                    }
            except Exception as exc:
                # Best-effort crawl: report the failure instead of aborting the batch.
                print(f"Failed to fetch {url}: {exc}")
                return {
                    'url': url,
                    'error': str(exc),
                }

    async def crawl(self, urls):
        """Fetch every URL in *urls* through one shared session; return all summaries."""
        async with aiohttp.ClientSession() as session:
            pending = [self.fetch_page(session, u) for u in urls]
            return await asyncio.gather(*pending)


# Usage example
async def main():
    """Crawl a handful of httpbin endpoints and report the success rate."""
    urls = [
        'https://httpbin.org/get',
        'https://httpbin.org/user-agent',
        'https://httpbin.org/headers',
        'https://httpbin.org/ip',
        'https://httpbin.org/uuid',
    ]

    crawler = AsyncCrawler(concurrency=3)
    results = await crawler.crawl(urls)

    # A result dict without an 'error' key means the page was fetched successfully.
    successful = sum(1 for r in results if 'error' not in r)
    print(f"\nCrawling completed: {successful}/{len(urls)} successful")


# Run the crawler demo; `main` was redefined above, so this executes the crawl version.
asyncio.run(main())








