from lxml import etree
import asyncio  # 协程库
import aiohttp  # 协程http请求
import json

# Entry point of the scrape: first page of the Douban Movie Top 250 list.
url = "https://movie.douban.com/top250"
# Browser-like request headers so Douban serves the normal HTML page
# instead of rejecting an obviously non-browser client.
headers={
        'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
        'Host': 'movie.douban.com',
        'Referer': 'https://movie.douban.com/top250?start=0&filter=',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'
    }

async def fetch_content(url):
    """Download *url* and return the response body as text.

    Sleeps two seconds before each request as a crude rate limit so the
    scraper does not hammer the server with back-to-back requests.
    """
    await asyncio.sleep(2)  # throttle every request
    connector = aiohttp.TCPConnector(ssl=False)  # skip cert verification
    async with aiohttp.ClientSession(headers=headers, connector=connector) as session:
        response = await session.get(url)
        async with response:
            return await response.text()


# Parses the HTML pages and extracts/persists the movie data.
async def parse(url):
    """Scrape every page of the Douban Top 250 list reachable from *url*.

    Fetches the first page, follows the pagination links found at its
    bottom (fetched concurrently), extracts each movie's title and
    rating, and writes the whole list to "豆瓣2502.json" as one valid
    JSON array.  The old code appended one JSON document per movie in
    "a+" mode, which produced a file no JSON parser accepts.
    """
    xpath_movie = '//*[@id="content"]/div/div[1]/ol/li'
    xpath_title = './/span[@class="title"]'
    xpath_pages = '//*[@id="content"]/div/div[1]/div[2]/a'
    xpath_num = './/span[@class="rating_num"]'

    page = await fetch_content(url)
    html = etree.HTML(page)

    # Movie <li> elements from the first page.
    result = list(html.xpath(xpath_movie))

    # Pagination hrefs are relative ("?start=25&filter=..."), so prefix
    # the base url to build the full address of every remaining page.
    fetch_list = [url + a.get("href") for a in html.xpath(xpath_pages)]

    # Fetch all remaining pages concurrently and collect their movies.
    pages = await asyncio.gather(*(fetch_content(u) for u in fetch_list))
    for page in pages:
        html = etree.HTML(page)
        result.extend(html.xpath(xpath_movie))

    # One pass over the elements; find() returns the first matching
    # <span> (the Chinese title / the rating number).
    movies = [
        {
            "title": movie.find(xpath_title).text,
            "num": movie.find(xpath_num).text,
        }
        for movie in result
    ]

    # Write once, as a single JSON array, replacing any previous run's
    # output instead of appending to it.
    with open("豆瓣2502.json", "w", encoding="utf-8") as f:
        json.dump(movies, f, ensure_ascii=False, indent=3)

# TODO: saving the data had problems — needs more debugging.

async def main():
    """Entry coroutine: run the scraper starting from the module-level url."""
    await parse(url)


# Script entry point: drive the top-level coroutine on asyncio's event loop.
if __name__ == "__main__":
    asyncio.run(main())

