# -*- coding: utf-8 -*-

import httpx
from bs4 import BeautifulSoup
from datetime import datetime
import re
import asyncio
import  asyncio
from playwright.async_api import async_playwright
from urllib.parse import urlparse
from insights.get_info import get_info

# Default request header for plain HTTP fetches (desktop Chrome user agent).
header = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/604.1 Edg/112.0.100.0'}

# Entry-point URLs on movie.douban.com, keyed by section.
movie_base_url = {
    "cinema": "https://movie.douban.com/cinema/nowplaying/shenzhen/",  # all movies now playing (Shenzhen)
    "later": "https://movie.douban.com/cinema/later/shenzhen/",  # coming soon (Shenzhen)
    "explore": "https://movie.douban.com/explore#!type=movie&tag=冷门佳片",  # movie explorer ("hidden gems" tag)
    "chart": "https://movie.douban.com/chart",  # douban movie chart
    "best": "https://movie.douban.com/review/best/",  # most popular reviews
}
douban_moive = "https://m.douban.com/movie"  # douban movie mobile home page (NOTE: name misspelled; kept for compatibility)


# Only crawls pages under https://movie.douban.com
async def movie_crawler(url: str, logger) -> tuple[int, dict]:
    """Crawl a movie.douban.com page with a headless Chromium browser.

    Args:
        url: Target page; must start with http(s)://movie.douban.com.
        logger: Logger with a ``warning`` method.

    Returns:
        ``(-5, {})`` when *url* is not a movie.douban.com address,
        otherwise whatever ``get_movie_douban`` produces
        (``(1, set_of_links)`` for listing pages, ``(11, dict)`` for detail pages).
    """
    if not url.startswith('https://movie.douban.com') and not url.startswith('http://movie.douban.com'):
        logger.warning(f'{url} is not a movie.douban url, you should not use this function')
        return -5, {}

    # Normalise the scheme so the rest of the pipeline only sees https.
    url = url.replace("http://", "https://", 1)

    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True)
        try:
            page = await browser.new_page()
            await page.goto(url)

            path = urlparse(url).path
            return await get_movie_douban(page, url, path, logger)
        finally:
            # Fix: the original returned without closing the browser,
            # leaking one Chromium process per call.
            await browser.close()


# Crawl pages under https://movie.douban.com
async def get_movie_douban(page, url, path, logger):
    """Dispatch on the URL *path*.

    Listing pages (``/cinema/nowplaying``, ``/cinema/later``) return the set
    of movie-detail links found on them; any other path is treated as a
    movie detail page and fully parsed.

    Returns:
        ``(1, set_of_links)`` for listing pages, or ``(11, dict)`` with the
        parsed movie detail (title, info, rating, summary, comments, ...).
    """
    # Fix: the original printed f"is path ${url}" — JS template syntax
    # inside a Python f-string, leaving a stray '$' in the output.
    print(f"is path {url}")
    # Collected movie-detail links (listing pages only).
    all_info_set = set()

    # Now playing
    if path.startswith("/cinema/nowplaying"):
        movies = await page.locator('//*[@id="nowplaying"]/div[contains(@class, "mod-bd")]/ul/li').all()
        for movie in movies:
            href = await movie.locator('//ul/li[contains(@class,"poster")]/a').evaluate_all(
                'nodes => nodes.map(node => node.getAttribute("href"))')
            all_info_set.add(href[0])
    # Coming soon
    if path.startswith("/cinema/later"):
        later_movies = await page.locator('//*[@id="showing-soon"]/div').all()
        for link in later_movies:
            href = await link.locator('//a').evaluate_all(
                'nodes => nodes.map(node => node.getAttribute("href"))')
            all_info_set.add(href[0])

    # Listing pages: hand the collected detail links back to the caller.
    # NOTE: "/cinema" already covers both sections above; the extra
    # "/later" test from the original is kept for compatibility although
    # real douban paths use "/cinema/later".
    if path.startswith("/cinema") or path.startswith("/later"):
        return 1, all_info_set

    # Detail page: (re)load it and parse.
    await page.goto(url)
    await page.wait_for_load_state('domcontentloaded')

    movie_detail = await get_movie_detail(page, path, logger)

    publish_time = datetime.strftime(datetime.today(), "%Y%m%d")

    # The query string distinguishes where the link came from
    # (now-playing vs coming-soon listings).
    movie_from = urlparse(url).query

    return 11, {
        'title': movie_detail["name"],  # movie title
        'author': movie_detail["info"],  # director/writer/genre block
        'publish_time': publish_time,
        'abstract': movie_detail['star_text'],  # rating details
        'content': movie_detail['summary'],
        'url': url,
        'type': movie_from,
        "short_comments": movie_detail["short_comments"],
        "long_comments": movie_detail["long_comments"]
    }


# Crawl https://m.douban.com/movie/ — douban movie mobile home page
async def m_douban_move(page, browser, url, logger):
    """Collect movie links from the mobile home page, visit each detail
    page, and return a summary record built from the last parsed detail.

    Returns:
        ``(11, dict)`` on success, or ``None`` when no links are found.
    """
    title = await page.title()
    contents = []
    # Fix: `details` was only assigned inside the loop's try block, so a
    # failure on every page raised NameError at the return statement.
    details = {"info": "", "star_text": ""}

    # Wait for the movie-card anchors to render.
    await page.wait_for_selector('a.onjTL.pAB_V')
    links = await page.query_selector_all('a.onjTL.pAB_V')

    if not links:
        print("No links found")
        await browser.close()
        return

    urls = []
    for i, link in enumerate(links):
        try:
            href = await link.get_attribute('href')  # link URL
            urls.append(href.replace('$', '', 1))
        except Exception as e:
            # Fix: "${e}" was JS template syntax inside a Python f-string.
            print(f"error is {e}")
            print(f"Link {i + 1} does not have a valid href.")

    for url in urls:
        try:
            await page.goto(url)  # open the detail page
            await page.wait_for_load_state('domcontentloaded')

            # Fix: the original passed `browser` where get_movie_detail
            # expects the URL path (used to extract the movie id).
            details = await get_movie_detail(page, urlparse(url).path, logger)
        except Exception as e:
            logger.warning(e)

    # Fix: the original closed the browser INSIDE the loop, which broke
    # every iteration after the first.
    await browser.close()

    publish_time = datetime.strftime(datetime.today(), "%Y%m%d")

    return 11, {
        'title': title,
        'author': f"详情:\n{details['info']}",
        'publish_time': publish_time,
        'abstract': f"评分：{details['star_text']}",
        'content': f"剧情简介{str(contents)}",
        # NOTE(review): `url` was rebound by the loop, so this is the last
        # visited detail URL — preserved from the original; confirm intent.
        'url': url,
    }


# Parse a movie detail page
async def get_movie_detail(page, path, logger):
    """Extract title, info block, rating, plot summary and both comment
    lists from the currently loaded movie detail page.

    Args:
        page: Playwright page already navigated to the detail URL.
        path: URL path, e.g. ``/subject/123456/`` — the movie id is
            extracted from it.
        logger: Logger with an ``error`` method.

    Returns:
        Dict with keys ``name``/``summary``/``star_text``/``info``/
        ``comments``/``short_comments``/``long_comments``. Fields default
        to empty values if scraping fails part-way through.
    """
    name = ''
    summary = ''
    rec = ''
    detail = ''
    comments = ''
    # Fix: these were only bound inside the try block, so an early
    # exception made the return statement itself raise NameError.
    short_comments = []
    long_comments = []
    try:
        # Movie title
        name = await page.locator('//*[@id="content"]/h1').inner_text()

        # Info block: director, writer, genre, etc.
        detail = await page.locator('//*[@id="info"]').inner_text()

        # Rating section
        rec = await page.locator('//*[@id="interest_sectl"]').inner_text()

        # Plot summary
        summary = await page.locator('//div[@id="link-report-intra"]').inner_text()

        # Reviews: the movie id comes from the /subject/<id>/ path.
        movie_id = re.search(r"/subject/(\d+)/", path).group(1)
        long_comments = await get_long_comment(page, movie_id, logger)
        short_comments = await get_movie_comments_short(page, movie_id, logger)
        comments = 'comments'

    except Exception as e:
        logger.error(e)

    return {
        "name": name,
        "summary": summary,
        "star_text": rec,
        "info": detail,
        "comments": comments,
        "short_comments": short_comments,
        "long_comments": long_comments
    }


# Parse the first page of long reviews
async def get_long_comment(page, movie_id, logger):
    """Open the reviews listing for *movie_id*, visit each review linked
    from the first page, and collect its vote counts and full text.

    Returns:
        List of ``{"useful", "useless", "comment"}`` dicts. On error the
        reviews collected so far are returned (the original returned
        ``None``, which broke callers expecting a list).
    """
    reviews_url = f"https://movie.douban.com/subject/{movie_id}/reviews"
    user_comment = []
    try:
        await page.goto(reviews_url)
        await page.wait_for_load_state('domcontentloaded')

        # Links to the individual review pages.
        c_hrefs = await page.locator('div.main-bd').locator('h2').locator('a').evaluate_all(
            'nodes => nodes.map(node => node.getAttribute("href"))')
        for href in c_hrefs:
            await page.goto(href)
            await page.wait_for_load_state('domcontentloaded')

            used_ = await page.locator('div.main-bd').all()
            used_attr = await used_[0].get_attribute('data-ad-ext')  # "useful & useless" counts
            # Fix: re.search may return None (attribute missing/reworded);
            # don't let one bad review abort the whole loop.
            match = re.search(r'有用(\d+).+没用(\d+)', used_attr or '')
            useful, useless = (match.group(1), match.group(2)) if match else ('0', '0')

            # Review body lives in the div whose data-url is this href.
            comment_p = await page.locator(f'div[data-url="{href}"]').all()
            paragraphs = await comment_p[0].locator('p').all()
            comment = ''
            for pcontent in paragraphs:
                comment = comment + '\n' + await pcontent.inner_text()

            user_comment.append({"useful": useful, "useless": useless, "comment": comment})

    except Exception as e:
        logger.error(e)

    return user_comment


# Crawl long reviews across all listing pages
async def get_movie_comments(page, movie_id, logger):
    """Walk the paginated reviews listing for *movie_id*, parsing one
    review record per page via ``parse_comment_page``.

    Returns:
        List of parsed page dicts; whatever was collected so far on error.
    """
    # Short comments:  https://movie.douban.com/subject/<id>/comments?status=P
    # Long reviews:    https://movie.douban.com/subject/<id>/reviews
    reviews_url = f"https://movie.douban.com/subject/{movie_id}/reviews"
    comments = []
    # "Next page" link in the paginator.
    next_button_selector = '//*[@id="paginator"]/a[contains(@class,"next")]'
    try:
        await page.goto(reviews_url)
        await page.wait_for_load_state('domcontentloaded')

        while True:
            # Fix: the original parsed the first page twice (once before
            # the loop and again inside it), duplicating its entry, and
            # relied on a wait_for_selector timeout exception to stop on
            # the last page.
            comments.append(await parse_comment_page(page))

            # Last page: no visible "next" link — stop cleanly.
            if not await page.locator(next_button_selector).is_visible():
                break

            await page.click(next_button_selector)
            # Wait for the new page to settle before parsing it.
            await page.wait_for_load_state('networkidle')

    except Exception as e:
        logger.warning(e)

    return comments


# Short comments ("短评")
async def get_movie_comments_short(page, movie_id, logger):
    """Open the short-comment listing for *movie_id* and parse its first page.

    Returns the parsed comment list, or None when navigation/parsing fails.
    """
    listing_url = f"https://movie.douban.com/subject/{movie_id}/comments?status=P"
    try:
        await page.goto(listing_url)  # open the listing page
        await page.wait_for_load_state('networkidle')
        return await short_comment_page_parse(page)
    except Exception as e:
        logger.error(e)
    return


# Parse one page of short comments
async def short_comment_page_parse(page):
    """Turn every ``.comment-item`` on the current page into a dict with
    nick / status / time / ip / vote / comment fields.

    NOTE(review): assumes the h3 header's second line is
    "nick status time ip" separated by single spaces — verify against
    the live page markup.
    """
    parsed = []
    items = await page.locator('//*[@id="comments"]/div[contains(@class, "comment-item")]').all()
    for item in items:
        block = item.locator('div.comment')
        header = await block.locator('h3').inner_text()
        lines = header.split('\n')
        vote = lines[0]  # vote count
        fields = lines[1].split(' ')  # nick / watched-or-wish / date / ip
        text = await block.locator('p').locator('span').inner_text()
        parsed.append({
            "nick": fields[0],
            "status": fields[1],
            "time": fields[2],
            "ip": fields[3],
            "vote": vote,
            "comment": text,
        })
    return parsed


# Parse one review listing page
async def parse_comment_page(page):
    """Scrape a single review item off the current page: its text, the
    star-rating element's class/title attributes (which encode the score),
    and the "useful" vote count."""
    result = {}
    # Review text
    result["comments_text"] = await page.locator(
        '//*[@id="comments"]/div[contains(@class, "comment-item ")]/div[contains(@class,"comment")]/p/span'
    ).inner_text()
    # The rating span's class and title both encode the star rating.
    rating = page.locator('span.allstar40')
    result["class_name"] = await rating.get_attribute('class')
    result["title"] = await rating.get_attribute('title')
    # How many people found the review useful.
    result["votes"] = await page.locator(
        '//*[@id="comments"]/div[contains(@class, "comment-item ")]/div[contains(@class,"comment")]/h3/span[contains(@class, "comment-vote")]/span'
    ).inner_text()
    return result

