import requests
from urllib.parse import quote, urlparse
from lxml import etree
import re
import xml.etree.ElementTree as ET
from xml.dom import minidom
import os
from datetime import datetime


class DoubanMovieUtils:
    """Douban movie information extractor.

    Searches Douban for a title, scrapes the detail page into a
    structured dict, generates Emby-compatible NFO XML, and can
    download the movie poster.
    """

    # Seconds before an HTTP request is aborted. Without a timeout,
    # requests waits forever on a stalled connection.
    REQUEST_TIMEOUT = 10

    def __init__(self, cookie, media_name, poster_dir="posters"):
        """
        Initialize the utility.

        :param cookie: Douban site cookie string, sent with every request
        :param media_name: media title to search for
        :param poster_dir: directory posters are saved into, default "posters"
        """
        self.cookie = cookie
        self.media_name = media_name
        self.encoded_name = quote(media_name)
        self.headers = self._get_headers()
        self.search_url = f"https://movie.douban.com/j/subject_suggest?q={self.encoded_name}"
        self.poster_dir = poster_dir

        # exist_ok=True avoids the check-then-create race of the previous
        # os.path.exists() + os.makedirs() sequence.
        os.makedirs(self.poster_dir, exist_ok=True)

    def _get_headers(self):
        """Build request headers: session cookie, browser UA, AJAX marker."""
        return {
            "cookie": self.cookie,
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/139.0.0.0 Safari/537.36 Edg/139.0.0.0",
            "x-requested-with": "XMLHttpRequest"
        }

    def search(self):
        """Query the Douban suggest API.

        :return: parsed JSON result list, or None on any network/parse error
        """
        try:
            response = requests.get(
                url=self.search_url,
                headers=self.headers,
                timeout=self.REQUEST_TIMEOUT,  # previously unbounded
            )
            return response.json()
        except Exception as e:
            print(f"搜索出错: {e}")
            return None

    def get_detail_info(self, content_url):
        """
        Scrape a Douban detail page into structured movie data.

        :param content_url: detail page URL
        :return: dict with title, info text, introduction and structured
                 fields, or None when content_url is falsy or scraping fails
        """
        if not content_url:
            return None

        def split_field(matches):
            # First regex capture split on '/' into stripped names; [] if absent.
            return [part.strip() for part in matches[0].split('/')] if matches else []

        try:
            detail_response = requests.get(
                url=content_url,
                headers=self.headers,
                timeout=self.REQUEST_TIMEOUT,
            )
            html = etree.HTML(detail_response.text)

            # Main title (first span of the H1).
            title = html.xpath('//*[@id="content"]/h1/span[1]/text()')
            title_text = title[0].strip() if title else "未知"

            # Original-language title (second span); falls back to main title.
            original_title = html.xpath('//*[@id="content"]/h1/span[2]/text()')
            original_title_text = original_title[0].strip('() ') if original_title else title_text

            # Flatten the #info block into one whitespace-normalized string.
            info = html.xpath('//*[@id="info"]//text()')
            info_text = ' '.join(text.strip() for text in info if text.strip())

            # Extract structured fields; each pattern stops at the next known
            # label (or end of string) to tolerate Douban's varying separators.
            directors = re.findall(r'导演\s*[:：]\s*(.*?)(?:\s+编剧|\s+主演|\s+类型|\s+制片国家|\s+上映日期|$)', info_text)
            writers = re.findall(r'编剧\s*[:：]\s*(.*?)(?:\s+导演|\s+主演|\s+类型|\s+制片国家|\s+上映日期|$)', info_text)
            actors = re.findall(r'主演\s*[:：]\s*(.*?)(?:\s+类型|\s+制片国家|\s+上映日期|\s+片长|$)', info_text)
            genres = re.findall(r'类型\s*[:：]\s*(.*?)(?:\s+制片国家|\s+上映日期|\s+片长|$)', info_text)
            countries = re.findall(r'制片国家/地区\s*[:：]\s*(.*?)(?:\s+语言|\s+上映日期|\s+片长|$)', info_text)
            release_date = re.findall(r'上映日期\s*[:：]\s*(.*?)(?:\s+片长|$)', info_text)
            runtime = re.findall(r'片长\s*[:：]\s*(\d+)\s*分钟', info_text)

            # Numeric subject id from the URL. The original code ran the
            # same findall twice; re.search does it once.
            id_match = re.search(r'subject/(\d+)/', content_url)
            douban_id = id_match.group(1) if id_match else ""

            # Plot synopsis.
            intro = html.xpath('//*[@id="link-report-intra"]/span[1]//text()')
            intro_text = ' '.join(text.strip() for text in intro if text.strip())

            # Aggregate rating ("0" when the page shows none).
            rating = html.xpath('//*[@id="interest_sectl"]//strong/text()')
            rating_value = rating[0].strip() if rating else "0"

            # High-resolution poster URL from the main picture block.
            poster_url = html.xpath('//*[@id="mainpic"]/a/img/@src')
            poster_url = poster_url[0] if poster_url else ""

            return {
                "title": title_text,
                "originaltitle": original_title_text,
                "info": info_text,
                "introduction": intro_text,
                "directors": split_field(directors),
                "writers": split_field(writers),
                "actors": split_field(actors),
                "genres": split_field(genres),
                "countries": split_field(countries),
                "release_date": release_date[0].strip().split('(')[0] if release_date else "",
                "runtime": runtime[0].strip() if runtime else "",
                "rating": rating_value,
                "douban_id": douban_id,
                "url": content_url,
                "poster_url": poster_url  # used by download_poster / NFO <thumb>
            }

        except Exception as e:
            print(f"获取详情出错: {e}")
            return None

    def download_poster(self, poster_url, title, douban_id):
        """
        Download the movie poster into self.poster_dir.

        :param poster_url: poster image URL
        :param title: movie title, used to build the file name
        :param douban_id: Douban id (kept for interface compatibility; unused)
        :return: path of the saved poster, or None on failure
        """
        if not poster_url:
            print("没有海报URL，无法下载")
            return None

        try:
            # Derive the extension from the URL path; default to .jpg.
            parsed_url = urlparse(poster_url)
            file_ext = os.path.splitext(parsed_url.path)[1] or ".jpg"

            # Strip characters illegal in filenames on common filesystems
            # (backslash added; same character class as save_nfo).
            safe_title = re.sub(r'[\\/:*?"<>|]', '', title)
            filename = f"{safe_title}{file_ext}"
            filepath = os.path.join(self.poster_dir, filename)

            # Skip the download when the poster is already on disk.
            if os.path.exists(filepath):
                print(f"海报已存在: {filepath}")
                return filepath

            # Stream the image to disk in 1 KiB chunks.
            response = requests.get(poster_url, headers=self.headers, stream=True,
                                    timeout=self.REQUEST_TIMEOUT)
            if response.status_code == 200:
                with open(filepath, 'wb') as f:
                    for chunk in response.iter_content(1024):
                        f.write(chunk)
                print(f"海报已下载: {filepath}")
                return filepath
            else:
                print(f"下载海报失败，状态码: {response.status_code}")
                return None

        except Exception as e:
            print(f"下载海报出错: {e}")
            return None

    def generate_nfo(self, detail_info):
        """
        Build an Emby-compatible NFO XML document.

        Only the tags this tool needs are emitted: title, originaltitle,
        thumb (when a poster URL exists), actor and genre.

        :param detail_info: movie detail dict from get_detail_info()
        :return: pretty-printed XML string, or None if detail_info is falsy
        """
        if not detail_info:
            return None

        root = ET.Element("movie")

        ET.SubElement(root, "title").text = detail_info.get("title", "")
        ET.SubElement(root, "originaltitle").text = detail_info.get("originaltitle", "")

        # Poster link, only when one was scraped.
        if detail_info.get("poster_url"):
            ET.SubElement(root, "thumb").text = str(detail_info["poster_url"])

        # One <actor><name>...</name></actor> element per cast member.
        for actor in detail_info.get("actors", []):
            act_elem = ET.SubElement(root, "actor")
            ET.SubElement(act_elem, "name").text = str(actor)

        # One <genre> element per genre.
        for genre in detail_info.get("genres", []):
            ET.SubElement(root, "genre").text = str(genre)

        # Round-trip through minidom for a human-readable, indented file.
        rough_string = ET.tostring(root, 'utf-8')
        reparsed = minidom.parseString(rough_string)
        return reparsed.toprettyxml(indent="  ", encoding="utf-8").decode("utf-8")

    def save_nfo(self, nfo_content, file_name=None):
        """
        Write NFO content to a file.

        :param nfo_content: NFO XML string
        :param file_name: target file name; defaults to "<title>.nfo"
        :return: True on success, False otherwise
        """
        if not nfo_content:
            return False

        try:
            if not file_name:
                # Derive the file name from the <title> element of the XML,
                # stripping filesystem-illegal characters (backslash added;
                # the old `\/` escape only matched '/').
                root = ET.fromstring(nfo_content)
                title_elem = root.find("title")
                if title_elem is not None and title_elem.text:
                    file_name = re.sub(r'[\\/:*?"<>|]', '', title_elem.text) + ".nfo"
                else:
                    file_name = "movie.nfo"

            with open(file_name, 'w', encoding='utf-8') as f:
                f.write(nfo_content)
            print(f"NFO文件已保存: {file_name}")
            return True
        except Exception as e:
            print(f"保存NFO文件出错: {e}")
            return False

    def run(self, save_nfo=False, download_poster=False):
        """
        Search, print every result, and optionally persist NFO/poster.

        :param save_nfo: write an NFO file per result when True
        :param download_poster: download the poster per result when True
        """
        results = self.search()

        # Guard clause instead of wrapping the whole loop in an if-block.
        if not results:
            print(f"没有找到与 '{self.media_name}' 相关的结果")
            return

        print(f"搜索 '{self.media_name}' 找到 {len(results)} 个相关结果：\n")

        for i, item in enumerate(results, 1):
            print(f"===== 结果 {i} =====")
            print(f"标题: {item.get('title', '未知')}")
            print(f"类型: {item.get('type', '未知')}")
            print(f"年份: {item.get('year', '未知')}")
            print(f"集数: {item.get('episode', '未知')}")
            print(f"海报URL: {item.get('img', '无')}")

            detail_info = self.get_detail_info(item.get('url'))
            if detail_info:
                print(f"详情页标题: {detail_info['title']}")
                print(f"基本信息: {detail_info['info']}...")
                print(f"剧情简介: {detail_info['introduction']}...")

                nfo_content = self.generate_nfo(detail_info)
                if nfo_content and save_nfo:
                    self.save_nfo(nfo_content)

                if download_poster and detail_info.get('poster_url'):
                    self.download_poster(
                        detail_info['poster_url'],
                        detail_info['title'],
                        detail_info['douban_id']
                    )

            print("\n" + "-" * 50 + "\n")