from bs4 import BeautifulSoup
import re
from urllib.parse import quote
from datetime import datetime
import os
try:
    from PIL import Image
    PIL_AVAILABLE = True
except ImportError:
    PIL_AVAILABLE = False

# 导入新的模块
from .config import IMAGE_SIZES, IMAGE_QUALITY, TMDB_BASE_URL, TMDB_SEARCH_URL, LIST_FILE_NAME, LOST_FILE_NAME
from .utils import FileUtils
from .base_scraper import BaseScraper

 
class MovieScraper(BaseScraper):
    """Movie metadata scraper (integrated from crab.py).

    Parses a TMDB movie page into a metadata dict, renders it as a
    Kodi-compatible ``.nfo`` XML file named after the movie's largest
    video file, and optionally downloads the poster and fanart images.
    """

    def __init__(self, gui_callback=None):
        """Initialize the scraper.

        Args:
            gui_callback (callable | None): optional GUI callback consumed
                by the :class:`BaseScraper` logging helpers.
        """
        super().__init__(gui_callback)
        self.download_images = True  # download poster and fanart by default

    def download_poster(self, poster_url, file_path, isfanart=False):
        """Download one poster/fanart image to *file_path* and resize it.

        Args:
            poster_url (str): source image URL.
            file_path (str): destination path on disk.
            isfanart (bool): True for fanart, False for poster; selects the
                target size and the wording used in log messages.

        Raises:
            Exception: any download or resize failure, re-raised with a
                localized message and the original exception chained.
        """
        image_type = "同人画" if isfanart else "海报"
        try:
            # The base class owns the HTTP session / retry behavior.
            self.download_file(poster_url, file_path)

            # Normalize the image to the configured poster/fanart size.
            self._resize_image(file_path, isfanart)

            self._log_success(f"{image_type}下载成功: {file_path}")
        except Exception as e:
            # Chain the cause so the original traceback is preserved.
            raise Exception(f"{image_type}下载失败: {e}") from e

    def _resize_image(self, file_path, isfanart=False):
        """Resize the image at *file_path* to the configured dimensions.

        Skips the step (with a warning) when Pillow is unavailable; resize
        failures are logged but never raised, because the full-size image
        is still usable.
        """
        if not PIL_AVAILABLE:
            self._log_warning("PIL库未安装，跳过图片尺寸调整")
            return

        display_name = "同人画" if isfanart else "海报"
        try:
            # Target dimensions come from the shared configuration module.
            image_type = "fanart" if isfanart else "poster"
            target_size = IMAGE_SIZES[image_type]

            with Image.open(file_path) as img:
                # LANCZOS resampling gives the best downscaling quality.
                resized_img = img.resize(target_size, Image.Resampling.LANCZOS)

                # Overwrite the original file with the resized version.
                resized_img.save(file_path, quality=IMAGE_QUALITY, optimize=True)

            self._log(f"🖼️ {display_name}尺寸已调整为 {target_size[0]}x{target_size[1]}")

        except Exception as e:
            self._log_warning(f"{display_name}尺寸调整失败: {e}")

    def extract_movie_info(self, soup, movie_url):
        """Extract movie metadata from a parsed TMDB movie page.

        Args:
            soup (BeautifulSoup): parsed HTML of the movie page.
            movie_url (str): page URL; currently unused, kept for
                interface stability.

        Returns:
            dict: metadata with string fields (title, originaltitle, year,
            rating, plot, tagline, runtime, premiered, country) and list
            fields (genres, directors, writers, actors, posters, fanarts).
            Fields that cannot be parsed are left empty.
        """
        movie_info = {
            'title': '',
            'originaltitle': '',
            'year': '',
            'rating': '',
            'plot': '',
            'tagline': '',
            'runtime': '',
            'premiered': '',
            'country': '',
            'genres': [],
            'directors': [],
            'writers': [],
            'actors': [],
            'posters': [],
            'fanarts': []
        }

        # Title: first <h2>, stripping a trailing "(YYYY)" year suffix.
        title_elem = soup.find('h2')
        if title_elem:
            title_text = title_elem.get_text().strip()
            title_clean = re.sub(r'\s*\(\d{4}\)\s*$', '', title_text)
            movie_info['title'] = title_clean
            movie_info['originaltitle'] = title_clean

        # Release date and year from <span class="release">.
        release_elem = soup.find('span', class_='release')
        if release_elem:
            release_text = release_elem.get_text().strip()
            # Drop the parenthesized region marker, e.g. "(US)".
            movie_info['premiered'] = re.sub(r'\([^)]*\)', '', release_text).strip()
            year_match = re.search(r'(\d{4})', release_text)
            if year_match:
                movie_info['year'] = year_match.group(1)

        # Rating: TMDB stores the user score percentage in data-percent.
        rating_elem = soup.find('div', class_='user_score_chart')
        if rating_elem:
            rating_text = rating_elem.get('data-percent')
            if rating_text:
                try:
                    # Convert a 0-100 percentage to a 0-10 rating scale.
                    rating_value = float(rating_text) / 10
                    movie_info['rating'] = str(rating_value)
                except (TypeError, ValueError):
                    # Non-numeric attribute value: leave rating empty.
                    pass

        # Genres: all links inside <span class="genres">.
        genres_section = soup.find('span', class_='genres')
        if genres_section:
            genre_links = genres_section.find_all('a')
            movie_info['genres'] = [link.get_text().strip() for link in genre_links]

        # Runtime: "Xh Ym" converted to total minutes.
        runtime_elem = soup.find('span', class_='runtime')
        if runtime_elem:
            runtime_text = runtime_elem.get_text().strip()
            match = re.match(r'(\d+)\s*h\s*(\d+)\s*m', runtime_text)
            if match:
                hours = int(match.group(1))
                minutes = int(match.group(2))
                movie_info['runtime'] = str(hours * 60 + minutes)

        # Plot: first paragraph in the overview block; very short texts
        # are ignored as they are usually placeholders.
        overview_div = soup.find('div', class_='overview')
        if overview_div:
            overview_p = overview_div.find('p')
            if overview_p:
                overview_text = overview_p.get_text().strip()
                if len(overview_text) > 20:
                    movie_info['plot'] = overview_text

        # Directors: the name link sits in the <p> immediately before the
        # <p> whose text contains "Director".
        all_p_tags = soup.find_all('p')
        for i, p_tag in enumerate(all_p_tags):
            if 'Director' in p_tag.get_text():
                if i > 0:
                    prev_p = all_p_tags[i-1]
                    director_link = prev_p.find('a')
                    if director_link:
                        movie_info['directors'].append(director_link.get_text().strip())

        # Cast: the second <ol> on the page holds the top-billed cast.
        ol_tags = soup.find_all('ol')
        if len(ol_tags) >= 2:
            cast_ol = ol_tags[1]
            cast_items = cast_ol.find_all('li')
            for i, li in enumerate(cast_items):
                img_elem = li.find('img')
                actor_name = ''
                thumb_url = ''
                if img_elem:
                    actor_name = img_elem.get('alt', '').strip()
                    thumb_url = img_elem.get('src', '')
                if not actor_name:
                    # Fallback: the second link in the item is the name link.
                    name_links = li.find_all('a')
                    if len(name_links) >= 2:
                        actor_name = name_links[1].get_text().strip()
                character_elem = li.find('p', class_='character')
                character_name = character_elem.get_text().strip() if character_elem else ''
                if actor_name:
                    movie_info['actors'].append({
                        'name': actor_name,
                        'role': character_name,
                        'order': i,
                        'thumb': thumb_url
                    })

        # Posters: derive preview (w500) and full-size (original) URLs by
        # swapping the size segment in the thumbnail path.
        poster_imgs = soup.find_all('img', class_='poster')
        for img in poster_imgs:
            src = img.get('src')
            if src:
                preview_url = src.replace('/w300_and_h450_bestv2', '/w500')
                full_url = src.replace('/w300_and_h450_bestv2', '/original')
                movie_info['posters'].append({
                    'preview': preview_url,
                    'full': full_url
                })

        # Fanart (backdrops): same URL rewriting with backdrop size tokens.
        backdrop_imgs = soup.find_all('img', class_='backdrop')
        for img in backdrop_imgs:
            src = img.get('src')
            if src:
                preview_url = src.replace('/w533_and_h300_bestv2', '/w780')
                full_url = src.replace('/w533_and_h300_bestv2', '/original')
                movie_info['fanarts'].append({
                    'preview': preview_url,
                    'full': full_url
                })

        # Country: crude heuristic based on the "default language" text on
        # the Chinese-localized page.
        body_text = soup.get_text()
        if '默认语言' in body_text:
            if '汉语' in body_text or '中文' in body_text:
                movie_info['country'] = '中国'
            elif '英语' in body_text or 'English' in body_text:
                movie_info['country'] = '美国'
            else:
                movie_info['country'] = '其它'
        return movie_info

    def generate_xml(self, movie_info):
        """Render *movie_info* as a Kodi-compatible ``<movie>`` NFO document.

        Args:
            movie_info (dict): metadata produced by extract_movie_info().

        Returns:
            str: the complete XML document text.
        """
        xml = '<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>\n'
        xml += '<movie>\n'
        xml += f'    <title>{self.escape_xml(movie_info.get("title", ""))}</title>\n'
        xml += f'    <originaltitle>{self.escape_xml(movie_info.get("originaltitle", ""))}</originaltitle>\n'
        if movie_info.get('rating'):
            xml += '    <ratings>\n'
            xml += '        <rating name="themoviedb" max="10" default="true">\n'
            xml += f'            <value>{movie_info["rating"]}</value>\n'
            xml += '            <votes>0</votes>\n'
            xml += '        </rating>\n'
            xml += '    </ratings>\n'
        xml += '    <userrating>0</userrating>\n'
        xml += '    <top250>0</top250>\n'
        xml += '    <outline></outline>\n'
        xml += f'    <plot>{self.escape_xml(movie_info.get("plot", ""))}</plot>\n'
        xml += f'    <tagline>{self.escape_xml(movie_info.get("tagline", ""))}</tagline>\n'
        xml += f'    <runtime>{movie_info.get("runtime", "0")}</runtime>\n'
        # URLs are escaped too: an '&' in a query string would otherwise
        # produce invalid XML.
        if movie_info.get('posters'):
            for poster in movie_info['posters']:
                xml += (f'    <thumb aspect="poster" preview="{self.escape_xml(poster["preview"])}">'
                        f'{self.escape_xml(poster["full"])}</thumb>\n')
        if movie_info.get('fanarts'):
            xml += '    <fanart>\n'
            for fanart in movie_info['fanarts']:
                xml += (f'        <thumb preview="{self.escape_xml(fanart["preview"])}">'
                        f'{self.escape_xml(fanart["full"])}</thumb>\n')
            xml += '    </fanart>\n'
        xml += '    <mpaa></mpaa>\n'
        xml += '    <playcount>0</playcount>\n'
        xml += '    <lastplayed></lastplayed>\n'
        xml += '    <id></id>\n'
        xml += '    <uniqueid type="imdb" default="true"></uniqueid>\n'
        xml += '    <uniqueid type="tmdb"></uniqueid>\n'
        if movie_info.get('genres'):
            for genre in movie_info['genres']:
                xml += f'    <genre>{self.escape_xml(genre)}</genre>\n'
        if movie_info.get('country'):
            xml += f'    <country>{self.escape_xml(movie_info["country"])}</country>\n'
        if movie_info.get('writers'):
            for writer in movie_info['writers']:
                xml += f'    <credits>{self.escape_xml(writer)}</credits>\n'
        if movie_info.get('directors'):
            for director in movie_info['directors']:
                xml += f'    <director>{self.escape_xml(director)}</director>\n'
        if movie_info.get('premiered'):
            xml += f'    <premiered>{movie_info["premiered"]}</premiered>\n'
        if movie_info.get('year'):
            xml += f'    <year>{movie_info["year"]}</year>\n'
        xml += '    <status></status>\n'
        xml += '    <code></code>\n'
        xml += '    <aired></aired>\n'
        xml += '    <studio></studio>\n'
        xml += '    <trailer></trailer>\n'
        if movie_info.get('actors'):
            for actor in movie_info['actors']:
                xml += '    <actor>\n'
                xml += f'        <name>{self.escape_xml(actor.get("name", ""))}</name>\n'
                xml += f'        <role>{self.escape_xml(actor.get("role", ""))}</role>\n'
                xml += f'        <order>{actor.get("order", 0)}</order>\n'
                xml += f'        <thumb>{self.escape_xml(actor.get("thumb", ""))}</thumb>\n'
                xml += '    </actor>\n'
        xml += '    <resume>\n'
        xml += '        <position>0.000000</position>\n'
        xml += '        <total>0.000000</total>\n'
        xml += '    </resume>\n'
        xml += f'    <dateadded>{datetime.now().strftime("%Y-%m-%d %H:%M:%S")}</dateadded>\n'
        xml += '</movie>\n'
        return xml

    def escape_xml(self, text):
        """Escape the five XML special characters in *text*.

        Returns '' for falsy input so callers can pass missing fields
        straight through.
        """
        if not text:
            return ''
        return (text
                .replace('&', '&amp;')
                .replace('<', '&lt;')
                .replace('>', '&gt;')
                .replace('"', '&quot;')
                .replace("'", '&apos;'))

    def get_largest_video_file(self, movie_folder_path):
        """Return the basename (no extension) of the largest video file.

        Falls back to the folder name itself when the lookup fails, so the
        NFO/poster naming still has something to work with.
        """
        try:
            return FileUtils.get_largest_video_file(movie_folder_path)
        except Exception as error:
            self._log_error(f"获取视频文件时出错: {error}")
            return os.path.basename(movie_folder_path)

    def scrape_movie_info_to_folder(self, movie_url, target_folder, movie_name):
        """Scrape *movie_url* and write an NFO (plus optional images) into *target_folder*.

        Args:
            movie_url (str): TMDB movie page URL.
            target_folder (str): movie folder receiving the NFO/images.
            movie_name (str): display name used in log messages.

        Returns:
            dict: the extracted movie info.

        Raises:
            Exception: re-raised after logging when any step fails.
        """
        try:
            self._log(f"🎬 正在爬取电影信息: {movie_name}")
            response = self.get(movie_url)
            soup = BeautifulSoup(response.text, 'html.parser')
            movie_info = self.extract_movie_info(soup, movie_url)
            xml_content = self.generate_xml(movie_info)
            # The NFO is named after the largest video file, Kodi-style.
            video_file_name = self.get_largest_video_file(target_folder)
            nfo_file_path = os.path.join(target_folder, f"{video_file_name}.nfo")
            with open(nfo_file_path, 'w', encoding='utf-8') as f:
                f.write(xml_content)
            self._log(f"✅ NFO文件已保存: {nfo_file_path}")

            # Download poster/fanart only when the feature flag is on.
            if self.download_images:
                self._download_movie_images(movie_info, target_folder, video_file_name)
            else:
                self._log(f"ℹ️ 跳过海报与同人画下载: {movie_name}")

            return movie_info
        except Exception as error:
            self._log(f"❌ 爬取失败: {movie_name} - {error}")
            # Bare raise preserves the original traceback.
            raise

    def download_images_from_url(self, movie_url, movie_folder_path, movie_name, video_file_name):
        """Download poster and fanart from *movie_url* (NFO already exists).

        Errors are logged, never raised: this is a best-effort backfill.
        """
        try:
            # Re-fetch the page purely to extract the image URLs.
            response = self.get(movie_url)
            soup = BeautifulSoup(response.text, 'html.parser')
            movie_info = self.extract_movie_info(soup, movie_url)

            # Shared poster/fanart download logic.
            self._download_movie_images(movie_info, movie_folder_path, video_file_name)

        except Exception as error:
            self._log(f"❌ 海报下载过程出错: {movie_name} - {error}")

    def _download_movie_images(self, movie_info, target_folder, video_file_name):
        """Download the first poster and first fanart, skipping existing files."""
        if movie_info.get('posters'):
            self._download_single_image(
                movie_info['posters'][0]['preview'],
                os.path.join(target_folder, f"{video_file_name}-poster.jpg"),
                target_folder,
                isfanart=False)

        if movie_info.get('fanarts'):
            self._download_single_image(
                movie_info['fanarts'][0]['preview'],
                os.path.join(target_folder, f"{video_file_name}-fanart.jpg"),
                target_folder,
                isfanart=True)

    def _download_single_image(self, url, file_path, target_folder, isfanart):
        """Download one poster/fanart image unless it already exists.

        Failures are logged and appended to the lost file in
        *target_folder* instead of being raised.
        """
        display_name = "同人画" if isfanart else "海报"
        if os.path.exists(file_path):
            self._log(f"⚠️ {display_name}文件已存在，跳过下载: {file_path}")
            return
        try:
            self.download_poster(url, file_path, isfanart=isfanart)
        except Exception as e:
            error_msg = f"{display_name}下载失败: {url} - {str(e)}"
            self._log(f"❌ {error_msg}")
            # Record the failure in lost.txt for later manual retry.
            with open(os.path.join(target_folder, LOST_FILE_NAME), "a", encoding="utf-8") as f:
                f.write(f"{datetime.now()} - {error_msg}\n")

    def parse_nfo_for_images(self, nfo_file_path):
        """Parse an NFO file and collect poster and fanart image URLs.

        Args:
            nfo_file_path (str): path to the ``.nfo`` XML file.

        Returns:
            tuple[list[str], list[str]]: (poster_urls, fanart_urls); both
            empty when parsing fails.
        """
        poster_urls = []
        fanart_urls = []

        try:
            import xml.etree.ElementTree as ET

            tree = ET.parse(nfo_file_path)
            root = tree.getroot()

            # Posters: top-level <thumb aspect="poster"> elements.
            for thumb in root.findall('thumb'):
                aspect = thumb.get('aspect')
                if aspect == 'poster':
                    url = thumb.text
                    if url and url.strip():
                        poster_urls.append(url.strip())

            # Fanart: <thumb> elements nested under <fanart>.
            fanart_element = root.find('fanart')
            if fanart_element is not None:
                for thumb in fanart_element.findall('thumb'):
                    url = thumb.text
                    if url and url.strip():
                        fanart_urls.append(url.strip())

            self._log(f"📄 NFO解析结果: 海报链接 {len(poster_urls)} 个，同人画链接 {len(fanart_urls)} 个")

        except Exception as e:
            self._log(f"❌ NFO文件解析失败: {e}")

        return poster_urls, fanart_urls

    def process_nfo_poster_download(self, folder_path, get_largest_video_file_func, progress_callback=None):
        """Batch-download posters/fanart for every movie folder with an NFO.

        Args:
            folder_path (str): root directory containing movie sub-folders.
            get_largest_video_file_func (callable): maps a movie folder
                path to its video file basename (no extension).
            progress_callback (callable | None): receives (percent, text)
                progress updates.

        Returns:
            tuple[int, int, int]: (success, failed, skipped) folder counts.

        Raises:
            Exception: re-raised after logging when the batch itself fails
            (per-folder errors only increment the failed counter).
        """
        try:
            # Phase 1: scan folders (0-10% of the progress bar).
            if progress_callback:
                progress_callback(0, "正在扫描电影文件夹...")

            movie_folders = []
            for item in os.listdir(folder_path):
                item_path = os.path.join(folder_path, item)
                if os.path.isdir(item_path):
                    movie_folders.append((item, item_path))

            self._log(f"📁 找到 {len(movie_folders)} 个电影文件夹")

            if not movie_folders:
                self._log("⚠️ 未找到任何电影文件夹")
                return 0, 0, 0

            # Phase 2: process folders (10-95% of the progress bar).
            if progress_callback:
                progress_callback(10, f"开始处理 {len(movie_folders)} 个文件夹...")

            success_count = 0
            failed_count = 0
            skipped_count = 0
            total_folders = len(movie_folders)

            for i, (folder_name, movie_folder_path) in enumerate(movie_folders):
                if progress_callback:
                    progress = int(10 + (i / total_folders) * 85)
                    current_folder = folder_name[:15] + "..." if len(folder_name) > 15 else folder_name
                    progress_callback(progress, f"下载中: {current_folder} ({i+1}/{total_folders})")
                self._log(f"\n📂 处理文件夹 ({i+1}/{len(movie_folders)}): {folder_name}")

                try:
                    video_file_name = get_largest_video_file_func(movie_folder_path)

                    # No NFO means nothing to parse: skip the folder.
                    nfo_file_path = os.path.join(movie_folder_path, f"{video_file_name}.nfo")
                    if not os.path.exists(nfo_file_path):
                        self._log(f"⏭️ 未找到NFO文件，跳过: {folder_name}")
                        skipped_count += 1
                        continue

                    poster_path = os.path.join(movie_folder_path, f"{video_file_name}-poster.jpg")
                    fanart_path = os.path.join(movie_folder_path, f"{video_file_name}-fanart.jpg")

                    poster_exists = os.path.exists(poster_path)
                    fanart_exists = os.path.exists(fanart_path)

                    # Both images present: nothing left to download.
                    if poster_exists and fanart_exists:
                        self._log(f"✅ 海报和同人画已存在，跳过: {folder_name}")
                        skipped_count += 1
                        continue

                    poster_urls, fanart_urls = self.parse_nfo_for_images(nfo_file_path)

                    downloaded_any = False

                    # Poster: only the first URL in the NFO is used.
                    if not poster_exists and poster_urls:
                        try:
                            poster_url = poster_urls[0]
                            self.download_poster(poster_url, poster_path, isfanart=False)
                            self._log(f"✅ 海报下载成功: {video_file_name}-poster.jpg")
                            downloaded_any = True
                        except Exception as e:
                            self._log(f"❌ 海报下载失败: {e}")
                    elif poster_exists:
                        self._log(f"ℹ️ 海报已存在: {video_file_name}-poster.jpg")

                    # Fanart: same first-URL policy.
                    if not fanart_exists and fanart_urls:
                        try:
                            fanart_url = fanart_urls[0]
                            self.download_poster(fanart_url, fanart_path, isfanart=True)
                            self._log(f"✅ 同人画下载成功: {video_file_name}-fanart.jpg")
                            downloaded_any = True
                        except Exception as e:
                            self._log(f"❌ 同人画下载失败: {e}")
                    elif fanart_exists:
                        self._log(f"ℹ️ 同人画已存在: {video_file_name}-fanart.jpg")

                    if downloaded_any:
                        success_count += 1
                    else:
                        skipped_count += 1

                    # Throttle requests between folders (not after the last).
                    if i < len(movie_folders) - 1:
                        self.add_delay(1)

                except Exception as error:
                    self._log(f"❌ 处理失败: {folder_name} - {error}")
                    failed_count += 1

            # Phase 3: finish up (95-100% of the progress bar).
            if progress_callback:
                progress_callback(95, "正在完成下载...")

            self._log(f"\n🎉 根据NFO文件下载海报完成!")
            self._log(f"📊 统计: 成功 {success_count} 个，失败 {failed_count} 个，跳过 {skipped_count} 个")

            if progress_callback:
                progress_callback(100, "下载完成")

            return success_count, failed_count, skipped_count

        except Exception as error:
            self._log(f"❌ 处理过程出错: {error}")
            # Bare raise preserves the original traceback.
            raise

# ====== MovieScraper类结束 ======

class ListGenerator(BaseScraper):
    """TMDb movie list generator.

    Scans a movie directory, searches TMDb for each sub-folder name, and
    writes confirmed ``name||url`` pairs to the list file. Unresolved,
    skipped, or manually-confirmed entries are appended to the lost file
    for later review.
    """

    def __init__(self, gui_callback=None):
        """Initialize a ListGenerator instance.

        Args:
            gui_callback (callable | None): GUI callback used for log and
                progress output.
        """
        super().__init__(gui_callback)
        # Extra request header for ListGenerator's plain-HTML browsing.
        self.headers['upgrade-insecure-requests'] = '1'
        self.session.headers.update(self.headers)

    def search_movie(self, movie_name):
        """Search the TMDb website for a movie.

        Args:
            movie_name (str): movie title to search for.

        Returns:
            list[dict]: results with 'title', 'year' and absolute 'url'
            keys; empty when nothing matched or an error occurred.
        """
        try:
            search_url = f"{TMDB_SEARCH_URL}{quote(movie_name)}"
            self._log(f"🔍 搜索电影: {movie_name}")

            response = self.get(search_url)

            soup = BeautifulSoup(response.text, 'html.parser')

            # Movie results live inside div.search_results.movie cards.
            movie_results = soup.select('div.search_results.movie div.results.flex div.card')
            results = []

            for elem in movie_results:
                # A card contains several a.result links (image + title).
                links = elem.select('a.result')

                # Only the link wrapping an <h2> carries the title text.
                for link in links:
                    href = link.get('href')
                    title_elem = link.select_one('h2')

                    if title_elem:
                        title = title_elem.get_text(strip=True)
                        year_elem = elem.select_one('span.release_date')
                        year = year_elem.get_text(strip=True) if year_elem else ''

                        if href and title:
                            results.append({
                                'title': title,
                                'year': year,
                                'url': f"{TMDB_BASE_URL}{href}"
                            })
                            break  # one valid result per card is enough

            self._log(f"   找到 {len(results)} 个搜索结果")
            return results

        except Exception as error:
            self._log_error(f"搜索电影 \"{movie_name}\" 时出错: {error}")
            return []

    def get_largest_video_file(self, movie_folder_path):
        """Return the basename (no extension) of the largest video file.

        Falls back to the folder name itself when the lookup fails.
        """
        try:
            return FileUtils.get_largest_video_file(movie_folder_path)
        except Exception as error:
            self._log_error(f"获取视频文件时出错: {error}")
            return os.path.basename(movie_folder_path)

    def get_user_choice(self, movie_name, results, movie_folder_path=None):
        """Present search results and return the user's 1-based choice.

        Args:
            movie_name (str): movie name (folder name).
            results (list): search result dicts.
            movie_folder_path (str | None): movie folder path, used to
                derive the video file name for display.

        Returns:
            int: the chosen index (1..len(results)), or 0 to skip.
        """
        # Prefer the actual video file name as the display label.
        if movie_folder_path:
            display_name = self.get_largest_video_file(movie_folder_path)
        else:
            display_name = movie_name

        self._log(f"\n📋 \"{display_name}\" 找到多个匹配结果:")
        for index, result in enumerate(results):
            year_str = f" ({result['year']})" if result['year'] else ''
            self._log(f"   {index + 1}. {result['title']}{year_str}")
        self._log("   0. 跳过此电影")

        # GUI mode: show a choice dialog on the Tk main thread and wait.
        if self.gui_callback and hasattr(self, '_gui_parent'):
            import threading
            choice_event = threading.Event()
            # Mutable container so the nested closure can store the result.
            choice_result = [0]

            def show_dialog():
                try:
                    from .gui import MovieChoiceDialog
                    dialog = MovieChoiceDialog(self._gui_parent, display_name, results)
                    choice_result[0] = dialog.result
                finally:
                    # Always release the waiting worker thread, even if the
                    # dialog construction raised.
                    choice_event.set()

            # Schedule the dialog on the GUI main thread.
            self._gui_parent.after(0, show_dialog)

            # Block this worker thread until the dialog is dismissed.
            choice_event.wait()

            return choice_result[0]
        else:
            # Command-line mode: prompt until a valid number is entered.
            while True:
                try:
                    choice = input(f"\n请选择对应的电影 (1-{len(results)}, 0=跳过): ")
                    choice = int(choice)
                    if 0 <= choice <= len(results):
                        return choice
                    else:
                        print(f"请输入 0 到 {len(results)} 之间的数字")
                except ValueError:
                    print("请输入有效的数字")

    def read_existing_list(self, movies_dir):
        """Read an existing list file and collect already-processed names.

        Args:
            movies_dir (str): movie directory containing the list file.

        Returns:
            set[str]: names of movies already present in the list file.
        """
        list_file_path = os.path.join(movies_dir, LIST_FILE_NAME)
        processed_movies = set()

        if os.path.exists(list_file_path):
            self._log(f"📄 发现已存在的 {LIST_FILE_NAME} 文件: {list_file_path}")
            try:
                with open(list_file_path, 'r', encoding='utf-8') as f:
                    lines = f.readlines()

                for line in lines:
                    trimmed_line = line.strip()
                    # Skip blank lines, "//" comments, and malformed rows;
                    # valid rows look like "name||url".
                    if trimmed_line and not trimmed_line.startswith('//') and '||' in trimmed_line:
                        movie_name = trimmed_line.split('||')[0]
                        if movie_name:
                            processed_movies.add(movie_name.strip())

                self._log(f"   已找到 {len(processed_movies)} 部已处理的电影")

            except Exception as error:
                self._log(f"❌ 读取 {LIST_FILE_NAME} 文件时出错: {error}")

        return processed_movies

    def generate_list(self, movies_dir):
        """Generate the movie list file for every sub-folder of *movies_dir*.

        Each unprocessed folder name is searched on TMDb; single matches
        are written automatically, multiple matches ask the user, and
        misses/skips go to the lost file. Entries are appended one at a
        time so progress survives an interruption.

        Args:
            movies_dir (str): movie directory path.
        """
        try:
            self._log(f"📁 开始扫描电影文件夹: {movies_dir}")

            if not os.path.exists(movies_dir):
                self._log(f"❌ 错误: 文件夹不存在: {movies_dir}")
                return

            # Resume support: skip everything already in the list file.
            processed_movies = self.read_existing_list(movies_dir)
            list_file_path = os.path.join(movies_dir, LIST_FILE_NAME)
            lost_file_path = os.path.join(movies_dir, LOST_FILE_NAME)

            # Collect the immediate sub-folders (one per movie).
            try:
                items = os.listdir(movies_dir)
                movie_folders = [item for item in items if os.path.isdir(os.path.join(movies_dir, item))]
            except Exception as error:
                self._log(f"❌ 读取文件夹时出错: {error}")
                return

            if not movie_folders:
                self._log("⚠️  未找到任何子文件夹")
                return

            self._log(f"📂 找到 {len(movie_folders)} 个电影文件夹")

            unprocessed_movies = [movie for movie in movie_folders if movie not in processed_movies]

            if not unprocessed_movies:
                self._log("✅ 所有电影都已处理完成，无需重新搜索")
                return

            self._log(f"📝 需要处理 {len(unprocessed_movies)} 部新电影: {', '.join(unprocessed_movies)}")
            if processed_movies:
                self._log(f"⏭️  跳过 {len(processed_movies)} 部已处理的电影")

            processed_count = 0
            lost_count = 0

            for i, movie_name in enumerate(unprocessed_movies):
                self._log(f"\n[{i + 1}/{len(unprocessed_movies)}] 处理: {movie_name}")

                search_results = self.search_movie(movie_name)

                if not search_results:
                    self._log(f"❌ 未找到 \"{movie_name}\" 的搜索结果")

                    # Record the miss immediately so it survives a crash.
                    lost_entry = f"未找到结果: {movie_name}"
                    with open(lost_file_path, 'a', encoding='utf-8') as f:
                        f.write(lost_entry + '\n')
                    lost_count += 1

                    self._log(f"   已写入到 lost.txt: {lost_entry}")

                elif len(search_results) == 1:
                    # Exactly one hit: accept it without asking the user.
                    result = search_results[0]
                    year_str = f" ({result['year']})" if result['year'] else ''
                    self._log(f"✅ 自动匹配: {result['title']}{year_str}")

                    entry = f"{movie_name}||{result['url']}"
                    with open(list_file_path, 'a', encoding='utf-8') as f:
                        f.write(entry + '\n')
                    processed_count += 1

                    self._log(f"   已写入到 List.txt: {entry}")

                else:
                    # Multiple hits: ask the user which one is correct.
                    choice = self.get_user_choice(movie_name, search_results, os.path.join(movies_dir, movie_name))

                    if 1 <= choice <= len(search_results):
                        selected_result = search_results[choice - 1]
                        year_str = f" ({selected_result['year']})" if selected_result['year'] else ''
                        self._log(f"✅ 用户选择: {selected_result['title']}{year_str}")

                        entry = f"{movie_name}||{selected_result['url']}"
                        with open(list_file_path, 'a', encoding='utf-8') as f:
                            f.write(entry + '\n')
                        processed_count += 1

                        # Manually-confirmed multi-result picks are also
                        # flagged in the lost file as an audit trail.
                        lost_entry = f"多结果确认: {movie_name}"
                        with open(lost_file_path, 'a', encoding='utf-8') as f:
                            f.write(lost_entry + '\n')
                        lost_count += 1

                        self._log(f"   已写入到 List.txt: {entry}")

                    else:
                        # Choice 0 (or dialog dismissed): skip the movie.
                        self._log(f"⏭️  跳过: {movie_name}")

                        lost_entry = f"用户跳过: {movie_name}"
                        with open(lost_file_path, 'a', encoding='utf-8') as f:
                            f.write(lost_entry + '\n')
                        lost_count += 1

                        self._log(f"   已写入到 {LOST_FILE_NAME}: {lost_entry}")

                # Throttle requests between movies (not after the last).
                if i < len(unprocessed_movies) - 1:
                    self._log('⏳ 等待 1 秒...')
                    self.add_delay(1)

            self._log(f"\n🎉 处理完成!")
            self._log(f"   - 成功处理: {processed_count} 部电影 (已写入 {LIST_FILE_NAME})")
            self._log(f"   - 未处理/跳过: {lost_count} 部电影 (已写入 {LOST_FILE_NAME})")
            self._log(f"   请审阅电影目录下的{LIST_FILE_NAME}和{LOST_FILE_NAME}，方便进行下一步")

        except Exception as error:
            self._log(f'❌ 生成 {LIST_FILE_NAME} 时出错: {error}')
