import requests
import re
import os
import time
import random
import logging
from bs4 import BeautifulSoup
from download_m3u8 import Download
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

# Configure root logging once at import time: INFO and above are appended
# to a local log file as "timestamp - level - message" lines.
# NOTE: basicConfig is a no-op if the root logger already has handlers
# (e.g. when this module is imported by a larger app that configured
# logging first).
logging.basicConfig(
    filename='m3u8_crawler.log',
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)


class M3U8Crawler:
    """Crawler for 700kan.pro video play pages.

    Walks sequential page ids, parses each page for a video title and an
    m3u8 playlist URL, and delegates the actual download to ``Download``.
    The next id to crawl is persisted in ``crawler_index.txt`` so a
    restarted run resumes where the previous one stopped.
    """

    # Page id used when the index file is missing, empty, or unreadable.
    DEFAULT_START_ID = 91973

    def __init__(self):
        # Mobile UA + Referer: the site serves the player markup (with the
        # embedded m3u8 URL) to requests that look like a mobile browser.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Linux; Android 13; SM-A536E) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Mobile Safari/537.36 uacq',
            'Referer': 'https://700kan.pro/',
            'Accept-Language': 'zh-CN,zh;q=0.9'
        }
        # Format slots: page id, play-source index (1-5), episode number.
        self.base_url = "https://700kan.pro/DyttPlay/{}-{}-{}.html"
        self.index_file = "crawler_index.txt"
        self.current_id = self._load_index()
        self.retries = 3  # application-level attempts per URL (see _safe_request)

        # Transport-level retries on top of the _safe_request loop:
        # retry GETs that fail with common transient 5xx responses.
        self.session = requests.Session()
        retry = Retry(
            total=3,
            backoff_factor=0.5,
            status_forcelist=[500, 502, 503, 504]
        )
        adapter = HTTPAdapter(max_retries=retry)
        self.session.mount('http://', adapter)
        self.session.mount('https://', adapter)

    def _load_index(self):
        """Return the persisted page id.

        Falls back to DEFAULT_START_ID when the index file is absent,
        empty, or cannot be read/parsed.
        """
        try:
            if os.path.exists(self.index_file):
                with open(self.index_file, "r") as f:
                    return int(f.read().strip() or self.DEFAULT_START_ID)
            return self.DEFAULT_START_ID
        except Exception as e:
            logging.error(f"加载索引文件失败: {str(e)}")
            return self.DEFAULT_START_ID

    def _save_index(self):
        """Persist ``self.current_id`` so the next run resumes from it."""
        try:
            with open(self.index_file, "w") as f:
                f.write(str(self.current_id))
            print(f"✅ 已保存进度: {self.current_id}")
        except IOError as e:
            logging.error(f"保存索引失败: {str(e)}")

    def _safe_request(self, url):
        """GET *url* with up to ``self.retries`` attempts.

        Returns the response body as text, or None when every attempt
        failed (callers treat None as "skip this URL").
        """
        for attempt in range(self.retries):
            try:
                print(f"🔗 正在请求: {url} (尝试 {attempt + 1}/{self.retries})")
                response = self.session.get(url, headers=self.headers, timeout=10)
                response.raise_for_status()
                return response.text
            except requests.exceptions.RequestException as e:
                # Linear backoff with jitter: 2-5s scaled by attempt number.
                sleep_time = random.randint(2, 5) * (attempt + 1)
                logging.warning(f"请求失败: {url} - {str(e)}, {sleep_time}秒后重试...")
                time.sleep(sleep_time)
        return None

    def _parse_video_info(self, html):
        """Extract ``(name, m3u8_url, tags)`` from a play-page document.

        Returns None when the page does not look like a valid video page
        (missing title, unrecognized title format, or no m3u8 URL).
        """
        try:
            soup = BeautifulSoup(html, 'html.parser')

            title_tag = soup.select_one('title')
            if not title_tag:
                logging.warning("未找到标题标签")
                return None

            # Primary pattern: title wrapped in 《...》. Fall back to the
            # alternate "在线播放... 700看电影天堂网" title layout.
            name_match = re.search(r'《(.*)》', title_tag.text)
            if not name_match:
                logging.warning("标题格式异常")
                name_match = re.search(r'在线播放(.*?) (.*)700看电影天堂网', title_tag.text)
                if not name_match:
                    print(title_tag.text)
                    return None
            name = name_match.group(1)
            print(f"📽️ 发现视频: {name}")

            # The m3u8 URL lives inside an inline player <script>; slashes
            # may be backslash-escaped JSON (https:\/\/...), stripped below.
            script_tag = soup.find('script', string=re.compile(r'\.m3u8'))
            if not script_tag:
                logging.warning("未找到包含m3u8的脚本标签")
                return None

            m3u8_match = re.search(r'(https?:\\?/\\?/[^\s\'"]+\.m3u8)', script_tag.text)
            if not m3u8_match:
                logging.warning("未匹配到m3u8地址")
                return None

            m3u8_url = m3u8_match.group(0).replace('\\', '')
            print(f"🔗 解析到m3u8地址: {m3u8_url}")

            tags = [a.text.strip() for a in soup.select('.module-info-tag-link a')]
            print(f"🏷️ 视频标签: {', '.join(tags)}")

            return name, m3u8_url, tags
        except Exception as e:
            logging.error(f"解析失败: {str(e)}")
            return None

    def process_video(self):
        """Crawl one page id across all five play sources, then advance.

        The index is advanced and persisted *before* downloading so a
        crash mid-download does not re-process the same id forever.
        """
        try:
            self.current_id += 1
            self._save_index()
            page_id = self.current_id - 1  # the id processed in this call
            for source_type in range(1, 6):
                url = self.base_url.format(page_id, source_type, 1)
                html = self._safe_request(url)
                if not html:
                    continue

                result = self._parse_video_info(html)
                if not result:
                    continue

                name, m3u8_url, tags = result
                try:
                    # Suffix the source index so alternate sources of the
                    # same video don't overwrite each other.
                    # BUG FIX: the original line ended with a trailing
                    # comma, turning `name` into a one-element tuple.
                    if source_type != 1:
                        name = f"{name}{source_type}"

                    print(f"⏬ 开始下载: {name}")
                    Download(
                        url=m3u8_url,
                        name=f"{name}",
                        dir="M:\\media",
                        info="".join(tags)
                    )
                    print(f"🎉 下载完成: {name}")
                except Exception as e:
                    logging.error(f"下载失败: {str(e)}")
                    print(f"❌ 下载失败: {name} - {str(e)}")

            # Random pause between ids to avoid hammering the site.
            time.sleep(random.uniform(1, 5))

        except KeyboardInterrupt:
            print("\n🛑 用户中断操作，正在保存进度...")
            self._save_index()
            raise SystemExit(0)


def _main():
    """Script entry point: build a crawler and process ids forever."""
    bot = M3U8Crawler()
    print(f"🚀 爬虫启动，当前ID: {bot.current_id}")
    while True:
        bot.process_video()


if __name__ == '__main__':
    _main()