import re
import csv
import requests
from loguru import logger
from tqdm import trange
import time

# SOCKS5 proxy used for every request — assumes a local tunnel is
# listening on 127.0.0.1:8442. TODO confirm the tunnel is up before running.
proxies = {
    'http': 'socks5://127.0.0.1:8442',
    'https': 'socks5://127.0.0.1:8442'
}
# ---------- Configuration ----------
start_page = 1  # first result page to crawl (inclusive)
end_page = 100  # last result page to crawl (inclusive)
output_csv = 'video_links.csv'  # destination CSV file
max_retry = 10          # maximum request attempts per page
backoff_base = 2       # backoff base: sleep = base * 2^attempt
# ----------------------------

# Browser-like request headers copied from a real Edge-on-Windows session,
# presumably to avoid anti-bot filtering — verify they still pass.
headers = {
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,ar;q=0.7",
    "cache-control": "no-cache",
    "pragma": "no-cache",
    "priority": "u=0, i",
    "sec-ch-ua": "\"Not)A;Brand\";v=\"8\", \"Chromium\";v=\"138\", \"Microsoft Edge\";v=\"138\"",
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": "\"Windows\"",
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "none",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36 Edg/138.0.0.0"
}
# Session cookies captured from a browser session.
# NOTE(review): these are hard-coded, time-stamped session values and will
# expire — refresh them from a live browser session before each run.
cookies = {
    "acw_tc": "0e1d3f9a17543644576132016e9a878f475a14050dd9c261f85ce5e725",
    "cdn_sec_tc": "0e1d3f9a17543644576132016e9a878f475a14050dd9c261f85ce5e725",
    "acw_sc__v2": "68917a29809fc88d29174db638a91e5363788dfb",
    "browserId": "9368bd04e5201e086d27e502b11a5c49",
    "Hm_lvt_d50ee9203a35be45d6b0a9db1948f94e": "1754364458",
    "HMACCOUNT": "628427A3BD9CB08E",
    "Hm_up_d50ee9203a35be45d6b0a9db1948f94e": "%7B%22use_new_ui%22%3A%7B%22value%22%3A%221%22%2C%22scope%22%3A1%7D%7D",
    "_ga": "GA1.1.331606996.1754364459",
    "Hm_lpvt_d50ee9203a35be45d6b0a9db1948f94e": "1754366293",
    "_ga_X670GRBYKT": "GS2.1.s1754364458$o1$g1$t1754367646$j54$l0$h0"
}
# Search-results page that is crawled (page number passed as a query param).
url = "https://www.vjshi.com/so/shipinsucai.html"

def extract_video_links(html):
    """Extract video download links and titles from a search-results HTML page.

    Scans the page for ``<img ... alt="title" ... src="thumbnail-url">`` tags
    (``alt`` must precede ``src``), then derives the mp4 download URL from the
    date/32-hex-hash path segment embedded in each thumbnail URL.

    Args:
        html: Raw HTML text of one search-results page.

    Returns:
        A list of ``(mp4_url, title)`` tuples; empty when nothing matches.
    """
    img_pattern = re.compile(
        r'<img[^>]*?alt="([^"]+)"[^>]*?src="([^"]+)"', re.DOTALL)
    # Fix: the original re-compiled this pattern inside the loop on every
    # match; hoisted out so it is compiled once per call.
    # Matches e.g. "/2025-01-15/<32+ hex chars>" followed by '/', '.' or '_'.
    path_pattern = re.compile(r'/(\d{4}-\d{2}-\d{2}/[a-f0-9]{32,})(/|\.|_)')
    result = []
    for alt, src in img_pattern.findall(html):
        path_match = path_pattern.search(src)
        if path_match:
            # Rebuild the direct mp4 URL from the thumbnail's path segment.
            mp4_url = f"https://mp4.vjshi.com/{path_match.group(1)}.mp4"
            result.append((mp4_url, alt))
    return result

def get_html_with_retry(page):
    """Fetch one search-results page, retrying with exponential backoff.

    Args:
        page: 1-based page number, sent as the ``page`` query parameter.

    Returns:
        The response body as text on HTTP 200, or ``None`` once all
        ``max_retry`` attempts have failed.
    """
    params = {"page": str(page)}
    last_exception = None
    for attempt in range(max_retry):
        try:
            response = requests.get(
                url, headers=headers, cookies=cookies, params=params, timeout=10, proxies=proxies
            )
            if response.status_code == 200:
                return response.text
            logger.warning(f"第{page}页 请求状态码{response.status_code} (第{attempt+1}次)")
        except requests.RequestException as e:
            # Narrowed from a bare Exception catch: only network/HTTP errors
            # should trigger a retry, not programming errors.
            last_exception = e
            logger.warning(f"第{page}页 请求异常: {e} (第{attempt+1}次)")
        # Fix: the original slept the full backoff even after the final
        # attempt, wasting backoff_base * 2**(max_retry-1) seconds before
        # giving up. Only sleep when another attempt will follow.
        if attempt < max_retry - 1:
            sleep_time = backoff_base * (2 ** attempt)
            logger.info(f"等待 {sleep_time}s 后重试...")
            time.sleep(sleep_time)
    logger.error(f"第{page}页 超过最大重试次数仍失败。原因: {last_exception}")
    return None

def crawl_pages(start, end):
    """Crawl result pages ``start``..``end`` (both inclusive).

    Each page is fetched with retry, parsed for video links, and the links
    are deduplicated across the whole run.

    Returns:
        A list of ``[mp4_url, page, title]`` rows in discovery order.
    """
    collected = []
    known_urls = set()  # mp4 URLs already recorded, for cross-page dedup
    for page in trange(start, end + 1, desc="遍历页数", ncols=80):
        html = get_html_with_retry(page)
        if not html:
            logger.error(f"第{page}页 放弃本页数据")
            continue
        video_datas = extract_video_links(html)
        logger.info(f"第{page}页 共抓取到{len(video_datas)}个视频链接")
        for mp4_url, title in video_datas:
            if mp4_url in known_urls:
                continue  # duplicate of a link seen on an earlier page
            logger.debug(f"第{page}页 视频: {mp4_url} 标题: {title}")
            collected.append([mp4_url, page, title])
            known_urls.add(mp4_url)
    return collected

def save_csv(data, file_path):
    """Write the collected link rows to ``file_path`` as UTF-8 CSV.

    A header row is prepended; ``data`` rows are written unchanged.
    """
    rows = [["mp4链接", "页码", "标题"], *data]
    with open(file_path, 'w', newline='', encoding='utf-8') as out:
        csv.writer(out).writerows(rows)
    logger.success(f"所有链接已保存到 {file_path} ，共 {len(data)} 条")

if __name__ == '__main__':
    # Mirror console logging to a rotating file for post-run inspection.
    logger.add("run.log", rotation="1 MB")
    logger.info(f"采集起止页: {start_page} - {end_page}")
    collected_rows = crawl_pages(start_page, end_page)
    save_csv(collected_rows, output_csv)
    logger.info("任务全部完成")