import os
import re
import pandas as pd
import requests
from jsonpath_ng import parse
import random
import time
from bs4 import BeautifulSoup
import json
import datetime
from urllib.parse import quote
import django
# Django settings must be configured (and setup() run) BEFORE importing any
# ORM model below, otherwise the model import raises ImproperlyConfigured.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', '豆瓣电影数据分析可视化系统.settings')
django.setup()
from myApp.models import Movies

# Pool of User-Agent strings; picking one at random per session lowers the
# chance of the requests being flagged as a crawler.
USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.5 Safari/605.1.15",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0",
    "Mozilla/5.0 (Linux; Android 13; SM-G998B) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Mobile Safari/537.36",
    "Mozilla/5.0 (iPhone; CPU iPhone OS 16_5 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.5 Mobile/15E148 Safari/604.1"
]


def get_random_headers():
    """Build request headers with a randomly chosen User-Agent.

    Includes a hard-coded session Cookie (copied from a logged-in browser)
    so Douban serves full page content instead of a login wall.
    """
    headers = {
        "User-Agent": random.choice(USER_AGENTS),
        "Referer": "https://movie.douban.com/",
        "Accept": "application/json, text/javascript, */*; q=0.01",  # JSON API endpoints
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
    }
    # Replace with your own post-login Cookie when this session expires.
    headers["Cookie"] = 'bid=PqAVbM5QlYI; ll="118360"; _vwo_uuid_v2=DD2A1C0F897BE8419CC81A2F0BF9CB78B|c314ed5c401d7eca61e8912d7f470972; __utmc=30149280; _pk_id.100001.8cb4=528102be93b62978.1754745091.; _pk_ref.100001.8cb4=%5B%22%22%2C%22%22%2C1754747497%2C%22https%3A%2F%2Fcn.bing.com%2F%22%5D; _pk_ses.100001.8cb4=1; ct=y; __utma=30149280.1074194882.1754495139.1754745091.1754747498.9; __utmz=30149280.1754747498.9.3.utmcsr=cn.bing.com|utmccn=(referral)|utmcmd=referral|utmcct=/; dbcl2="290508954:3ElXQMu14fE"; ck=zM3U; ap_v=0,6.0; frodotk_db="bc208e4e7efe24153461aaeb7cfa6c3f"; push_noty_num=0; push_doumail_num=0; __utmv=30149280.29050; 6333762c95037d16=Ub1wdAoApmRya4VOEleF%2BYv8vmegkN0krKZNVZLxCJshvyGlW7cd3JRSzI4wSCMAirr0EdnpxWaCpufoUuDTj%2FQl0A6BIT6Cu%2BzMLvxIQF2orvaRiZ6bT3c6245y1vp0MRrb16eqLUq2S5EaacEqxKZV55M7jn1Xruj2wTBHlUriW7YFgk878SYJjSuOLqn3n8Gl8A0tmWZdL0zK4SAb3K9EKb4Pc%2BDQHEUSD5lGjZ1KacXsCIn%2BUFt%2BaRG5SXz7Nl1CW6ImaBYAPL8Kc%2BXxA2kuzMkQisGjSzlxZpexbdgLXu5l4ncD4w%3D%3D; __yadk_uid=1aPyDcWIo0DBfvY1jJp4eVRbsGqtaoUv; __utmb=30149280.13.10.1754747498; _TDID_CK=1754747628614'
    return headers


def retry_request(url, headers, max_retries=3, params=None):
    """GET *url* with a simple retry loop.

    Args:
        url: Target URL.
        headers: Request headers dict.
        max_retries: Maximum number of attempts before giving up.
        params: Optional query parameters; requests handles URL-encoding,
            so the URL never needs manual quoting.

    Returns:
        The Response on HTTP 200, or None once every attempt has failed.
    """
    for attempt in range(max_retries):
        try:
            response = requests.get(url, headers=headers, params=params, timeout=10)
            if response.status_code == 200:
                return response
            print(f"请求失败，状态码：{response.status_code}，重试第{attempt + 1}次...")
        except requests.exceptions.RequestException as e:
            # Narrowed from a blanket Exception: only network/HTTP errors are
            # retryable; programming errors should surface immediately.
            print(f"请求异常：{e}，重试第{attempt + 1}次...")
        # Back off between attempts, but skip the pointless sleep after the
        # final failure (the original slept even when about to return None).
        if attempt < max_retries - 1:
            time.sleep(random.uniform(2, 4))
    return None


def spider(spiderTarget, start):
    """Fetch one page of the Douban list API and scrape each movie's detail page.

    Appends one dict per movie to the module-level ``result`` list (assigned
    in ``main``); a failure on any single movie is logged and skipped so the
    rest of the page still gets processed.

    Args:
        spiderTarget: List-API URL; ``page_start`` is sent as a query param.
        start: Offset of the first item on this page (page index * 20).
    """
    headers = get_random_headers()
    params = {'page_start': start}
    movieAllRes = retry_request(spiderTarget, headers, params=params)
    if not movieAllRes:
        print(f"多次重试后仍无法获取列表页数据（start={start}）")
        return

    try:
        movieAllRes = json.loads(movieAllRes.text)
    except json.JSONDecodeError as e:
        print(f"JSON解析失败：{e}，返回内容：{movieAllRes.text[:200]}")
        return

    # Bail out when the API returns no items (likely past the last page).
    if not movieAllRes.get('subjects'):
        print(f"当前页无数据（start={start}），可能已达最大页数")
        return

    movie_infos = parse("$.subjects").find(movieAllRes)[0].value
    detail_urls = [match.value for match in parse("$.subjects..url").find(movieAllRes)]

    for i, movie_info in enumerate(movie_infos):
        try:  # an error on one movie must not abort the whole page
            result_data = {}
            result_data['detailLink'] = detail_urls[i]
            result_data['rate'] = movie_info.get('rate', '0')
            result_data['title'] = movie_info.get('title', '未知标题')
            result_data['cover'] = movie_info.get('cover', '')

            # Random delay between detail requests to look less bot-like.
            time.sleep(random.uniform(1.5, 3.5))

            # Fetch the movie's detail page.
            detail_res = retry_request(detail_urls[i], headers)
            if not detail_res:
                print(f"详情页请求失败：{detail_urls[i]}")
                continue
            soup = BeautifulSoup(detail_res.text, "lxml")

            # Year: prefer a "(YYYY)" suffix in the title, then fall back to
            # the first 4-digit run in the release-date span.
            title = result_data['title']
            year_match = re.search(r'\((\d{4})\)', title)
            if year_match:
                result_data['year'] = year_match.group(1)
            else:
                release_date_tag = soup.find('span', property='v:initialReleaseDate')
                if release_date_tag:
                    date_year_match = re.search(r'\d{4}', release_date_tag.get_text())
                    result_data['year'] = date_year_match.group() if date_year_match else '未知年份'
                else:
                    result_data['year'] = '未知年份'

            # Genres (two possible markups), joined with commas.
            types = soup.find_all('span', property='v:genre') or soup.find_all('span', class_='genre')
            result_data['type'] = ','.join([span.get_text().strip() for span in types]) if types else '未知类型'

            # Production country: located by its label text rather than a
            # fixed index, so page-layout shifts don't grab the wrong field.
            country_tag = soup.find('span', class_='pl', string=re.compile('制片国家/地区:'))
            if country_tag and country_tag.next_sibling:
                country = [c.strip() for c in country_tag.next_sibling.strip().split('/')]
                result_data['country'] = ','.join(country)
            else:
                result_data['country'] = '未知国家'

            # Language: same label-based lookup as country.
            language_tag = soup.find('span', class_='pl', string=re.compile('语言:'))
            if language_tag and language_tag.next_sibling:
                language = [l.strip() for l in language_tag.next_sibling.strip().split('/')]
                result_data['language'] = ','.join(language)
            else:
                result_data['language'] = '未知语言'

            # Release date: first date-looking match across all release spans.
            uptime_tags = soup.find_all('span', property='v:initialReleaseDate')
            uptimeString = ''.join([tag.get_text() for tag in uptime_tags])
            uptime_match = re.findall(r'\d*-\d*-\d*', uptimeString)
            result_data['uptime'] = uptime_match[0] if uptime_match else '未知时间'

            # Runtime in minutes (first digit run in the runtime span).
            runtime_tag = soup.find('span', property='v:runtime')
            if runtime_tag:
                time_match = re.findall(r'\d+', runtime_tag.get_text())
                result_data['movietime'] = time_match[0] if time_match else 0
            else:
                result_data['movietime'] = 0

            # Number of ratings.
            votes_tag = soup.find('span', property="v:votes")
            result_data['comment'] = votes_tag.get_text() if votes_tag else '0'

            # Star-rating percentages, joined with '.' as the field separator.
            startsall = soup.find_all('span', class_="rating_per")
            result_data['starts'] = '.'.join([s.get_text() for s in startsall]) if startsall else ''

            # Synopsis.
            summary_tag = soup.find('span', property="v:summary")
            result_data['summary'] = summary_tag.get_text().strip() if summary_tag else '无简介'

            # Up to five top comments: user name, star rating, timestamp.
            # The contents[] offsets match Douban's comment-info markup;
            # any parse failure fills a placeholder entry instead of raising.
            comment_info = soup.find_all('span', class_="comment-info")
            comments = [{} for _ in range(5)]
            for j, comment in enumerate(comment_info[:5]):
                try:
                    comments[j]['user'] = comment.contents[1].get_text() if len(comment.contents) > 1 else '匿名用户'
                    star_class = comment.contents[5].attrs['class'][0] if len(comment.contents) > 5 else ''
                    star_match = re.findall(r'\d+', star_class)
                    comments[j]['start'] = star_match[0] if star_match else '0'
                    comments[j]['time'] = comment.contents[7].attrs['title'] if len(comment.contents) > 7 else '无时间'
                except:
                    comments[j] = {'user': '获取失败', 'start': '0', 'time': '无'}

            # Pair each parsed comment header with its body text.
            contents = soup.find_all('span', class_="short")
            for j in range(min(5, len(contents))):
                comments[j]['comments'] = contents[j].get_text()
            result_data['comments'] = json.dumps(comments)

            # Related still images, joined with '.' as the field separator.
            imglist = [img['src'] for img in soup.select('.related-pic-bd img')]
            result_data['imglist'] = '.'.join(imglist) if imglist else ''

            # Trailer: follow the trailer page and take its <source src>.
            video_tag = soup.find('a', class_="related-pic-video")
            if video_tag:
                movieurl = video_tag.get('href', '')
                foreshowMovieRes = retry_request(movieurl, headers)
                if foreshowMovieRes:
                    foreshowSoup = BeautifulSoup(foreshowMovieRes.text, "lxml")
                    source_tag = foreshowSoup.find('source')
                    result_data['movieurl'] = source_tag.get('src', '') if source_tag else '0'
                else:
                    result_data['movieurl'] = '0'
            else:
                result_data['movieurl'] = '0'

            # NOTE: ``result`` is the module-level list assigned in main().
            result.append(result_data)
            print(f'已爬取 {len(result)} 条数据')
        except Exception as e:
            print(f"处理第{i}条数据时出错：{e}，跳过该条")
            continue


def save_to_csv(df, is_first_page=False):
    """Persist *df* to ./datas.csv.

    The first page starts a fresh file with a header row; subsequent pages
    are appended without a header so the CSV stays well-formed.
    """
    if is_first_page:
        df.to_csv('./datas.csv', mode='w', header=True, index=False)
    else:
        df.to_csv('./datas.csv', mode='a', header=False, index=False)


def main():
    """Crawl up to *max_pages* list pages, persisting rows and progress.

    The next page index is stored in ./pagenum.txt so an interrupted run
    resumes where it left off; scraped rows accumulate in ./datas.csv.
    """
    global result
    result = []
    max_pages = 10  # pages to crawl this run (keep modest to avoid bans)
    current_page = 0

    # Resume from the last saved page index, if any.
    try:
        with open('./pagenum.txt', 'r') as fr:
            lines = fr.readlines()
            if lines:
                current_page = int(lines[-1])
    except FileNotFoundError:
        current_page = 0

    for page in range(current_page, current_page + max_pages):
        print(f'\n开始爬取第{page}页数据（每页20条）')
        result = []
        try:
            start = page * 20  # the list API serves 20 items per page
            spider(spider_target, start)

            if result:
                df = pd.DataFrame(result)
                # Bug fix: start a fresh CSV (mode 'w' + header) only when the
                # file does not exist yet. The original passed
                # is_first_page=(page == current_page), which truncated all
                # previously saved rows whenever a run resumed from
                # pagenum.txt with current_page > 0.
                save_to_csv(df, is_first_page=not os.path.exists('./datas.csv'))
                print(f'第{page}页数据已保存到CSV')

                # Record that this page is done; resume at page + 1 next run.
                with open('./pagenum.txt', 'w') as fw:
                    fw.write(str(page + 1))
            else:
                print(f'第{page}页未获取到有效数据，停止爬取')
                break

            # Longer, randomized pause between pages to avoid rate limiting.
            sleep_time = random.uniform(8, 15)
            print(f'等待{sleep_time:.1f}秒后继续下一页...')
            time.sleep(sleep_time)

        except Exception as e:
            # Save the failing page index so the next run retries it.
            print(f'爬取第{page}页时发生错误: {e}，已保存当前进度')
            with open('./pagenum.txt', 'w') as fw:
                fw.write(str(page))
            break


def clear_csv():
    """Load ./datas.csv, drop unusable and duplicate rows, return row values.

    Returns an empty list when the CSV has not been created yet.
    """
    csv_path = './datas.csv'
    if not os.path.exists(csv_path):
        return []
    frame = pd.read_csv(csv_path)
    # A row is unusable without its title or detail link.
    frame = frame.dropna(subset=['title', 'detailLink'])
    # The detail link uniquely identifies a movie; keep the first occurrence.
    frame = frame.drop_duplicates(subset=['detailLink'])
    return frame.values


def save_to_sql():
    """Insert cleaned CSV rows into the Movies table, skipping duplicates.

    Column order below mirrors the field order produced by the spider (and
    therefore the CSV column order); one bad row does not stop the import.
    """
    columns = (
        'detailLink', 'rate', 'title', 'cover', 'year', 'type', 'country',
        'language', 'uptime', 'movietime', 'comment', 'starts', 'summary',
        'comments', 'imglist', 'movieurl',
    )
    for row in clear_csv():
        fields = dict(zip(columns, row))
        try:
            # Idempotent import: skip links already present in the database.
            if not Movies.objects.filter(detailLink=fields['detailLink']).exists():
                Movies.objects.create(**fields)
        except Exception as e:
            print(f"入库失败：{e}，数据：{fields['title']}")
            continue


if __name__ == "__main__":
    print('爬取开始')
    # List-API target URL. page_start is supplied per-request via the params
    # argument, so the trailing "page_start=" was removed from the URL itself.
    spider_target = 'https://movie.douban.com/j/search_subjects?type=movie&tag=%E7%83%AD%E9%97%A8&sort=recommend&page_limit=20'
    # main()  # uncomment to (re)crawl before importing into the database
    save_to_sql()  # import into the DB after crawling (avoids hammering the database during the crawl)